net/can/bcm.c
1 /*
2 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
3 *
4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of Volkswagen nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * Alternatively, provided that this notice is retained in full, this
20 * software may be distributed under the terms of the GNU General
21 * Public License ("GPL") version 2, in which case the provisions of the
22 * GPL apply INSTEAD OF those given above.
23 *
24 * The provided data structures and external interfaces from this code
25 * are not restricted to be used by modules with a GPL compatible license.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 *
40 */
41
42 #include <linux/module.h>
43 #include <linux/init.h>
44 #include <linux/interrupt.h>
45 #include <linux/hrtimer.h>
46 #include <linux/list.h>
47 #include <linux/proc_fs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uio.h>
50 #include <linux/net.h>
51 #include <linux/netdevice.h>
52 #include <linux/socket.h>
53 #include <linux/if_arp.h>
54 #include <linux/skbuff.h>
55 #include <linux/can.h>
56 #include <linux/can/core.h>
57 #include <linux/can/skb.h>
58 #include <linux/can/bcm.h>
59 #include <linux/slab.h>
60 #include <net/sock.h>
61 #include <net/net_namespace.h>
62
63 /*
64 * To send the content of multiple CAN frames within TX_SETUP, or to
65 * filter CAN messages with a multiplex index within RX_SETUP, the number
66 * of different filters is limited to 256 due to the one-byte index value.
67 */
68 #define MAX_NFRAMES 256
69
70 /* use of last_frames[index].can_dlc */
71 #define RX_RECV 0x40 /* received data for this element */
72 #define RX_THR 0x80 /* element has not been sent due to throttle feature */
73 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
74
75 /* get best masking value for can_rx_register() for a given single can_id */
76 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
77 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
78 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
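/*
 * Sketch of what REGMASK() yields (derived from the macro above): for a
 * plain SFF can_id the mask is (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG),
 * so the subscription matches exactly that 11 bit id and only frames whose
 * EFF/RTR flags equal those of the given can_id; for a can_id with
 * CAN_EFF_FLAG set, the full 29 bit CAN_EFF_MASK is used instead.
 */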
79
80 #define CAN_BCM_VERSION CAN_VERSION
81 static __initconst const char banner[] = KERN_INFO
82 "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
83
84 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
85 MODULE_LICENSE("Dual BSD/GPL");
86 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
87 MODULE_ALIAS("can-proto-2");
88
89 /* easy access to can_frame payload */
90 static inline u64 GET_U64(const struct can_frame *cp)
91 {
92 return *(u64 *)cp->data;
93 }
94
95 struct bcm_op {
96 struct list_head list;
97 int ifindex;
98 canid_t can_id;
99 u32 flags;
100 unsigned long frames_abs, frames_filtered;
101 struct timeval ival1, ival2;
102 struct hrtimer timer, thrtimer;
103 struct tasklet_struct tsklet, thrtsklet;
104 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
105 int rx_ifindex;
106 u32 count;
107 u32 nframes;
108 u32 currframe;
109 struct can_frame *frames;
110 struct can_frame *last_frames;
111 struct can_frame sframe;
112 struct can_frame last_sframe;
113 struct sock *sk;
114 struct net_device *rx_reg_dev;
115 };
116
117 static struct proc_dir_entry *proc_dir;
118
119 struct bcm_sock {
120 struct sock sk;
121 int bound;
122 int ifindex;
123 struct notifier_block notifier;
124 struct list_head rx_ops;
125 struct list_head tx_ops;
126 unsigned long dropped_usr_msgs;
127 struct proc_dir_entry *bcm_proc_read;
128 char procname [32]; /* inode number in decimal with \0 */
129 };
130
131 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
132 {
133 return (struct bcm_sock *)sk;
134 }
135
136 #define CFSIZ sizeof(struct can_frame)
137 #define OPSIZ sizeof(struct bcm_op)
138 #define MHSIZ sizeof(struct bcm_msg_head)
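/*
 * Wire format note: every message exchanged with userspace via
 * sendmsg()/recvmsg() is a struct bcm_msg_head immediately followed by
 * head.nframes struct can_frame elements, i.e. a buffer of
 * MHSIZ + nframes * CFSIZ bytes. The length checks in bcm_sendmsg()
 * below rely on exactly this layout.
 */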
139
140 /*
141 * procfs functions
142 */
143 static char *bcm_proc_getifname(char *result, int ifindex)
144 {
145 struct net_device *dev;
146
147 if (!ifindex)
148 return "any";
149
150 rcu_read_lock();
151 dev = dev_get_by_index_rcu(&init_net, ifindex);
152 if (dev)
153 strcpy(result, dev->name);
154 else
155 strcpy(result, "???");
156 rcu_read_unlock();
157
158 return result;
159 }
160
161 static int bcm_proc_show(struct seq_file *m, void *v)
162 {
163 char ifname[IFNAMSIZ];
164 struct sock *sk = (struct sock *)m->private;
165 struct bcm_sock *bo = bcm_sk(sk);
166 struct bcm_op *op;
167
168 seq_printf(m, ">>> socket %pK", sk->sk_socket);
169 seq_printf(m, " / sk %pK", sk);
170 seq_printf(m, " / bo %pK", bo);
171 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
172 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
173 seq_printf(m, " <<<\n");
174
175 list_for_each_entry(op, &bo->rx_ops, list) {
176
177 unsigned long reduction;
178
179 /* print only active entries & prevent division by zero */
180 if (!op->frames_abs)
181 continue;
182
183 seq_printf(m, "rx_op: %03X %-5s ",
184 op->can_id, bcm_proc_getifname(ifname, op->ifindex));
185 seq_printf(m, "[%u]%c ", op->nframes,
186 (op->flags & RX_CHECK_DLC)?'d':' ');
187 if (op->kt_ival1.tv64)
188 seq_printf(m, "timeo=%lld ",
189 (long long)
190 ktime_to_us(op->kt_ival1));
191
192 if (op->kt_ival2.tv64)
193 seq_printf(m, "thr=%lld ",
194 (long long)
195 ktime_to_us(op->kt_ival2));
196
197 seq_printf(m, "# recv %ld (%ld) => reduction: ",
198 op->frames_filtered, op->frames_abs);
199
200 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
201
202 seq_printf(m, "%s%ld%%\n",
203 (reduction == 100)?"near ":"", reduction);
204 }
205
206 list_for_each_entry(op, &bo->tx_ops, list) {
207
208 seq_printf(m, "tx_op: %03X %s [%u] ",
209 op->can_id,
210 bcm_proc_getifname(ifname, op->ifindex),
211 op->nframes);
212
213 if (op->kt_ival1.tv64)
214 seq_printf(m, "t1=%lld ",
215 (long long) ktime_to_us(op->kt_ival1));
216
217 if (op->kt_ival2.tv64)
218 seq_printf(m, "t2=%lld ",
219 (long long) ktime_to_us(op->kt_ival2));
220
221 seq_printf(m, "# sent %ld\n", op->frames_abs);
222 }
223 seq_putc(m, '\n');
224 return 0;
225 }
226
227 static int bcm_proc_open(struct inode *inode, struct file *file)
228 {
229 return single_open(file, bcm_proc_show, PDE_DATA(inode));
230 }
231
232 static const struct file_operations bcm_proc_fops = {
233 .owner = THIS_MODULE,
234 .open = bcm_proc_open,
235 .read = seq_read,
236 .llseek = seq_lseek,
237 .release = single_release,
238 };
239
240 /*
241 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
242 * of the given bcm tx op
243 */
244 static void bcm_can_tx(struct bcm_op *op)
245 {
246 struct sk_buff *skb;
247 struct net_device *dev;
248 struct can_frame *cf = &op->frames[op->currframe];
249
250 /* no target device? => exit */
251 if (!op->ifindex)
252 return;
253
254 dev = dev_get_by_index(&init_net, op->ifindex);
255 if (!dev) {
256 /* RFC: should this bcm_op remove itself here? */
257 return;
258 }
259
260 skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), gfp_any());
261 if (!skb)
262 goto out;
263
264 can_skb_reserve(skb);
265 can_skb_prv(skb)->ifindex = dev->ifindex;
266
267 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
268
269 /* send with loopback */
270 skb->dev = dev;
271 can_skb_set_owner(skb, op->sk);
272 can_send(skb, 1);
273
274 /* update statistics */
275 op->currframe++;
276 op->frames_abs++;
277
278 /* reached last frame? */
279 if (op->currframe >= op->nframes)
280 op->currframe = 0;
281 out:
282 dev_put(dev);
283 }
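/*
 * Note: with nframes > 1 the frames of this op are sent round-robin -
 * one frame per call / timer expiry - restarting at index 0 after the
 * last frame has been sent.
 */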
284
285 /*
286 * bcm_send_to_user - send a BCM message to the userspace
287 * (consisting of bcm_msg_head + x CAN frames)
288 */
289 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
290 struct can_frame *frames, int has_timestamp)
291 {
292 struct sk_buff *skb;
293 struct can_frame *firstframe;
294 struct sockaddr_can *addr;
295 struct sock *sk = op->sk;
296 unsigned int datalen = head->nframes * CFSIZ;
297 int err;
298
299 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
300 if (!skb)
301 return;
302
303 memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
304
305 if (head->nframes) {
306 /* can_frames starting here */
307 firstframe = (struct can_frame *)skb_tail_pointer(skb);
308
309 memcpy(skb_put(skb, datalen), frames, datalen);
310
311 /*
312 * the BCM uses the can_dlc-element of the can_frame
313 * structure for internal purposes. This is only
314 * relevant for updates that are generated by the
315 * BCM, where nframes is 1
316 */
317 if (head->nframes == 1)
318 firstframe->can_dlc &= BCM_CAN_DLC_MASK;
319 }
320
321 if (has_timestamp) {
322 /* restore rx timestamp */
323 skb->tstamp = op->rx_stamp;
324 }
325
326 /*
327 * Put the datagram to the queue so that bcm_recvmsg() can
328 * get it from there. We need to pass the interface index to
329 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
330 * containing the interface index.
331 */
332
333 BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
334 addr = (struct sockaddr_can *)skb->cb;
335 memset(addr, 0, sizeof(*addr));
336 addr->can_family = AF_CAN;
337 addr->can_ifindex = op->rx_ifindex;
338
339 err = sock_queue_rcv_skb(sk, skb);
340 if (err < 0) {
341 struct bcm_sock *bo = bcm_sk(sk);
342
343 kfree_skb(skb);
344 /* don't care about overflows in this statistic */
345 bo->dropped_usr_msgs++;
346 }
347 }
348
349 static void bcm_tx_start_timer(struct bcm_op *op)
350 {
351 if (op->kt_ival1.tv64 && op->count)
352 hrtimer_start(&op->timer,
353 ktime_add(ktime_get(), op->kt_ival1),
354 HRTIMER_MODE_ABS);
355 else if (op->kt_ival2.tv64)
356 hrtimer_start(&op->timer,
357 ktime_add(ktime_get(), op->kt_ival2),
358 HRTIMER_MODE_ABS);
359 }
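/*
 * Timing sketch (derived from the two branches above and from
 * bcm_tx_timeout_tsklet() below): while op->count is non-zero the frames
 * are sent with the ival1 cycle time; once the count is used up,
 * transmission continues with the ival2 cycle time (if set). E.g. with
 * count=10, ival1=10ms and ival2=1s, ten frames are sent 10 ms apart and
 * transmission then continues once per second.
 */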
360
361 static void bcm_tx_timeout_tsklet(unsigned long data)
362 {
363 struct bcm_op *op = (struct bcm_op *)data;
364 struct bcm_msg_head msg_head;
365
366 if (op->kt_ival1.tv64 && (op->count > 0)) {
367
368 op->count--;
369 if (!op->count && (op->flags & TX_COUNTEVT)) {
370
371 /* create notification to user */
372 msg_head.opcode = TX_EXPIRED;
373 msg_head.flags = op->flags;
374 msg_head.count = op->count;
375 msg_head.ival1 = op->ival1;
376 msg_head.ival2 = op->ival2;
377 msg_head.can_id = op->can_id;
378 msg_head.nframes = 0;
379
380 bcm_send_to_user(op, &msg_head, NULL, 0);
381 }
382 bcm_can_tx(op);
383
384 } else if (op->kt_ival2.tv64)
385 bcm_can_tx(op);
386
387 bcm_tx_start_timer(op);
388 }
389
390 /*
391 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
392 */
393 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
394 {
395 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
396
397 tasklet_schedule(&op->tsklet);
398
399 return HRTIMER_NORESTART;
400 }
401
402 /*
403 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
404 */
405 static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
406 {
407 struct bcm_msg_head head;
408
409 /* update statistics */
410 op->frames_filtered++;
411
412 /* prevent statistics overflow */
413 if (op->frames_filtered > ULONG_MAX/100)
414 op->frames_filtered = op->frames_abs = 0;
415
416 /* this element is not throttled anymore */
417 data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
418
419 head.opcode = RX_CHANGED;
420 head.flags = op->flags;
421 head.count = op->count;
422 head.ival1 = op->ival1;
423 head.ival2 = op->ival2;
424 head.can_id = op->can_id;
425 head.nframes = 1;
426
427 bcm_send_to_user(op, &head, data, 1);
428 }
429
430 /*
431 * bcm_rx_update_and_send - process a detected relevant receive content change
432 * 1. update the last received data
433 * 2. send a notification to the user (if possible)
434 */
435 static void bcm_rx_update_and_send(struct bcm_op *op,
436 struct can_frame *lastdata,
437 const struct can_frame *rxdata)
438 {
439 memcpy(lastdata, rxdata, CFSIZ);
440
441 /* mark as used and throttled by default */
442 lastdata->can_dlc |= (RX_RECV|RX_THR);
443
444 /* throttling mode inactive? */
445 if (!op->kt_ival2.tv64) {
446 /* send RX_CHANGED to the user immediately */
447 bcm_rx_changed(op, lastdata);
448 return;
449 }
450
451 /* with active throttling timer we are just done here */
452 if (hrtimer_active(&op->thrtimer))
453 return;
454
455 /* first reception with throttling mode enabled */
456 if (!op->kt_lastmsg.tv64)
457 goto rx_changed_settime;
458
459 /* got a second frame inside a potential throttle period? */
460 if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
461 ktime_to_us(op->kt_ival2)) {
462 /* do not send the saved data - only start throttle timer */
463 hrtimer_start(&op->thrtimer,
464 ktime_add(op->kt_lastmsg, op->kt_ival2),
465 HRTIMER_MODE_ABS);
466 return;
467 }
468
469 /* the gap was big enough that throttling was not needed here */
470 rx_changed_settime:
471 bcm_rx_changed(op, lastdata);
472 op->kt_lastmsg = ktime_get();
473 }
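/*
 * Throttling in short (a consequence of the checks above): with kt_ival2
 * set, at most one RX_CHANGED message per kt_ival2 period reaches the
 * socket; further changes within the period are only stored in
 * last_frames[] (marked RX_THR) and flushed later by the throttle timer
 * via bcm_rx_thr_flush().
 */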
474
475 /*
476 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
477 * received data stored in op->last_frames[]
478 */
479 static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
480 const struct can_frame *rxdata)
481 {
482 /*
483 * no one uses the MSBs of can_dlc for comparison,
484 * so we use them here to detect the first reception
485 */
486
487 if (!(op->last_frames[index].can_dlc & RX_RECV)) {
488 /* received data for the first time => send update to user */
489 bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
490 return;
491 }
492
493 /* do a real check in can_frame data section */
494
495 if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
496 (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
497 bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
498 return;
499 }
500
501 if (op->flags & RX_CHECK_DLC) {
502 /* do a real check in can_frame dlc */
503 if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
504 BCM_CAN_DLC_MASK)) {
505 bcm_rx_update_and_send(op, &op->last_frames[index],
506 rxdata);
507 return;
508 }
509 }
510 }
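/*
 * Relevance mask example: op->frames[index] acts as a bit mask selecting
 * which payload bits may trigger an RX_CHANGED notification. A filter
 * frame with data[] = { 0xff, 0xff, 0, 0, 0, 0, 0, 0 } reports changes
 * in the first two bytes only; changes in the remaining bytes are ignored.
 */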
511
512 /*
513 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
514 */
515 static void bcm_rx_starttimer(struct bcm_op *op)
516 {
517 if (op->flags & RX_NO_AUTOTIMER)
518 return;
519
520 if (op->kt_ival1.tv64)
521 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
522 }
523
524 static void bcm_rx_timeout_tsklet(unsigned long data)
525 {
526 struct bcm_op *op = (struct bcm_op *)data;
527 struct bcm_msg_head msg_head;
528
529 /* create notification to user */
530 msg_head.opcode = RX_TIMEOUT;
531 msg_head.flags = op->flags;
532 msg_head.count = op->count;
533 msg_head.ival1 = op->ival1;
534 msg_head.ival2 = op->ival2;
535 msg_head.can_id = op->can_id;
536 msg_head.nframes = 0;
537
538 bcm_send_to_user(op, &msg_head, NULL, 0);
539 }
540
541 /*
542 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
543 */
544 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
545 {
546 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
547
548 /* schedule before NET_RX_SOFTIRQ */
549 tasklet_hi_schedule(&op->tsklet);
550
551 /* no restart of the timer is done here! */
552
553 /* if the user wants to be informed when cyclic CAN messages come back */
554 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
555 /* clear received can_frames to indicate 'nothing received' */
556 memset(op->last_frames, 0, op->nframes * CFSIZ);
557 }
558
559 return HRTIMER_NORESTART;
560 }
561
562 /*
563 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
564 */
565 static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
566 unsigned int index)
567 {
568 if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
569 if (update)
570 bcm_rx_changed(op, &op->last_frames[index]);
571 return 1;
572 }
573 return 0;
574 }
575
576 /*
577 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
578 *
579 * update == 0 : just check if throttled data is available (any irq context)
580 * update == 1 : check and send throttled data to userspace (soft_irq context)
581 */
582 static int bcm_rx_thr_flush(struct bcm_op *op, int update)
583 {
584 int updated = 0;
585
586 if (op->nframes > 1) {
587 unsigned int i;
588
589 /* for MUX filter we start at index 1 */
590 for (i = 1; i < op->nframes; i++)
591 updated += bcm_rx_do_flush(op, update, i);
592
593 } else {
594 /* for RX_FILTER_ID and simple filter */
595 updated += bcm_rx_do_flush(op, update, 0);
596 }
597
598 return updated;
599 }
600
601 static void bcm_rx_thr_tsklet(unsigned long data)
602 {
603 struct bcm_op *op = (struct bcm_op *)data;
604
605 /* push the changed data to the userspace */
606 bcm_rx_thr_flush(op, 1);
607 }
608
609 /*
610 * bcm_rx_thr_handler - the time for blocked content updates is over now:
611 * Check for throttled data and send it to the userspace
612 */
613 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
614 {
615 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
616
617 tasklet_schedule(&op->thrtsklet);
618
619 if (bcm_rx_thr_flush(op, 0)) {
620 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
621 return HRTIMER_RESTART;
622 } else {
623 /* rearm throttle handling */
624 op->kt_lastmsg = ktime_set(0, 0);
625 return HRTIMER_NORESTART;
626 }
627 }
628
629 /*
630 * bcm_rx_handler - handle a CAN frame reception
631 */
632 static void bcm_rx_handler(struct sk_buff *skb, void *data)
633 {
634 struct bcm_op *op = (struct bcm_op *)data;
635 const struct can_frame *rxframe = (struct can_frame *)skb->data;
636 unsigned int i;
637
638 /* disable timeout */
639 hrtimer_cancel(&op->timer);
640
641 if (op->can_id != rxframe->can_id)
642 return;
643
644 /* save rx timestamp */
645 op->rx_stamp = skb->tstamp;
646 /* save originator for recvfrom() */
647 op->rx_ifindex = skb->dev->ifindex;
648 /* update statistics */
649 op->frames_abs++;
650
651 if (op->flags & RX_RTR_FRAME) {
652 /* send reply for RTR-request (placed in op->frames[0]) */
653 bcm_can_tx(op);
654 return;
655 }
656
657 if (op->flags & RX_FILTER_ID) {
658 /* the easiest case */
659 bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
660 goto rx_starttimer;
661 }
662
663 if (op->nframes == 1) {
664 /* simple compare with index 0 */
665 bcm_rx_cmp_to_index(op, 0, rxframe);
666 goto rx_starttimer;
667 }
668
669 if (op->nframes > 1) {
670 /*
671 * multiplex compare
672 *
673 * find the first multiplex mask that fits.
674 * Remark: The MUX-mask is stored in index 0
675 */
676
677 for (i = 1; i < op->nframes; i++) {
678 if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
679 (GET_U64(&op->frames[0]) &
680 GET_U64(&op->frames[i]))) {
681 bcm_rx_cmp_to_index(op, i, rxframe);
682 break;
683 }
684 }
685 }
686
687 rx_starttimer:
688 bcm_rx_starttimer(op);
689 }
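/*
 * Note on the multiplex case above: op->frames[0] holds the MUX mask and
 * each op->frames[i] (i >= 1) both selects its MUX slot via the masked
 * bits and serves as the relevance mask passed to bcm_rx_cmp_to_index().
 */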
690
691 /*
692 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
693 */
694 static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
695 int ifindex)
696 {
697 struct bcm_op *op;
698
699 list_for_each_entry(op, ops, list) {
700 if ((op->can_id == can_id) && (op->ifindex == ifindex))
701 return op;
702 }
703
704 return NULL;
705 }
706
707 static void bcm_remove_op(struct bcm_op *op)
708 {
709 hrtimer_cancel(&op->timer);
710 hrtimer_cancel(&op->thrtimer);
711
712 if (op->tsklet.func)
713 tasklet_kill(&op->tsklet);
714
715 if (op->thrtsklet.func)
716 tasklet_kill(&op->thrtsklet);
717
718 if ((op->frames) && (op->frames != &op->sframe))
719 kfree(op->frames);
720
721 if ((op->last_frames) && (op->last_frames != &op->last_sframe))
722 kfree(op->last_frames);
723
724 kfree(op);
725 }
726
727 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
728 {
729 if (op->rx_reg_dev == dev) {
730 can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
731 bcm_rx_handler, op);
732
733 /* mark subscription as removed */
734 op->rx_reg_dev = NULL;
735 } else
736 printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
737 "mismatch %p %p\n", op->rx_reg_dev, dev);
738 }
739
740 /*
741 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
742 */
743 static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
744 {
745 struct bcm_op *op, *n;
746
747 list_for_each_entry_safe(op, n, ops, list) {
748 if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
749
750 /*
751 * Don't care if we're bound or not (due to netdev
752 * problems); can_rx_unregister() is always a safe
753 * thing to do here.
754 */
755 if (op->ifindex) {
756 /*
757 * Only remove subscriptions that had not
758 * been removed due to NETDEV_UNREGISTER
759 * in bcm_notifier()
760 */
761 if (op->rx_reg_dev) {
762 struct net_device *dev;
763
764 dev = dev_get_by_index(&init_net,
765 op->ifindex);
766 if (dev) {
767 bcm_rx_unreg(dev, op);
768 dev_put(dev);
769 }
770 }
771 } else
772 can_rx_unregister(NULL, op->can_id,
773 REGMASK(op->can_id),
774 bcm_rx_handler, op);
775
776 list_del(&op->list);
777 bcm_remove_op(op);
778 return 1; /* done */
779 }
780 }
781
782 return 0; /* not found */
783 }
784
785 /*
786 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
787 */
788 static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
789 {
790 struct bcm_op *op, *n;
791
792 list_for_each_entry_safe(op, n, ops, list) {
793 if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
794 list_del(&op->list);
795 bcm_remove_op(op);
796 return 1; /* done */
797 }
798 }
799
800 return 0; /* not found */
801 }
802
803 /*
804 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
805 */
806 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
807 int ifindex)
808 {
809 struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);
810
811 if (!op)
812 return -EINVAL;
813
814 /* put current values into msg_head */
815 msg_head->flags = op->flags;
816 msg_head->count = op->count;
817 msg_head->ival1 = op->ival1;
818 msg_head->ival2 = op->ival2;
819 msg_head->nframes = op->nframes;
820
821 bcm_send_to_user(op, msg_head, op->frames, 0);
822
823 return MHSIZ;
824 }
825
826 /*
827 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
828 */
829 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
830 int ifindex, struct sock *sk)
831 {
832 struct bcm_sock *bo = bcm_sk(sk);
833 struct bcm_op *op;
834 unsigned int i;
835 int err;
836
837 /* we need a real device to send frames */
838 if (!ifindex)
839 return -ENODEV;
840
841 /* check nframes boundaries - we need at least one can_frame */
842 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
843 return -EINVAL;
844
845 /* check the given can_id */
846 op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);
847
848 if (op) {
849 /* update existing BCM operation */
850
851 /*
852 * Do we need more space for the can_frames than currently
853 * allocated? -> This is a _really_ unusual use-case and
854 * therefore (for complexity/locking reasons) it is not supported.
855 */
856 if (msg_head->nframes > op->nframes)
857 return -E2BIG;
858
859 /* update can_frames content */
860 for (i = 0; i < msg_head->nframes; i++) {
861 err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);
862
863 if (op->frames[i].can_dlc > 8)
864 err = -EINVAL;
865
866 if (err < 0)
867 return err;
868
869 if (msg_head->flags & TX_CP_CAN_ID) {
870 /* copy can_id into frame */
871 op->frames[i].can_id = msg_head->can_id;
872 }
873 }
874
875 } else {
876 /* insert new BCM operation for the given can_id */
877
878 op = kzalloc(OPSIZ, GFP_KERNEL);
879 if (!op)
880 return -ENOMEM;
881
882 op->can_id = msg_head->can_id;
883
884 /* create array for can_frames and copy the data */
885 if (msg_head->nframes > 1) {
886 op->frames = kmalloc(msg_head->nframes * CFSIZ,
887 GFP_KERNEL);
888 if (!op->frames) {
889 kfree(op);
890 return -ENOMEM;
891 }
892 } else
893 op->frames = &op->sframe;
894
895 for (i = 0; i < msg_head->nframes; i++) {
896 err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);
897
898 if (op->frames[i].can_dlc > 8)
899 err = -EINVAL;
900
901 if (err < 0) {
902 if (op->frames != &op->sframe)
903 kfree(op->frames);
904 kfree(op);
905 return err;
906 }
907
908 if (msg_head->flags & TX_CP_CAN_ID) {
909 /* copy can_id into frame */
910 op->frames[i].can_id = msg_head->can_id;
911 }
912 }
913
914 /* tx_ops never compare with previously received messages */
915 op->last_frames = NULL;
916
917 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
918 op->sk = sk;
919 op->ifindex = ifindex;
920
921 /* initialize uninitialized (kzalloc) structure */
922 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
923 op->timer.function = bcm_tx_timeout_handler;
924
925 /* initialize tasklet for tx countevent notification */
926 tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
927 (unsigned long) op);
928
929 /* currently unused in tx_ops */
930 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
931
932 /* add this bcm_op to the list of the tx_ops */
933 list_add(&op->list, &bo->tx_ops);
934
935 } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
936
937 if (op->nframes != msg_head->nframes) {
938 op->nframes = msg_head->nframes;
939 /* start multiple frame transmission with index 0 */
940 op->currframe = 0;
941 }
942
943 /* check flags */
944
945 op->flags = msg_head->flags;
946
947 if (op->flags & TX_RESET_MULTI_IDX) {
948 /* start multiple frame transmission with index 0 */
949 op->currframe = 0;
950 }
951
952 if (op->flags & SETTIMER) {
953 /* set timer values */
954 op->count = msg_head->count;
955 op->ival1 = msg_head->ival1;
956 op->ival2 = msg_head->ival2;
957 op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
958 op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
959
960 /* disable an active timer due to zero values? */
961 if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
962 hrtimer_cancel(&op->timer);
963 }
964
965 if (op->flags & STARTTIMER) {
966 hrtimer_cancel(&op->timer);
967 /* spec: send can_frame when starting timer */
968 op->flags |= TX_ANNOUNCE;
969 }
970
971 if (op->flags & TX_ANNOUNCE) {
972 bcm_can_tx(op);
973 if (op->count)
974 op->count--;
975 }
976
977 if (op->flags & STARTTIMER)
978 bcm_tx_start_timer(op);
979
980 return msg_head->nframes * CFSIZ + MHSIZ;
981 }
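/*
 * Userspace sketch for TX_SETUP (an illustration only, not part of this
 * module; it assumes an already connect()ed CAN_BCM socket 'fd' - see the
 * sketch near bcm_connect() below - and uses only the UAPI from
 * <linux/can.h> and <linux/can/bcm.h>):
 *
 *	struct {
 *		struct bcm_msg_head head;
 *		struct can_frame frame;
 *	} tx = {
 *		.head = {
 *			.opcode  = TX_SETUP,
 *			.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID,
 *			.can_id  = 0x123,
 *			.nframes = 1,
 *			.ival2.tv_usec = 100000,
 *		},
 *		.frame = {
 *			.can_dlc = 2,
 *			.data    = { 0x11, 0x22 },
 *		},
 *	};
 *
 *	write(fd, &tx, sizeof(tx));
 *
 * count and ival1 are left at zero, so only the 100 ms ival2 cycle time is
 * used: the frame is sent once immediately (STARTTIMER implies TX_ANNOUNCE
 * above) and then cyclically every 100 ms, with can_id copied into the
 * frame due to TX_CP_CAN_ID.
 */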
982
983 /*
984 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
985 */
986 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
987 int ifindex, struct sock *sk)
988 {
989 struct bcm_sock *bo = bcm_sk(sk);
990 struct bcm_op *op;
991 int do_rx_register;
992 int err = 0;
993
994 if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
995 /* be robust against wrong usage ... */
996 msg_head->flags |= RX_FILTER_ID;
997 /* ignore trailing garbage */
998 msg_head->nframes = 0;
999 }
1000
1001 /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
1002 if (msg_head->nframes > MAX_NFRAMES + 1)
1003 return -EINVAL;
1004
1005 if ((msg_head->flags & RX_RTR_FRAME) &&
1006 ((msg_head->nframes != 1) ||
1007 (!(msg_head->can_id & CAN_RTR_FLAG))))
1008 return -EINVAL;
1009
1010 /* check the given can_id */
1011 op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
1012 if (op) {
1013 /* update existing BCM operation */
1014
1015 /*
1016 * Do we need more space for the can_frames than currently
1017 * allocated? -> This is a _really_ unusual use-case and
1018 * therefore (for complexity/locking reasons) it is not supported.
1019 */
1020 if (msg_head->nframes > op->nframes)
1021 return -E2BIG;
1022
1023 if (msg_head->nframes) {
1024 /* update can_frames content */
1025 err = memcpy_from_msg((u8 *)op->frames, msg,
1026 msg_head->nframes * CFSIZ);
1027 if (err < 0)
1028 return err;
1029
1030 /* clear last_frames to indicate 'nothing received' */
1031 memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
1032 }
1033
1034 op->nframes = msg_head->nframes;
1035
1036 /* Only an update -> do not call can_rx_register() */
1037 do_rx_register = 0;
1038
1039 } else {
1040 /* insert new BCM operation for the given can_id */
1041 op = kzalloc(OPSIZ, GFP_KERNEL);
1042 if (!op)
1043 return -ENOMEM;
1044
1045 op->can_id = msg_head->can_id;
1046 op->nframes = msg_head->nframes;
1047
1048 if (msg_head->nframes > 1) {
1049 /* create array for can_frames and copy the data */
1050 op->frames = kmalloc(msg_head->nframes * CFSIZ,
1051 GFP_KERNEL);
1052 if (!op->frames) {
1053 kfree(op);
1054 return -ENOMEM;
1055 }
1056
1057 /* create and init array for received can_frames */
1058 op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
1059 GFP_KERNEL);
1060 if (!op->last_frames) {
1061 kfree(op->frames);
1062 kfree(op);
1063 return -ENOMEM;
1064 }
1065
1066 } else {
1067 op->frames = &op->sframe;
1068 op->last_frames = &op->last_sframe;
1069 }
1070
1071 if (msg_head->nframes) {
1072 err = memcpy_from_msg((u8 *)op->frames, msg,
1073 msg_head->nframes * CFSIZ);
1074 if (err < 0) {
1075 if (op->frames != &op->sframe)
1076 kfree(op->frames);
1077 if (op->last_frames != &op->last_sframe)
1078 kfree(op->last_frames);
1079 kfree(op);
1080 return err;
1081 }
1082 }
1083
1084 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1085 op->sk = sk;
1086 op->ifindex = ifindex;
1087
1088 /* ifindex for timeout events w/o previous frame reception */
1089 op->rx_ifindex = ifindex;
1090
1091 /* initialize uninitialized (kzalloc) structure */
1092 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1093 op->timer.function = bcm_rx_timeout_handler;
1094
1095 /* initialize tasklet for rx timeout notification */
1096 tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
1097 (unsigned long) op);
1098
1099 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1100 op->thrtimer.function = bcm_rx_thr_handler;
1101
1102 /* initialize tasklet for rx throttle handling */
1103 tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
1104 (unsigned long) op);
1105
1106 /* add this bcm_op to the list of the rx_ops */
1107 list_add(&op->list, &bo->rx_ops);
1108
1109 /* call can_rx_register() */
1110 do_rx_register = 1;
1111
1112 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1113
1114 /* check flags */
1115 op->flags = msg_head->flags;
1116
1117 if (op->flags & RX_RTR_FRAME) {
1118
1119 /* no timers in RTR-mode */
1120 hrtimer_cancel(&op->thrtimer);
1121 hrtimer_cancel(&op->timer);
1122
1123 /*
1124 * funny feature in RX(!)_SETUP only for RTR-mode:
1125 * copy can_id into frame BUT without RTR-flag to
1126 * prevent a full-load-loopback-test ... ;-]
1127 */
1128 if ((op->flags & TX_CP_CAN_ID) ||
1129 (op->frames[0].can_id == op->can_id))
1130 op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
1131
1132 } else {
1133 if (op->flags & SETTIMER) {
1134
1135 /* set timer value */
1136 op->ival1 = msg_head->ival1;
1137 op->ival2 = msg_head->ival2;
1138 op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
1139 op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
1140
1141 /* disable an active timer due to zero value? */
1142 if (!op->kt_ival1.tv64)
1143 hrtimer_cancel(&op->timer);
1144
1145 /*
1146 * In any case cancel the throttle timer, flush
1147 * potentially blocked msgs and reset throttle handling
1148 */
1149 op->kt_lastmsg = ktime_set(0, 0);
1150 hrtimer_cancel(&op->thrtimer);
1151 bcm_rx_thr_flush(op, 1);
1152 }
1153
1154 if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
1155 hrtimer_start(&op->timer, op->kt_ival1,
1156 HRTIMER_MODE_REL);
1157 }
1158
1159 /* now we can register for can_ids, if we added a new bcm_op */
1160 if (do_rx_register) {
1161 if (ifindex) {
1162 struct net_device *dev;
1163
1164 dev = dev_get_by_index(&init_net, ifindex);
1165 if (dev) {
1166 err = can_rx_register(dev, op->can_id,
1167 REGMASK(op->can_id),
1168 bcm_rx_handler, op,
1169 "bcm");
1170
1171 op->rx_reg_dev = dev;
1172 dev_put(dev);
1173 }
1174
1175 } else
1176 err = can_rx_register(NULL, op->can_id,
1177 REGMASK(op->can_id),
1178 bcm_rx_handler, op, "bcm");
1179 if (err) {
1180 /* this bcm rx op is broken -> remove it */
1181 list_del(&op->list);
1182 bcm_remove_op(op);
1183 return err;
1184 }
1185 }
1186
1187 return msg_head->nframes * CFSIZ + MHSIZ;
1188 }
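/*
 * Userspace sketch for RX_SETUP (an illustration only, not part of this
 * module; same assumptions as the TX_SETUP sketch above): subscribe to
 * can_id 0x123, report changes of the first two payload bytes and signal
 * RX_TIMEOUT after one second without a matching frame:
 *
 *	struct {
 *		struct bcm_msg_head head;
 *		struct can_frame frame;
 *	} rx = {
 *		.head = {
 *			.opcode  = RX_SETUP,
 *			.flags   = SETTIMER | STARTTIMER,
 *			.can_id  = 0x123,
 *			.nframes = 1,
 *			.ival1.tv_sec = 1,
 *		},
 *		.frame = {
 *			.data = { 0xff, 0xff },
 *		},
 *	};
 *
 *	write(fd, &rx, sizeof(rx));
 *
 * RX_CHANGED and RX_TIMEOUT messages are then delivered through
 * read()/recvmsg() in the same bcm_msg_head + can_frame layout.
 */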
1189
1190 /*
1191 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1192 */
1193 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1194 {
1195 struct sk_buff *skb;
1196 struct net_device *dev;
1197 int err;
1198
1199 /* we need a real device to send frames */
1200 if (!ifindex)
1201 return -ENODEV;
1202
1203 skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), GFP_KERNEL);
1204 if (!skb)
1205 return -ENOMEM;
1206
1207 can_skb_reserve(skb);
1208
1209 err = memcpy_from_msg(skb_put(skb, CFSIZ), msg, CFSIZ);
1210 if (err < 0) {
1211 kfree_skb(skb);
1212 return err;
1213 }
1214
1215 dev = dev_get_by_index(&init_net, ifindex);
1216 if (!dev) {
1217 kfree_skb(skb);
1218 return -ENODEV;
1219 }
1220
1221 can_skb_prv(skb)->ifindex = dev->ifindex;
1222 skb->dev = dev;
1223 can_skb_set_owner(skb, sk);
1224 err = can_send(skb, 1); /* send with loopback */
1225 dev_put(dev);
1226
1227 if (err)
1228 return err;
1229
1230 return CFSIZ + MHSIZ;
1231 }
1232
1233 /*
1234 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1235 */
1236 static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
1237 struct msghdr *msg, size_t size)
1238 {
1239 struct sock *sk = sock->sk;
1240 struct bcm_sock *bo = bcm_sk(sk);
1241 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1242 struct bcm_msg_head msg_head;
1243 int ret; /* read bytes or error codes as return value */
1244
1245 if (!bo->bound)
1246 return -ENOTCONN;
1247
1248 /* check for valid message length from userspace */
1249 if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
1250 return -EINVAL;
1251
1252 /* check for alternative ifindex for this bcm_op */
1253
1254 if (!ifindex && msg->msg_name) {
1255 /* no bound device as default => check msg_name */
1256 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
1257
1258 if (msg->msg_namelen < sizeof(*addr))
1259 return -EINVAL;
1260
1261 if (addr->can_family != AF_CAN)
1262 return -EINVAL;
1263
1264 /* ifindex from sendto() */
1265 ifindex = addr->can_ifindex;
1266
1267 if (ifindex) {
1268 struct net_device *dev;
1269
1270 dev = dev_get_by_index(&init_net, ifindex);
1271 if (!dev)
1272 return -ENODEV;
1273
1274 if (dev->type != ARPHRD_CAN) {
1275 dev_put(dev);
1276 return -ENODEV;
1277 }
1278
1279 dev_put(dev);
1280 }
1281 }
1282
1283 /* read message head information */
1284
1285 ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
1286 if (ret < 0)
1287 return ret;
1288
1289 lock_sock(sk);
1290
1291 switch (msg_head.opcode) {
1292
1293 case TX_SETUP:
1294 ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1295 break;
1296
1297 case RX_SETUP:
1298 ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1299 break;
1300
1301 case TX_DELETE:
1302 if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
1303 ret = MHSIZ;
1304 else
1305 ret = -EINVAL;
1306 break;
1307
1308 case RX_DELETE:
1309 if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
1310 ret = MHSIZ;
1311 else
1312 ret = -EINVAL;
1313 break;
1314
1315 case TX_READ:
1316 /* reuse msg_head for the reply to TX_READ */
1317 msg_head.opcode = TX_STATUS;
1318 ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1319 break;
1320
1321 case RX_READ:
1322 /* reuse msg_head for the reply to RX_READ */
1323 msg_head.opcode = RX_STATUS;
1324 ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1325 break;
1326
1327 case TX_SEND:
1328 /* we need exactly one can_frame after the msg head */
1329 if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
1330 ret = -EINVAL;
1331 else
1332 ret = bcm_tx_send(msg, ifindex, sk);
1333 break;
1334
1335 default:
1336 ret = -EINVAL;
1337 break;
1338 }
1339
1340 release_sock(sk);
1341
1342 return ret;
1343 }
1344
1345 /*
1346 * notification handler for netdevice status changes
1347 */
1348 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1349 void *ptr)
1350 {
1351 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1352 struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1353 struct sock *sk = &bo->sk;
1354 struct bcm_op *op;
1355 int notify_enodev = 0;
1356
1357 if (!net_eq(dev_net(dev), &init_net))
1358 return NOTIFY_DONE;
1359
1360 if (dev->type != ARPHRD_CAN)
1361 return NOTIFY_DONE;
1362
1363 switch (msg) {
1364
1365 case NETDEV_UNREGISTER:
1366 lock_sock(sk);
1367
1368 /* remove device specific receive entries */
1369 list_for_each_entry(op, &bo->rx_ops, list)
1370 if (op->rx_reg_dev == dev)
1371 bcm_rx_unreg(dev, op);
1372
1373 /* remove device reference, if this is our bound device */
1374 if (bo->bound && bo->ifindex == dev->ifindex) {
1375 bo->bound = 0;
1376 bo->ifindex = 0;
1377 notify_enodev = 1;
1378 }
1379
1380 release_sock(sk);
1381
1382 if (notify_enodev) {
1383 sk->sk_err = ENODEV;
1384 if (!sock_flag(sk, SOCK_DEAD))
1385 sk->sk_error_report(sk);
1386 }
1387 break;
1388
1389 case NETDEV_DOWN:
1390 if (bo->bound && bo->ifindex == dev->ifindex) {
1391 sk->sk_err = ENETDOWN;
1392 if (!sock_flag(sk, SOCK_DEAD))
1393 sk->sk_error_report(sk);
1394 }
1395 }
1396
1397 return NOTIFY_DONE;
1398 }
1399
1400 /*
1401 * initial settings for all BCM sockets to be set at socket creation time
1402 */
1403 static int bcm_init(struct sock *sk)
1404 {
1405 struct bcm_sock *bo = bcm_sk(sk);
1406
1407 bo->bound = 0;
1408 bo->ifindex = 0;
1409 bo->dropped_usr_msgs = 0;
1410 bo->bcm_proc_read = NULL;
1411
1412 INIT_LIST_HEAD(&bo->tx_ops);
1413 INIT_LIST_HEAD(&bo->rx_ops);
1414
1415 /* set notifier */
1416 bo->notifier.notifier_call = bcm_notifier;
1417
1418 register_netdevice_notifier(&bo->notifier);
1419
1420 return 0;
1421 }
1422
1423 /*
1424 * standard socket functions
1425 */
1426 static int bcm_release(struct socket *sock)
1427 {
1428 struct sock *sk = sock->sk;
1429 struct bcm_sock *bo;
1430 struct bcm_op *op, *next;
1431
1432 if (sk == NULL)
1433 return 0;
1434
1435 bo = bcm_sk(sk);
1436
1437 /* remove bcm_ops, timer, rx_unregister(), etc. */
1438
1439 unregister_netdevice_notifier(&bo->notifier);
1440
1441 lock_sock(sk);
1442
1443 list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1444 bcm_remove_op(op);
1445
1446 list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1447 /*
1448 * Don't care if we're bound or not (due to netdev problems);
1449 * can_rx_unregister() is always a safe thing to do here.
1450 */
1451 if (op->ifindex) {
1452 /*
1453 * Only remove subscriptions that had not
1454 * been removed due to NETDEV_UNREGISTER
1455 * in bcm_notifier()
1456 */
1457 if (op->rx_reg_dev) {
1458 struct net_device *dev;
1459
1460 dev = dev_get_by_index(&init_net, op->ifindex);
1461 if (dev) {
1462 bcm_rx_unreg(dev, op);
1463 dev_put(dev);
1464 }
1465 }
1466 } else
1467 can_rx_unregister(NULL, op->can_id,
1468 REGMASK(op->can_id),
1469 bcm_rx_handler, op);
1470
1471 bcm_remove_op(op);
1472 }
1473
1474 /* remove procfs entry */
1475 if (proc_dir && bo->bcm_proc_read)
1476 remove_proc_entry(bo->procname, proc_dir);
1477
1478 /* remove device reference */
1479 if (bo->bound) {
1480 bo->bound = 0;
1481 bo->ifindex = 0;
1482 }
1483
1484 sock_orphan(sk);
1485 sock->sk = NULL;
1486
1487 release_sock(sk);
1488 sock_put(sk);
1489
1490 return 0;
1491 }
1492
1493 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1494 int flags)
1495 {
1496 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1497 struct sock *sk = sock->sk;
1498 struct bcm_sock *bo = bcm_sk(sk);
1499
1500 if (len < sizeof(*addr))
1501 return -EINVAL;
1502
1503 if (bo->bound)
1504 return -EISCONN;
1505
1506 /* bind a device to this socket */
1507 if (addr->can_ifindex) {
1508 struct net_device *dev;
1509
1510 dev = dev_get_by_index(&init_net, addr->can_ifindex);
1511 if (!dev)
1512 return -ENODEV;
1513
1514 if (dev->type != ARPHRD_CAN) {
1515 dev_put(dev);
1516 return -ENODEV;
1517 }
1518
1519 bo->ifindex = dev->ifindex;
1520 dev_put(dev);
1521
1522 } else {
1523 /* no interface reference for ifindex = 0 ('any' CAN device) */
1524 bo->ifindex = 0;
1525 }
1526
1527 bo->bound = 1;
1528
1529 if (proc_dir) {
1530 /* unique socket address as filename */
1531 sprintf(bo->procname, "%lu", sock_i_ino(sk));
1532 bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
1533 proc_dir,
1534 &bcm_proc_fops, sk);
1535 }
1536
1537 return 0;
1538 }
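/*
 * Userspace sketch for opening a BCM socket (an illustration only, not
 * part of this module; "can0" and 'fd' are placeholder names):
 *
 *	int fd = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *	struct sockaddr_can addr = {
 *		.can_family  = AF_CAN,
 *		.can_ifindex = if_nametoindex("can0"),
 *	};
 *
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * All BCM operations (TX_SETUP, RX_SETUP, TX_SEND, ...) are then issued
 * on 'fd' using the message layout described near the MHSIZ definition
 * above.
 */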
1539
1540 static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
1541 struct msghdr *msg, size_t size, int flags)
1542 {
1543 struct sock *sk = sock->sk;
1544 struct sk_buff *skb;
1545 int error = 0;
1546 int noblock;
1547 int err;
1548
1549 noblock = flags & MSG_DONTWAIT;
1550 flags &= ~MSG_DONTWAIT;
1551 skb = skb_recv_datagram(sk, flags, noblock, &error);
1552 if (!skb)
1553 return error;
1554
1555 if (skb->len < size)
1556 size = skb->len;
1557
1558 err = memcpy_toiovec(msg->msg_iov, skb->data, size);
1559 if (err < 0) {
1560 skb_free_datagram(sk, skb);
1561 return err;
1562 }
1563
1564 sock_recv_ts_and_drops(msg, sk, skb);
1565
1566 if (msg->msg_name) {
1567 __sockaddr_check_size(sizeof(struct sockaddr_can));
1568 msg->msg_namelen = sizeof(struct sockaddr_can);
1569 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1570 }
1571
1572 skb_free_datagram(sk, skb);
1573
1574 return size;
1575 }
1576
1577 static const struct proto_ops bcm_ops = {
1578 .family = PF_CAN,
1579 .release = bcm_release,
1580 .bind = sock_no_bind,
1581 .connect = bcm_connect,
1582 .socketpair = sock_no_socketpair,
1583 .accept = sock_no_accept,
1584 .getname = sock_no_getname,
1585 .poll = datagram_poll,
1586 .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */
1587 .listen = sock_no_listen,
1588 .shutdown = sock_no_shutdown,
1589 .setsockopt = sock_no_setsockopt,
1590 .getsockopt = sock_no_getsockopt,
1591 .sendmsg = bcm_sendmsg,
1592 .recvmsg = bcm_recvmsg,
1593 .mmap = sock_no_mmap,
1594 .sendpage = sock_no_sendpage,
1595 };
1596
1597 static struct proto bcm_proto __read_mostly = {
1598 .name = "CAN_BCM",
1599 .owner = THIS_MODULE,
1600 .obj_size = sizeof(struct bcm_sock),
1601 .init = bcm_init,
1602 };
1603
1604 static const struct can_proto bcm_can_proto = {
1605 .type = SOCK_DGRAM,
1606 .protocol = CAN_BCM,
1607 .ops = &bcm_ops,
1608 .prot = &bcm_proto,
1609 };
1610
1611 static int __init bcm_module_init(void)
1612 {
1613 int err;
1614
1615 printk(banner);
1616
1617 err = can_proto_register(&bcm_can_proto);
1618 if (err < 0) {
1619 printk(KERN_ERR "can: registration of bcm protocol failed\n");
1620 return err;
1621 }
1622
1623 /* create /proc/net/can-bcm directory */
1624 proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
1625 return 0;
1626 }
1627
1628 static void __exit bcm_module_exit(void)
1629 {
1630 can_proto_unregister(&bcm_can_proto);
1631
1632 if (proc_dir)
1633 remove_proc_entry("can-bcm", init_net.proc_net);
1634 }
1635
1636 module_init(bcm_module_init);
1637 module_exit(bcm_module_exit);