1 /*
2 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
3 *
4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of Volkswagen nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * Alternatively, provided that this notice is retained in full, this
20 * software may be distributed under the terms of the GNU General
21 * Public License ("GPL") version 2, in which case the provisions of the
22 * GPL apply INSTEAD OF those given above.
23 *
24 * The provided data structures and external interfaces from this code
25 * are not restricted to be used by modules with a GPL compatible license.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 *
40 */
41
42 #include <linux/module.h>
43 #include <linux/init.h>
44 #include <linux/interrupt.h>
45 #include <linux/hrtimer.h>
46 #include <linux/list.h>
47 #include <linux/proc_fs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uio.h>
50 #include <linux/net.h>
51 #include <linux/netdevice.h>
52 #include <linux/socket.h>
53 #include <linux/if_arp.h>
54 #include <linux/skbuff.h>
55 #include <linux/can.h>
56 #include <linux/can/core.h>
57 #include <linux/can/skb.h>
58 #include <linux/can/bcm.h>
59 #include <linux/slab.h>
60 #include <net/sock.h>
61 #include <net/net_namespace.h>
62
63 /*
64 * To send multiple CAN frame content within TX_SETUP or to filter
65 * CAN messages with multiplex index within RX_SETUP, the number of
66 * different filters is limited to 256 due to the one byte index value.
67 */
68 #define MAX_NFRAMES 256
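
/*
 * Usage sketch (userspace view, not compiled here, assuming <linux/can/bcm.h>):
 * the datagram that this limit applies to is a struct bcm_msg_head directly
 * followed by msg_head.nframes can_frames, written to the socket in one piece:
 *
 *      struct {
 *              struct bcm_msg_head head;
 *              struct can_frame frames[3];
 *      } msg;
 */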
69
70 /* use of last_frames[index].can_dlc */
71 #define RX_RECV 0x40 /* received data for this element */
72 #define RX_THR 0x80 /* element has not been sent due to the throttle feature */
73 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
74
75 /* get best masking value for can_rx_register() for a given single can_id */
76 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
77 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
78 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
79
80 #define CAN_BCM_VERSION CAN_VERSION
81
82 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
83 MODULE_LICENSE("Dual BSD/GPL");
84 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
85 MODULE_ALIAS("can-proto-2");
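
/*
 * Usage sketch (userspace side, not compiled here, assuming <sys/socket.h>,
 * <net/if.h>, <linux/can.h> and <linux/can/bcm.h>): a BCM socket is created
 * with protocol CAN_BCM and attached to an interface via connect() - there is
 * no bind() for this protocol (see bcm_ops below). "can0" is just an example;
 * can_ifindex = 0 selects 'any' CAN device for rx ops.
 *
 *      int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *      struct sockaddr_can addr = { .can_family = AF_CAN };
 *
 *      addr.can_ifindex = if_nametoindex("can0");
 *      connect(s, (struct sockaddr *)&addr, sizeof(addr));
 */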
86
87 /* easy access to can_frame payload */
88 static inline u64 GET_U64(const struct can_frame *cp)
89 {
90 return *(u64 *)cp->data;
91 }
92
93 struct bcm_op {
94 struct list_head list;
95 int ifindex;
96 canid_t can_id;
97 u32 flags;
98 unsigned long frames_abs, frames_filtered;
99 struct bcm_timeval ival1, ival2;
100 struct hrtimer timer, thrtimer;
101 struct tasklet_struct tsklet, thrtsklet;
102 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
103 int rx_ifindex;
104 u32 count;
105 u32 nframes;
106 u32 currframe;
107 struct can_frame *frames;
108 struct can_frame *last_frames;
109 struct can_frame sframe;
110 struct can_frame last_sframe;
111 struct sock *sk;
112 struct net_device *rx_reg_dev;
113 };
114
115 static struct proc_dir_entry *proc_dir;
116
117 struct bcm_sock {
118 struct sock sk;
119 int bound;
120 int ifindex;
121 struct notifier_block notifier;
122 struct list_head rx_ops;
123 struct list_head tx_ops;
124 unsigned long dropped_usr_msgs;
125 struct proc_dir_entry *bcm_proc_read;
126 char procname [32]; /* inode number in decimal with \0 */
127 };
128
129 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
130 {
131 return (struct bcm_sock *)sk;
132 }
133
134 static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
135 {
136 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
137 }
138
139 #define CFSIZ sizeof(struct can_frame)
140 #define OPSIZ sizeof(struct bcm_op)
141 #define MHSIZ sizeof(struct bcm_msg_head)
142
143 /*
144 * procfs functions
145 */
146 static char *bcm_proc_getifname(char *result, int ifindex)
147 {
148 struct net_device *dev;
149
150 if (!ifindex)
151 return "any";
152
153 rcu_read_lock();
154 dev = dev_get_by_index_rcu(&init_net, ifindex);
155 if (dev)
156 strcpy(result, dev->name);
157 else
158 strcpy(result, "???");
159 rcu_read_unlock();
160
161 return result;
162 }
163
164 static int bcm_proc_show(struct seq_file *m, void *v)
165 {
166 char ifname[IFNAMSIZ];
167 struct sock *sk = (struct sock *)m->private;
168 struct bcm_sock *bo = bcm_sk(sk);
169 struct bcm_op *op;
170
171 seq_printf(m, ">>> socket %pK", sk->sk_socket);
172 seq_printf(m, " / sk %pK", sk);
173 seq_printf(m, " / bo %pK", bo);
174 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
175 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
176 seq_printf(m, " <<<\n");
177
178 list_for_each_entry(op, &bo->rx_ops, list) {
179
180 unsigned long reduction;
181
182 /* print only active entries & prevent division by zero */
183 if (!op->frames_abs)
184 continue;
185
186 seq_printf(m, "rx_op: %03X %-5s ",
187 op->can_id, bcm_proc_getifname(ifname, op->ifindex));
188 seq_printf(m, "[%u]%c ", op->nframes,
189 (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
190 if (op->kt_ival1.tv64)
191 seq_printf(m, "timeo=%lld ",
192 (long long)ktime_to_us(op->kt_ival1));
193
194 if (op->kt_ival2.tv64)
195 seq_printf(m, "thr=%lld ",
196 (long long)ktime_to_us(op->kt_ival2));
197
198 seq_printf(m, "# recv %ld (%ld) => reduction: ",
199 op->frames_filtered, op->frames_abs);
200
201 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
202
203 seq_printf(m, "%s%ld%%\n",
204 (reduction == 100) ? "near " : "", reduction);
205 }
206
207 list_for_each_entry(op, &bo->tx_ops, list) {
208
209 seq_printf(m, "tx_op: %03X %s [%u] ",
210 op->can_id,
211 bcm_proc_getifname(ifname, op->ifindex),
212 op->nframes);
213
214 if (op->kt_ival1.tv64)
215 seq_printf(m, "t1=%lld ",
216 (long long)ktime_to_us(op->kt_ival1));
217
218 if (op->kt_ival2.tv64)
219 seq_printf(m, "t2=%lld ",
220 (long long)ktime_to_us(op->kt_ival2));
221
222 seq_printf(m, "# sent %ld\n", op->frames_abs);
223 }
224 seq_putc(m, '\n');
225 return 0;
226 }
227
228 static int bcm_proc_open(struct inode *inode, struct file *file)
229 {
230 return single_open(file, bcm_proc_show, PDE_DATA(inode));
231 }
232
233 static const struct file_operations bcm_proc_fops = {
234 .owner = THIS_MODULE,
235 .open = bcm_proc_open,
236 .read = seq_read,
237 .llseek = seq_lseek,
238 .release = single_release,
239 };
240
241 /*
242 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
243 * of the given bcm tx op
244 */
245 static void bcm_can_tx(struct bcm_op *op)
246 {
247 struct sk_buff *skb;
248 struct net_device *dev;
249 struct can_frame *cf = &op->frames[op->currframe];
250
251 /* no target device? => exit */
252 if (!op->ifindex)
253 return;
254
255 dev = dev_get_by_index(&init_net, op->ifindex);
256 if (!dev) {
257 /* RFC: should this bcm_op remove itself here? */
258 return;
259 }
260
261 skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), gfp_any());
262 if (!skb)
263 goto out;
264
265 can_skb_reserve(skb);
266 can_skb_prv(skb)->ifindex = dev->ifindex;
267 can_skb_prv(skb)->skbcnt = 0;
268
269 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
270
271 /* send with loopback */
272 skb->dev = dev;
273 can_skb_set_owner(skb, op->sk);
274 can_send(skb, 1);
275
276 /* update statistics */
277 op->currframe++;
278 op->frames_abs++;
279
280 /* reached last frame? */
281 if (op->currframe >= op->nframes)
282 op->currframe = 0;
283 out:
284 dev_put(dev);
285 }
286
287 /*
288 * bcm_send_to_user - send a BCM message to the userspace
289 * (consisting of bcm_msg_head + x CAN frames)
290 */
291 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
292 struct can_frame *frames, int has_timestamp)
293 {
294 struct sk_buff *skb;
295 struct can_frame *firstframe;
296 struct sockaddr_can *addr;
297 struct sock *sk = op->sk;
298 unsigned int datalen = head->nframes * CFSIZ;
299 int err;
300
301 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
302 if (!skb)
303 return;
304
305 memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
306
307 if (head->nframes) {
308 /* can_frames starting here */
309 firstframe = (struct can_frame *)skb_tail_pointer(skb);
310
311 memcpy(skb_put(skb, datalen), frames, datalen);
312
313 /*
314 * the BCM uses the can_dlc-element of the can_frame
315 * structure for internal purposes. This is only
316 * relevant for updates that are generated by the
317 * BCM, where nframes is 1
318 */
319 if (head->nframes == 1)
320 firstframe->can_dlc &= BCM_CAN_DLC_MASK;
321 }
322
323 if (has_timestamp) {
324 /* restore rx timestamp */
325 skb->tstamp = op->rx_stamp;
326 }
327
328 /*
329 * Put the datagram to the queue so that bcm_recvmsg() can
330 * get it from there. We need to pass the interface index to
331 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
332 * containing the interface index.
333 */
334
335 sock_skb_cb_check_size(sizeof(struct sockaddr_can));
336 addr = (struct sockaddr_can *)skb->cb;
337 memset(addr, 0, sizeof(*addr));
338 addr->can_family = AF_CAN;
339 addr->can_ifindex = op->rx_ifindex;
340
341 err = sock_queue_rcv_skb(sk, skb);
342 if (err < 0) {
343 struct bcm_sock *bo = bcm_sk(sk);
344
345 kfree_skb(skb);
346 /* don't care about overflows in this statistic */
347 bo->dropped_usr_msgs++;
348 }
349 }
350
351 static void bcm_tx_start_timer(struct bcm_op *op)
352 {
353 if (op->kt_ival1.tv64 && op->count)
354 hrtimer_start(&op->timer,
355 ktime_add(ktime_get(), op->kt_ival1),
356 HRTIMER_MODE_ABS);
357 else if (op->kt_ival2.tv64)
358 hrtimer_start(&op->timer,
359 ktime_add(ktime_get(), op->kt_ival2),
360 HRTIMER_MODE_ABS);
361 }
362
363 static void bcm_tx_timeout_tsklet(unsigned long data)
364 {
365 struct bcm_op *op = (struct bcm_op *)data;
366 struct bcm_msg_head msg_head;
367
368 if (op->kt_ival1.tv64 && (op->count > 0)) {
369
370 op->count--;
371 if (!op->count && (op->flags & TX_COUNTEVT)) {
372
373 /* create notification to user */
memset(&msg_head, 0, sizeof(msg_head)); /* also clears the implicit struct padding copied to userspace */
374 msg_head.opcode = TX_EXPIRED;
375 msg_head.flags = op->flags;
376 msg_head.count = op->count;
377 msg_head.ival1 = op->ival1;
378 msg_head.ival2 = op->ival2;
379 msg_head.can_id = op->can_id;
380 msg_head.nframes = 0;
381
382 bcm_send_to_user(op, &msg_head, NULL, 0);
383 }
384 bcm_can_tx(op);
385
386 } else if (op->kt_ival2.tv64)
387 bcm_can_tx(op);
388
389 bcm_tx_start_timer(op);
390 }
391
392 /*
393 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
394 */
395 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
396 {
397 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
398
399 tasklet_schedule(&op->tsklet);
400
401 return HRTIMER_NORESTART;
402 }
403
404 /*
405 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
406 */
407 static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
408 {
409 struct bcm_msg_head head;
410
411 /* update statistics */
412 op->frames_filtered++;
413
414 /* prevent statistics overflow */
415 if (op->frames_filtered > ULONG_MAX/100)
416 op->frames_filtered = op->frames_abs = 0;
417
418 /* this element is not throttled anymore */
419 data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
420
/* zero the head first - the implicit struct padding is copied to userspace as well */
memset(&head, 0, sizeof(head));
421 head.opcode = RX_CHANGED;
422 head.flags = op->flags;
423 head.count = op->count;
424 head.ival1 = op->ival1;
425 head.ival2 = op->ival2;
426 head.can_id = op->can_id;
427 head.nframes = 1;
428
429 bcm_send_to_user(op, &head, data, 1);
430 }
431
432 /*
433 * bcm_rx_update_and_send - process a detected relevant receive content change
434 * 1. update the last received data
435 * 2. send a notification to the user (if possible)
436 */
437 static void bcm_rx_update_and_send(struct bcm_op *op,
438 struct can_frame *lastdata,
439 const struct can_frame *rxdata)
440 {
441 memcpy(lastdata, rxdata, CFSIZ);
442
443 /* mark as used and throttled by default */
444 lastdata->can_dlc |= (RX_RECV|RX_THR);
445
446 /* throttling mode inactive ? */
447 if (!op->kt_ival2.tv64) {
448 /* send RX_CHANGED to the user immediately */
449 bcm_rx_changed(op, lastdata);
450 return;
451 }
452
453 /* with active throttling timer we are just done here */
454 if (hrtimer_active(&op->thrtimer))
455 return;
456
457 /* first reception with enabled throttling mode */
458 if (!op->kt_lastmsg.tv64)
459 goto rx_changed_settime;
460
461 /* got a second frame inside a potential throttle period? */
462 if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
463 ktime_to_us(op->kt_ival2)) {
464 /* do not send the saved data - only start throttle timer */
465 hrtimer_start(&op->thrtimer,
466 ktime_add(op->kt_lastmsg, op->kt_ival2),
467 HRTIMER_MODE_ABS);
468 return;
469 }
470
471 /* the gap was big enough that throttling was not needed here */
472 rx_changed_settime:
473 bcm_rx_changed(op, lastdata);
474 op->kt_lastmsg = ktime_get();
475 }
476
477 /*
478 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
479 * received data stored in op->last_frames[]
480 */
481 static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
482 const struct can_frame *rxdata)
483 {
484 /*
485 * no one uses the MSBs of can_dlc for comparison,
486 * so we use it here to detect the first time of reception
487 */
488
489 if (!(op->last_frames[index].can_dlc & RX_RECV)) {
490 /* received data for the first time => send update to user */
491 bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
492 return;
493 }
494
495 /* do a real check in can_frame data section */
496
497 if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
498 (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
499 bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
500 return;
501 }
502
503 if (op->flags & RX_CHECK_DLC) {
504 /* do a real check in can_frame dlc */
505 if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
506 BCM_CAN_DLC_MASK)) {
507 bcm_rx_update_and_send(op, &op->last_frames[index],
508 rxdata);
509 return;
510 }
511 }
512 }
513
514 /*
515 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
516 */
517 static void bcm_rx_starttimer(struct bcm_op *op)
518 {
519 if (op->flags & RX_NO_AUTOTIMER)
520 return;
521
522 if (op->kt_ival1.tv64)
523 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
524 }
525
526 static void bcm_rx_timeout_tsklet(unsigned long data)
527 {
528 struct bcm_op *op = (struct bcm_op *)data;
529 struct bcm_msg_head msg_head;
530
531 /* create notification to user */
memset(&msg_head, 0, sizeof(msg_head)); /* also clears the implicit struct padding copied to userspace */
532 msg_head.opcode = RX_TIMEOUT;
533 msg_head.flags = op->flags;
534 msg_head.count = op->count;
535 msg_head.ival1 = op->ival1;
536 msg_head.ival2 = op->ival2;
537 msg_head.can_id = op->can_id;
538 msg_head.nframes = 0;
539
540 bcm_send_to_user(op, &msg_head, NULL, 0);
541 }
542
543 /*
544 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
545 */
546 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
547 {
548 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
549
550 /* schedule before NET_RX_SOFTIRQ */
551 tasklet_hi_schedule(&op->tsklet);
552
553 /* no restart of the timer is done here! */
554
555 /* if the user wants to be informed when cyclic CAN messages come back */
556 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
557 /* clear received can_frames to indicate 'nothing received' */
558 memset(op->last_frames, 0, op->nframes * CFSIZ);
559 }
560
561 return HRTIMER_NORESTART;
562 }
563
564 /*
565 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
566 */
567 static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
568 unsigned int index)
569 {
570 if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
571 if (update)
572 bcm_rx_changed(op, &op->last_frames[index]);
573 return 1;
574 }
575 return 0;
576 }
577
578 /*
579 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
580 *
581 * update == 0 : just check if throttled data is available (any irq context)
582 * update == 1 : check and send throttled data to userspace (soft_irq context)
583 */
584 static int bcm_rx_thr_flush(struct bcm_op *op, int update)
585 {
586 int updated = 0;
587
588 if (op->nframes > 1) {
589 unsigned int i;
590
591 /* for MUX filter we start at index 1 */
592 for (i = 1; i < op->nframes; i++)
593 updated += bcm_rx_do_flush(op, update, i);
594
595 } else {
596 /* for RX_FILTER_ID and simple filter */
597 updated += bcm_rx_do_flush(op, update, 0);
598 }
599
600 return updated;
601 }
602
603 static void bcm_rx_thr_tsklet(unsigned long data)
604 {
605 struct bcm_op *op = (struct bcm_op *)data;
606
607 /* push the changed data to the userspace */
608 bcm_rx_thr_flush(op, 1);
609 }
610
611 /*
612 * bcm_rx_thr_handler - the time for blocked content updates is over now:
613 * Check for throttled data and send it to the userspace
614 */
615 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
616 {
617 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
618
619 tasklet_schedule(&op->thrtsklet);
620
621 if (bcm_rx_thr_flush(op, 0)) {
622 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
623 return HRTIMER_RESTART;
624 } else {
625 /* rearm throttle handling */
626 op->kt_lastmsg = ktime_set(0, 0);
627 return HRTIMER_NORESTART;
628 }
629 }
630
631 /*
632 * bcm_rx_handler - handle a CAN frame reception
633 */
634 static void bcm_rx_handler(struct sk_buff *skb, void *data)
635 {
636 struct bcm_op *op = (struct bcm_op *)data;
637 const struct can_frame *rxframe = (struct can_frame *)skb->data;
638 unsigned int i;
639
640 /* disable timeout */
641 hrtimer_cancel(&op->timer);
642
643 if (op->can_id != rxframe->can_id)
644 return;
645
646 /* save rx timestamp */
647 op->rx_stamp = skb->tstamp;
648 /* save originator for recvfrom() */
649 op->rx_ifindex = skb->dev->ifindex;
650 /* update statistics */
651 op->frames_abs++;
652
653 if (op->flags & RX_RTR_FRAME) {
654 /* send reply for RTR-request (placed in op->frames[0]) */
655 bcm_can_tx(op);
656 return;
657 }
658
659 if (op->flags & RX_FILTER_ID) {
660 /* the easiest case */
661 bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
662 goto rx_starttimer;
663 }
664
665 if (op->nframes == 1) {
666 /* simple compare with index 0 */
667 bcm_rx_cmp_to_index(op, 0, rxframe);
668 goto rx_starttimer;
669 }
670
671 if (op->nframes > 1) {
672 /*
673 * multiplex compare
674 *
675 * find the first multiplex mask that fits.
676 * Remark: The MUX-mask is stored in index 0
677 */
678
679 for (i = 1; i < op->nframes; i++) {
680 if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
681 (GET_U64(&op->frames[0]) &
682 GET_U64(&op->frames[i]))) {
683 bcm_rx_cmp_to_index(op, i, rxframe);
684 break;
685 }
686 }
687 }
688
689 rx_starttimer:
690 bcm_rx_starttimer(op);
691 }
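
/*
 * Usage sketch (userspace side, not compiled here) for the multiplex compare
 * above, assuming a connected CAN_BCM socket 's': frames[0].data carries the
 * mux mask (here byte 0 selects the mux page), each further frame holds one
 * mux value in the masked bits and, in its remaining bits, the payload bits
 * to monitor for that page (here page 0x01, watching byte 1):
 *
 *      struct {
 *              struct bcm_msg_head head;
 *              struct can_frame frames[2];
 *      } mux = { 0 };
 *
 *      mux.head.opcode = RX_SETUP;
 *      mux.head.can_id = 0x123;
 *      mux.head.nframes = 2;
 *      mux.frames[0].data[0] = 0xff;
 *      mux.frames[1].data[0] = 0x01;
 *      mux.frames[1].data[1] = 0xff;
 *      write(s, &mux, sizeof(mux));
 */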
692
693 /*
694 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
695 */
696 static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
697 int ifindex)
698 {
699 struct bcm_op *op;
700
701 list_for_each_entry(op, ops, list) {
702 if ((op->can_id == can_id) && (op->ifindex == ifindex))
703 return op;
704 }
705
706 return NULL;
707 }
708
709 static void bcm_remove_op(struct bcm_op *op)
710 {
711 hrtimer_cancel(&op->timer);
712 hrtimer_cancel(&op->thrtimer);
713
714 if (op->tsklet.func)
715 tasklet_kill(&op->tsklet);
716
717 if (op->thrtsklet.func)
718 tasklet_kill(&op->thrtsklet);
719
720 if ((op->frames) && (op->frames != &op->sframe))
721 kfree(op->frames);
722
723 if ((op->last_frames) && (op->last_frames != &op->last_sframe))
724 kfree(op->last_frames);
725
726 kfree(op);
727 }
728
729 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
730 {
731 if (op->rx_reg_dev == dev) {
732 can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
733 bcm_rx_handler, op);
734
735 /* mark subscription as removed */
736 op->rx_reg_dev = NULL;
737 } else
738 printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
739 "mismatch %p %p\n", op->rx_reg_dev, dev);
740 }
741
742 /*
743 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
744 */
745 static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
746 {
747 struct bcm_op *op, *n;
748
749 list_for_each_entry_safe(op, n, ops, list) {
750 if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
751
752 /*
753 * No matter whether we are bound or not (due to netdev
754 * problems), can_rx_unregister() is always a safe
755 * thing to do here.
756 */
757 if (op->ifindex) {
758 /*
759 * Only remove subscriptions that had not
760 * been removed due to NETDEV_UNREGISTER
761 * in bcm_notifier()
762 */
763 if (op->rx_reg_dev) {
764 struct net_device *dev;
765
766 dev = dev_get_by_index(&init_net,
767 op->ifindex);
768 if (dev) {
769 bcm_rx_unreg(dev, op);
770 dev_put(dev);
771 }
772 }
773 } else
774 can_rx_unregister(NULL, op->can_id,
775 REGMASK(op->can_id),
776 bcm_rx_handler, op);
777
778 list_del(&op->list);
779 bcm_remove_op(op);
780 return 1; /* done */
781 }
782 }
783
784 return 0; /* not found */
785 }
786
787 /*
788 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
789 */
790 static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
791 {
792 struct bcm_op *op, *n;
793
794 list_for_each_entry_safe(op, n, ops, list) {
795 if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
796 list_del(&op->list);
797 bcm_remove_op(op);
798 return 1; /* done */
799 }
800 }
801
802 return 0; /* not found */
803 }
804
805 /*
806 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
807 */
808 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
809 int ifindex)
810 {
811 struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);
812
813 if (!op)
814 return -EINVAL;
815
816 /* put current values into msg_head */
817 msg_head->flags = op->flags;
818 msg_head->count = op->count;
819 msg_head->ival1 = op->ival1;
820 msg_head->ival2 = op->ival2;
821 msg_head->nframes = op->nframes;
822
823 bcm_send_to_user(op, msg_head, op->frames, 0);
824
825 return MHSIZ;
826 }
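
/*
 * Usage sketch (userspace side, not compiled here, socket 's' as set up
 * above): TX_READ / RX_READ send a bare bcm_msg_head carrying opcode and
 * can_id; the current op settings are returned through the socket as
 * TX_STATUS / RX_STATUS messages (see bcm_sendmsg() below).
 *
 *      struct bcm_msg_head query = { 0 };
 *
 *      query.opcode = TX_READ;
 *      query.can_id = 0x123;
 *      write(s, &query, sizeof(query));
 */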
827
828 /*
829 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
830 */
831 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
832 int ifindex, struct sock *sk)
833 {
834 struct bcm_sock *bo = bcm_sk(sk);
835 struct bcm_op *op;
836 unsigned int i;
837 int err;
838
839 /* we need a real device to send frames */
840 if (!ifindex)
841 return -ENODEV;
842
843 /* check nframes boundaries - we need at least one can_frame */
844 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
845 return -EINVAL;
846
847 /* check the given can_id */
848 op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);
849
850 if (op) {
851 /* update existing BCM operation */
852
853 /*
854 * Do we need more space for the can_frames than currently
855 * allocated? -> This is a _really_ unusual use-case and
856 * therefore (complexity / locking) it is not supported.
857 */
858 if (msg_head->nframes > op->nframes)
859 return -E2BIG;
860
861 /* update can_frames content */
862 for (i = 0; i < msg_head->nframes; i++) {
863 err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);
864
865 if (op->frames[i].can_dlc > 8)
866 err = -EINVAL;
867
868 if (err < 0)
869 return err;
870
871 if (msg_head->flags & TX_CP_CAN_ID) {
872 /* copy can_id into frame */
873 op->frames[i].can_id = msg_head->can_id;
874 }
875 }
876
877 } else {
878 /* insert new BCM operation for the given can_id */
879
880 op = kzalloc(OPSIZ, GFP_KERNEL);
881 if (!op)
882 return -ENOMEM;
883
884 op->can_id = msg_head->can_id;
885
886 /* create array for can_frames and copy the data */
887 if (msg_head->nframes > 1) {
888 op->frames = kmalloc(msg_head->nframes * CFSIZ,
889 GFP_KERNEL);
890 if (!op->frames) {
891 kfree(op);
892 return -ENOMEM;
893 }
894 } else
895 op->frames = &op->sframe;
896
897 for (i = 0; i < msg_head->nframes; i++) {
898 err = memcpy_from_msg((u8 *)&op->frames[i], msg, CFSIZ);
899
900 if (op->frames[i].can_dlc > 8)
901 err = -EINVAL;
902
903 if (err < 0) {
904 if (op->frames != &op->sframe)
905 kfree(op->frames);
906 kfree(op);
907 return err;
908 }
909
910 if (msg_head->flags & TX_CP_CAN_ID) {
911 /* copy can_id into frame */
912 op->frames[i].can_id = msg_head->can_id;
913 }
914 }
915
916 /* tx_ops never compare with previously received messages */
917 op->last_frames = NULL;
918
919 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
920 op->sk = sk;
921 op->ifindex = ifindex;
922
923 /* initialize uninitialized (kzalloc) structure */
924 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
925 op->timer.function = bcm_tx_timeout_handler;
926
927 /* initialize tasklet for tx countevent notification */
928 tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
929 (unsigned long) op);
930
931 /* currently unused in tx_ops */
932 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
933
934 /* add this bcm_op to the list of the tx_ops */
935 list_add(&op->list, &bo->tx_ops);
936
937 } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
938
939 if (op->nframes != msg_head->nframes) {
940 op->nframes = msg_head->nframes;
941 /* start multiple frame transmission with index 0 */
942 op->currframe = 0;
943 }
944
945 /* check flags */
946
947 op->flags = msg_head->flags;
948
949 if (op->flags & TX_RESET_MULTI_IDX) {
950 /* start multiple frame transmission with index 0 */
951 op->currframe = 0;
952 }
953
954 if (op->flags & SETTIMER) {
955 /* set timer values */
956 op->count = msg_head->count;
957 op->ival1 = msg_head->ival1;
958 op->ival2 = msg_head->ival2;
959 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
960 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
961
962 /* disable an active timer due to zero values? */
963 if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
964 hrtimer_cancel(&op->timer);
965 }
966
967 if (op->flags & STARTTIMER) {
968 hrtimer_cancel(&op->timer);
969 /* spec: send can_frame when starting timer */
970 op->flags |= TX_ANNOUNCE;
971 }
972
973 if (op->flags & TX_ANNOUNCE) {
974 bcm_can_tx(op);
975 if (op->count)
976 op->count--;
977 }
978
979 if (op->flags & STARTTIMER)
980 bcm_tx_start_timer(op);
981
982 return msg_head->nframes * CFSIZ + MHSIZ;
983 }
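
/*
 * Usage sketch (userspace side, not compiled here) for TX_SETUP, assuming a
 * socket 's' connected to a real CAN interface: transmit CAN ID 0x123 every
 * 100 ms until the op is changed or deleted. count and ival1 stay zero, so
 * only the ival2 cycle is used (no initial 'fast phase').
 *
 *      struct {
 *              struct bcm_msg_head head;
 *              struct can_frame frame;
 *      } tx = { 0 };
 *
 *      tx.head.opcode = TX_SETUP;
 *      tx.head.flags = SETTIMER | STARTTIMER;
 *      tx.head.can_id = 0x123;
 *      tx.head.ival2.tv_usec = 100000;
 *      tx.head.nframes = 1;
 *      tx.frame.can_id = 0x123;
 *      tx.frame.can_dlc = 2;
 *      tx.frame.data[0] = 0x11;
 *      tx.frame.data[1] = 0x22;
 *      write(s, &tx, sizeof(tx));
 */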
984
985 /*
986 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
987 */
988 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
989 int ifindex, struct sock *sk)
990 {
991 struct bcm_sock *bo = bcm_sk(sk);
992 struct bcm_op *op;
993 int do_rx_register;
994 int err = 0;
995
996 if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
997 /* be robust against wrong usage ... */
998 msg_head->flags |= RX_FILTER_ID;
999 /* ignore trailing garbage */
1000 msg_head->nframes = 0;
1001 }
1002
1003 /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
1004 if (msg_head->nframes > MAX_NFRAMES + 1)
1005 return -EINVAL;
1006
1007 if ((msg_head->flags & RX_RTR_FRAME) &&
1008 ((msg_head->nframes != 1) ||
1009 (!(msg_head->can_id & CAN_RTR_FLAG))))
1010 return -EINVAL;
1011
1012 /* check the given can_id */
1013 op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
1014 if (op) {
1015 /* update existing BCM operation */
1016
1017 /*
1018 * Do we need more space for the can_frames than currently
1019 * allocated? -> This is a _really_ unusual use-case and
1020 * therefore (complexity / locking) it is not supported.
1021 */
1022 if (msg_head->nframes > op->nframes)
1023 return -E2BIG;
1024
1025 if (msg_head->nframes) {
1026 /* update can_frames content */
1027 err = memcpy_from_msg((u8 *)op->frames, msg,
1028 msg_head->nframes * CFSIZ);
1029 if (err < 0)
1030 return err;
1031
1032 /* clear last_frames to indicate 'nothing received' */
1033 memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
1034 }
1035
1036 op->nframes = msg_head->nframes;
1037
1038 /* Only an update -> do not call can_rx_register() */
1039 do_rx_register = 0;
1040
1041 } else {
1042 /* insert new BCM operation for the given can_id */
1043 op = kzalloc(OPSIZ, GFP_KERNEL);
1044 if (!op)
1045 return -ENOMEM;
1046
1047 op->can_id = msg_head->can_id;
1048 op->nframes = msg_head->nframes;
1049
1050 if (msg_head->nframes > 1) {
1051 /* create array for can_frames and copy the data */
1052 op->frames = kmalloc(msg_head->nframes * CFSIZ,
1053 GFP_KERNEL);
1054 if (!op->frames) {
1055 kfree(op);
1056 return -ENOMEM;
1057 }
1058
1059 /* create and init array for received can_frames */
1060 op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
1061 GFP_KERNEL);
1062 if (!op->last_frames) {
1063 kfree(op->frames);
1064 kfree(op);
1065 return -ENOMEM;
1066 }
1067
1068 } else {
1069 op->frames = &op->sframe;
1070 op->last_frames = &op->last_sframe;
1071 }
1072
1073 if (msg_head->nframes) {
1074 err = memcpy_from_msg((u8 *)op->frames, msg,
1075 msg_head->nframes * CFSIZ);
1076 if (err < 0) {
1077 if (op->frames != &op->sframe)
1078 kfree(op->frames);
1079 if (op->last_frames != &op->last_sframe)
1080 kfree(op->last_frames);
1081 kfree(op);
1082 return err;
1083 }
1084 }
1085
1086 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1087 op->sk = sk;
1088 op->ifindex = ifindex;
1089
1090 /* ifindex for timeout events w/o previous frame reception */
1091 op->rx_ifindex = ifindex;
1092
1093 /* initialize uninitialized (kzalloc) structure */
1094 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1095 op->timer.function = bcm_rx_timeout_handler;
1096
1097 /* initialize tasklet for rx timeout notification */
1098 tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
1099 (unsigned long) op);
1100
1101 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1102 op->thrtimer.function = bcm_rx_thr_handler;
1103
1104 /* initialize tasklet for rx throttle handling */
1105 tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
1106 (unsigned long) op);
1107
1108 /* add this bcm_op to the list of the rx_ops */
1109 list_add(&op->list, &bo->rx_ops);
1110
1111 /* call can_rx_register() */
1112 do_rx_register = 1;
1113
1114 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1115
1116 /* check flags */
1117 op->flags = msg_head->flags;
1118
1119 if (op->flags & RX_RTR_FRAME) {
1120
1121 /* no timers in RTR-mode */
1122 hrtimer_cancel(&op->thrtimer);
1123 hrtimer_cancel(&op->timer);
1124
1125 /*
1126 * funny feature in RX(!)_SETUP only for RTR-mode:
1127 * copy can_id into frame BUT without RTR-flag to
1128 * prevent a full-load-loopback-test ... ;-]
1129 */
1130 if ((op->flags & TX_CP_CAN_ID) ||
1131 (op->frames[0].can_id == op->can_id))
1132 op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
1133
1134 } else {
1135 if (op->flags & SETTIMER) {
1136
1137 /* set timer value */
1138 op->ival1 = msg_head->ival1;
1139 op->ival2 = msg_head->ival2;
1140 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1141 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1142
1143 /* disable an active timer due to zero value? */
1144 if (!op->kt_ival1.tv64)
1145 hrtimer_cancel(&op->timer);
1146
1147 /*
1148 * In any case cancel the throttle timer, flush
1149 * potentially blocked msgs and reset throttle handling
1150 */
1151 op->kt_lastmsg = ktime_set(0, 0);
1152 hrtimer_cancel(&op->thrtimer);
1153 bcm_rx_thr_flush(op, 1);
1154 }
1155
1156 if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
1157 hrtimer_start(&op->timer, op->kt_ival1,
1158 HRTIMER_MODE_REL);
1159 }
1160
1161 /* now we can register for can_ids, if we added a new bcm_op */
1162 if (do_rx_register) {
1163 if (ifindex) {
1164 struct net_device *dev;
1165
1166 dev = dev_get_by_index(&init_net, ifindex);
1167 if (dev) {
1168 err = can_rx_register(dev, op->can_id,
1169 REGMASK(op->can_id),
1170 bcm_rx_handler, op,
1171 "bcm");
1172
1173 op->rx_reg_dev = dev;
1174 dev_put(dev);
1175 }
1176
1177 } else
1178 err = can_rx_register(NULL, op->can_id,
1179 REGMASK(op->can_id),
1180 bcm_rx_handler, op, "bcm");
1181 if (err) {
1182 /* this bcm rx op is broken -> remove it */
1183 list_del(&op->list);
1184 bcm_remove_op(op);
1185 return err;
1186 }
1187 }
1188
1189 return msg_head->nframes * CFSIZ + MHSIZ;
1190 }
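
/*
 * Usage sketch (userspace side, not compiled here) for RX_SETUP on a
 * connected socket 's': report content changes of CAN ID 0x123 only when
 * byte 0 changes (frame.data acts as the relevant-bit mask) and raise
 * RX_TIMEOUT after one second of silence. Setting ival2 in addition would
 * throttle RX_CHANGED updates to at most one per ival2 (see
 * bcm_rx_update_and_send() above).
 *
 *      struct {
 *              struct bcm_msg_head head;
 *              struct can_frame frame;
 *      } rx = { 0 };
 *
 *      rx.head.opcode = RX_SETUP;
 *      rx.head.flags = SETTIMER | STARTTIMER;
 *      rx.head.can_id = 0x123;
 *      rx.head.ival1.tv_sec = 1;
 *      rx.head.nframes = 1;
 *      rx.frame.data[0] = 0xff;
 *      write(s, &rx, sizeof(rx));
 */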
1191
1192 /*
1193 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1194 */
1195 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1196 {
1197 struct sk_buff *skb;
1198 struct net_device *dev;
1199 int err;
1200
1201 /* we need a real device to send frames */
1202 if (!ifindex)
1203 return -ENODEV;
1204
1205 skb = alloc_skb(CFSIZ + sizeof(struct can_skb_priv), GFP_KERNEL);
1206 if (!skb)
1207 return -ENOMEM;
1208
1209 can_skb_reserve(skb);
1210
1211 err = memcpy_from_msg(skb_put(skb, CFSIZ), msg, CFSIZ);
1212 if (err < 0) {
1213 kfree_skb(skb);
1214 return err;
1215 }
1216
1217 dev = dev_get_by_index(&init_net, ifindex);
1218 if (!dev) {
1219 kfree_skb(skb);
1220 return -ENODEV;
1221 }
1222
1223 can_skb_prv(skb)->ifindex = dev->ifindex;
1224 can_skb_prv(skb)->skbcnt = 0;
1225 skb->dev = dev;
1226 can_skb_set_owner(skb, sk);
1227 err = can_send(skb, 1); /* send with loopback */
1228 dev_put(dev);
1229
1230 if (err)
1231 return err;
1232
1233 return CFSIZ + MHSIZ;
1234 }
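
/*
 * Usage sketch (userspace side, not compiled here) for TX_SEND on socket 's':
 * a one-shot transmission, i.e. exactly one can_frame following the message
 * head and no timers involved.
 *
 *      struct {
 *              struct bcm_msg_head head;
 *              struct can_frame frame;
 *      } one = { 0 };
 *
 *      one.head.opcode = TX_SEND;
 *      one.head.nframes = 1;
 *      one.frame.can_id = 0x321;
 *      one.frame.can_dlc = 1;
 *      one.frame.data[0] = 0x42;
 *      write(s, &one, sizeof(one));
 */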
1235
1236 /*
1237 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1238 */
1239 static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1240 {
1241 struct sock *sk = sock->sk;
1242 struct bcm_sock *bo = bcm_sk(sk);
1243 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1244 struct bcm_msg_head msg_head;
1245 int ret; /* read bytes or error codes as return value */
1246
1247 if (!bo->bound)
1248 return -ENOTCONN;
1249
1250 /* check for valid message length from userspace */
1251 if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
1252 return -EINVAL;
1253
1254 /* check for alternative ifindex for this bcm_op */
1255
1256 if (!ifindex && msg->msg_name) {
1257 /* no bound device as default => check msg_name */
1258 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
1259
1260 if (msg->msg_namelen < sizeof(*addr))
1261 return -EINVAL;
1262
1263 if (addr->can_family != AF_CAN)
1264 return -EINVAL;
1265
1266 /* ifindex from sendto() */
1267 ifindex = addr->can_ifindex;
1268
1269 if (ifindex) {
1270 struct net_device *dev;
1271
1272 dev = dev_get_by_index(&init_net, ifindex);
1273 if (!dev)
1274 return -ENODEV;
1275
1276 if (dev->type != ARPHRD_CAN) {
1277 dev_put(dev);
1278 return -ENODEV;
1279 }
1280
1281 dev_put(dev);
1282 }
1283 }
1284
1285 /* read message head information */
1286
1287 ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
1288 if (ret < 0)
1289 return ret;
1290
1291 lock_sock(sk);
1292
1293 switch (msg_head.opcode) {
1294
1295 case TX_SETUP:
1296 ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1297 break;
1298
1299 case RX_SETUP:
1300 ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1301 break;
1302
1303 case TX_DELETE:
1304 if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
1305 ret = MHSIZ;
1306 else
1307 ret = -EINVAL;
1308 break;
1309
1310 case RX_DELETE:
1311 if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
1312 ret = MHSIZ;
1313 else
1314 ret = -EINVAL;
1315 break;
1316
1317 case TX_READ:
1318 /* reuse msg_head for the reply to TX_READ */
1319 msg_head.opcode = TX_STATUS;
1320 ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1321 break;
1322
1323 case RX_READ:
1324 /* reuse msg_head for the reply to RX_READ */
1325 msg_head.opcode = RX_STATUS;
1326 ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1327 break;
1328
1329 case TX_SEND:
1330 /* we need exactly one can_frame following the msg head */
1331 if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
1332 ret = -EINVAL;
1333 else
1334 ret = bcm_tx_send(msg, ifindex, sk);
1335 break;
1336
1337 default:
1338 ret = -EINVAL;
1339 break;
1340 }
1341
1342 release_sock(sk);
1343
1344 return ret;
1345 }
1346
1347 /*
1348 * notification handler for netdevice status changes
1349 */
1350 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1351 void *ptr)
1352 {
1353 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1354 struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1355 struct sock *sk = &bo->sk;
1356 struct bcm_op *op;
1357 int notify_enodev = 0;
1358
1359 if (!net_eq(dev_net(dev), &init_net))
1360 return NOTIFY_DONE;
1361
1362 if (dev->type != ARPHRD_CAN)
1363 return NOTIFY_DONE;
1364
1365 switch (msg) {
1366
1367 case NETDEV_UNREGISTER:
1368 lock_sock(sk);
1369
1370 /* remove device specific receive entries */
1371 list_for_each_entry(op, &bo->rx_ops, list)
1372 if (op->rx_reg_dev == dev)
1373 bcm_rx_unreg(dev, op);
1374
1375 /* remove device reference, if this is our bound device */
1376 if (bo->bound && bo->ifindex == dev->ifindex) {
1377 bo->bound = 0;
1378 bo->ifindex = 0;
1379 notify_enodev = 1;
1380 }
1381
1382 release_sock(sk);
1383
1384 if (notify_enodev) {
1385 sk->sk_err = ENODEV;
1386 if (!sock_flag(sk, SOCK_DEAD))
1387 sk->sk_error_report(sk);
1388 }
1389 break;
1390
1391 case NETDEV_DOWN:
1392 if (bo->bound && bo->ifindex == dev->ifindex) {
1393 sk->sk_err = ENETDOWN;
1394 if (!sock_flag(sk, SOCK_DEAD))
1395 sk->sk_error_report(sk);
1396 }
1397 }
1398
1399 return NOTIFY_DONE;
1400 }
1401
1402 /*
1403 * initial settings for all BCM sockets to be set at socket creation time
1404 */
1405 static int bcm_init(struct sock *sk)
1406 {
1407 struct bcm_sock *bo = bcm_sk(sk);
1408
1409 bo->bound = 0;
1410 bo->ifindex = 0;
1411 bo->dropped_usr_msgs = 0;
1412 bo->bcm_proc_read = NULL;
1413
1414 INIT_LIST_HEAD(&bo->tx_ops);
1415 INIT_LIST_HEAD(&bo->rx_ops);
1416
1417 /* set notifier */
1418 bo->notifier.notifier_call = bcm_notifier;
1419
1420 register_netdevice_notifier(&bo->notifier);
1421
1422 return 0;
1423 }
1424
1425 /*
1426 * standard socket functions
1427 */
1428 static int bcm_release(struct socket *sock)
1429 {
1430 struct sock *sk = sock->sk;
1431 struct bcm_sock *bo;
1432 struct bcm_op *op, *next;
1433
1434 if (sk == NULL)
1435 return 0;
1436
1437 bo = bcm_sk(sk);
1438
1439 /* remove bcm_ops, timer, rx_unregister(), etc. */
1440
1441 unregister_netdevice_notifier(&bo->notifier);
1442
1443 lock_sock(sk);
1444
1445 list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1446 bcm_remove_op(op);
1447
1448 list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1449 /*
1450 * No matter whether we are bound or not (due to netdev problems),
1451 * can_rx_unregister() is always a safe thing to do here.
1452 */
1453 if (op->ifindex) {
1454 /*
1455 * Only remove subscriptions that had not
1456 * been removed due to NETDEV_UNREGISTER
1457 * in bcm_notifier()
1458 */
1459 if (op->rx_reg_dev) {
1460 struct net_device *dev;
1461
1462 dev = dev_get_by_index(&init_net, op->ifindex);
1463 if (dev) {
1464 bcm_rx_unreg(dev, op);
1465 dev_put(dev);
1466 }
1467 }
1468 } else
1469 can_rx_unregister(NULL, op->can_id,
1470 REGMASK(op->can_id),
1471 bcm_rx_handler, op);
1472
1473 bcm_remove_op(op);
1474 }
1475
1476 /* remove procfs entry */
1477 if (proc_dir && bo->bcm_proc_read)
1478 remove_proc_entry(bo->procname, proc_dir);
1479
1480 /* remove device reference */
1481 if (bo->bound) {
1482 bo->bound = 0;
1483 bo->ifindex = 0;
1484 }
1485
1486 sock_orphan(sk);
1487 sock->sk = NULL;
1488
1489 release_sock(sk);
1490 sock_put(sk);
1491
1492 return 0;
1493 }
1494
1495 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1496 int flags)
1497 {
1498 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1499 struct sock *sk = sock->sk;
1500 struct bcm_sock *bo = bcm_sk(sk);
1501
1502 if (len < sizeof(*addr))
1503 return -EINVAL;
1504
1505 if (bo->bound)
1506 return -EISCONN;
1507
1508 /* bind a device to this socket */
1509 if (addr->can_ifindex) {
1510 struct net_device *dev;
1511
1512 dev = dev_get_by_index(&init_net, addr->can_ifindex);
1513 if (!dev)
1514 return -ENODEV;
1515
1516 if (dev->type != ARPHRD_CAN) {
1517 dev_put(dev);
1518 return -ENODEV;
1519 }
1520
1521 bo->ifindex = dev->ifindex;
1522 dev_put(dev);
1523
1524 } else {
1525 /* no interface reference for ifindex = 0 ('any' CAN device) */
1526 bo->ifindex = 0;
1527 }
1528
1529 bo->bound = 1;
1530
1531 if (proc_dir) {
1532 /* unique socket address as filename */
1533 sprintf(bo->procname, "%lu", sock_i_ino(sk));
1534 bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
1535 proc_dir,
1536 &bcm_proc_fops, sk);
1537 }
1538
1539 return 0;
1540 }
1541
1542 static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1543 int flags)
1544 {
1545 struct sock *sk = sock->sk;
1546 struct sk_buff *skb;
1547 int error = 0;
1548 int noblock;
1549 int err;
1550
1551 noblock = flags & MSG_DONTWAIT;
1552 flags &= ~MSG_DONTWAIT;
1553 skb = skb_recv_datagram(sk, flags, noblock, &error);
1554 if (!skb)
1555 return error;
1556
1557 if (skb->len < size)
1558 size = skb->len;
1559
1560 err = memcpy_to_msg(msg, skb->data, size);
1561 if (err < 0) {
1562 skb_free_datagram(sk, skb);
1563 return err;
1564 }
1565
1566 sock_recv_ts_and_drops(msg, sk, skb);
1567
1568 if (msg->msg_name) {
1569 __sockaddr_check_size(sizeof(struct sockaddr_can));
1570 msg->msg_namelen = sizeof(struct sockaddr_can);
1571 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1572 }
1573
1574 skb_free_datagram(sk, skb);
1575
1576 return size;
1577 }
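
/*
 * Usage sketch (userspace side, not compiled here): notifications generated
 * by the ops above (RX_CHANGED, RX_TIMEOUT, TX_EXPIRED, TX_STATUS, ...) are
 * read from the same socket 's' as a bcm_msg_head plus head.nframes
 * can_frames:
 *
 *      struct {
 *              struct bcm_msg_head head;
 *              struct can_frame frame;
 *      } ev;
 *
 *      read(s, &ev, sizeof(ev));
 *
 * For RX_CHANGED, ev.head.nframes is 1 and ev.frame holds the changed
 * content; for RX_TIMEOUT and TX_EXPIRED, ev.head.nframes is 0 and only the
 * head is delivered (see bcm_send_to_user() above).
 */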
1578
1579 static const struct proto_ops bcm_ops = {
1580 .family = PF_CAN,
1581 .release = bcm_release,
1582 .bind = sock_no_bind,
1583 .connect = bcm_connect,
1584 .socketpair = sock_no_socketpair,
1585 .accept = sock_no_accept,
1586 .getname = sock_no_getname,
1587 .poll = datagram_poll,
1588 .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */
1589 .listen = sock_no_listen,
1590 .shutdown = sock_no_shutdown,
1591 .setsockopt = sock_no_setsockopt,
1592 .getsockopt = sock_no_getsockopt,
1593 .sendmsg = bcm_sendmsg,
1594 .recvmsg = bcm_recvmsg,
1595 .mmap = sock_no_mmap,
1596 .sendpage = sock_no_sendpage,
1597 };
1598
1599 static struct proto bcm_proto __read_mostly = {
1600 .name = "CAN_BCM",
1601 .owner = THIS_MODULE,
1602 .obj_size = sizeof(struct bcm_sock),
1603 .init = bcm_init,
1604 };
1605
1606 static const struct can_proto bcm_can_proto = {
1607 .type = SOCK_DGRAM,
1608 .protocol = CAN_BCM,
1609 .ops = &bcm_ops,
1610 .prot = &bcm_proto,
1611 };
1612
1613 static int __init bcm_module_init(void)
1614 {
1615 int err;
1616
1617 pr_info("can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n");
1618
1619 err = can_proto_register(&bcm_can_proto);
1620 if (err < 0) {
1621 printk(KERN_ERR "can: registration of bcm protocol failed\n");
1622 return err;
1623 }
1624
1625 /* create /proc/net/can-bcm directory */
1626 proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
1627 return 0;
1628 }
1629
1630 static void __exit bcm_module_exit(void)
1631 {
1632 can_proto_unregister(&bcm_can_proto);
1633
1634 if (proc_dir)
1635 remove_proc_entry("can-bcm", init_net.proc_net);
1636 }
1637
1638 module_init(bcm_module_init);
1639 module_exit(bcm_module_exit);