1 // SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
2 /*
3 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
4 *
5 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of Volkswagen nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * Alternatively, provided that this notice is retained in full, this
21 * software may be distributed under the terms of the GNU General
22 * Public License ("GPL") version 2, in which case the provisions of the
23 * GPL apply INSTEAD OF those given above.
24 *
25 * The provided data structures and external interfaces from this code
26 * are not restricted to be used by modules with a GPL compatible license.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
39 * DAMAGE.
40 *
41 */
42
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/interrupt.h>
46 #include <linux/hrtimer.h>
47 #include <linux/list.h>
48 #include <linux/proc_fs.h>
49 #include <linux/seq_file.h>
50 #include <linux/uio.h>
51 #include <linux/net.h>
52 #include <linux/netdevice.h>
53 #include <linux/socket.h>
54 #include <linux/if_arp.h>
55 #include <linux/skbuff.h>
56 #include <linux/can.h>
57 #include <linux/can/core.h>
58 #include <linux/can/skb.h>
59 #include <linux/can/bcm.h>
60 #include <linux/slab.h>
61 #include <net/sock.h>
62 #include <net/net_namespace.h>
63
64 /*
65  * To send multiple CAN frames within TX_SETUP or to filter CAN
66  * messages with a multiplex index within RX_SETUP, the number of
67  * different filters is limited to 256 due to the one-byte index value.
68 */
69 #define MAX_NFRAMES 256
70
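/*
 * Editorial example (not part of the original file): a minimal userspace
 * sketch of a cyclic TX_SETUP, assuming a CAN interface named "can0" and
 * the documented CAN_BCM message layout (struct bcm_msg_head immediately
 * followed by msg_head.nframes CAN frames), which is what bcm_sendmsg()
 * below parses. Ten frames are sent every 100 ms, then the op continues
 * at a 1 s period, as implemented by bcm_tx_timeout_handler().
 */
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct {
		struct bcm_msg_head head;
		struct can_frame frame;
	} msg = { 0 };
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);

	if (s < 0)
		return 1;

	addr.can_ifindex = if_nametoindex("can0");	/* assumed interface */
	if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	msg.head.opcode  = TX_SETUP;
	msg.head.flags   = SETTIMER | STARTTIMER;
	msg.head.count   = 10;			/* 10 frames at ival1 ... */
	msg.head.ival1.tv_usec = 100000;	/* ... every 100 ms ...   */
	msg.head.ival2.tv_sec  = 1;		/* ... then every 1 s     */
	msg.head.can_id  = 0x123;
	msg.head.nframes = 1;
	msg.frame.can_id  = 0x123;
	msg.frame.can_dlc = 2;
	msg.frame.data[0] = 0xde;
	msg.frame.data[1] = 0xad;

	if (write(s, &msg, sizeof(msg)) < 0)
		return 1;

	pause();	/* the cyclic op lives as long as the socket */
	close(s);
	return 0;
}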
71 /* limit timers to 400 days for sending/timeouts */
72 #define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
73
74 /* use of last_frames[index].flags */
75 #define RX_RECV 0x40 /* received data for this element */
76 #define RX_THR 0x80 /* element has not been sent due to throttle feature */
77 #define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */
78
79 /* get best masking value for can_rx_register() for a given single can_id */
80 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
81 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
82 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
83
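/*
 * Editorial example: for a classic 11-bit id such as 0x123, REGMASK(0x123)
 * expands to CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG
 * (0x000007FF | 0x80000000 | 0x40000000 = 0xC00007FF), so the subscription
 * made via can_rx_register() matches only frames with exactly that 11-bit
 * id and with neither the EFF nor the RTR flag set.
 */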
84 #define CAN_BCM_VERSION "20170425"
85
86 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
87 MODULE_LICENSE("Dual BSD/GPL");
88 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
89 MODULE_ALIAS("can-proto-2");
90
91 /*
92  * easy access to the first 64 bits of can(fd)_frame payload. cp->data is
93  * 64 bit aligned so the offset has to be a multiple of 8, which is ensured
94  * by the only callers, bcm_rx_cmp_to_index() and bcm_rx_handler().
95 */
96 static inline u64 get_u64(const struct canfd_frame *cp, int offset)
97 {
98 return *(u64 *)(cp->data + offset);
99 }
100
101 struct bcm_op {
102 struct list_head list;
103 int ifindex;
104 canid_t can_id;
105 u32 flags;
106 unsigned long frames_abs, frames_filtered;
107 struct bcm_timeval ival1, ival2;
108 struct hrtimer timer, thrtimer;
109 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
110 int rx_ifindex;
111 int cfsiz;
112 u32 count;
113 u32 nframes;
114 u32 currframe;
115 /* void pointers to arrays of struct can[fd]_frame */
116 void *frames;
117 void *last_frames;
118 struct canfd_frame sframe;
119 struct canfd_frame last_sframe;
120 struct sock *sk;
121 struct net_device *rx_reg_dev;
122 };
123
124 struct bcm_sock {
125 struct sock sk;
126 int bound;
127 int ifindex;
128 struct notifier_block notifier;
129 struct list_head rx_ops;
130 struct list_head tx_ops;
131 unsigned long dropped_usr_msgs;
132 struct proc_dir_entry *bcm_proc_read;
133         char procname[32]; /* inode number in decimal with \0 */
134 };
135
136 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
137 {
138 return (struct bcm_sock *)sk;
139 }
140
141 static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
142 {
143 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
144 }
145
146 /* check limitations for timeval provided by user */
147 static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
148 {
149 if ((msg_head->ival1.tv_sec < 0) ||
150 (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
151 (msg_head->ival1.tv_usec < 0) ||
152 (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
153 (msg_head->ival2.tv_sec < 0) ||
154 (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
155 (msg_head->ival2.tv_usec < 0) ||
156 (msg_head->ival2.tv_usec >= USEC_PER_SEC))
157 return true;
158
159 return false;
160 }
161
162 #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
163 #define OPSIZ sizeof(struct bcm_op)
164 #define MHSIZ sizeof(struct bcm_msg_head)
165
166 /*
167 * procfs functions
168 */
169 #if IS_ENABLED(CONFIG_PROC_FS)
170 static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
171 {
172 struct net_device *dev;
173
174 if (!ifindex)
175 return "any";
176
177 rcu_read_lock();
178 dev = dev_get_by_index_rcu(net, ifindex);
179 if (dev)
180 strcpy(result, dev->name);
181 else
182 strcpy(result, "???");
183 rcu_read_unlock();
184
185 return result;
186 }
187
188 static int bcm_proc_show(struct seq_file *m, void *v)
189 {
190 char ifname[IFNAMSIZ];
191 struct net *net = m->private;
192 struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode);
193 struct bcm_sock *bo = bcm_sk(sk);
194 struct bcm_op *op;
195
196 seq_printf(m, ">>> socket %pK", sk->sk_socket);
197 seq_printf(m, " / sk %pK", sk);
198 seq_printf(m, " / bo %pK", bo);
199 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
200 seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
201 seq_printf(m, " <<<\n");
202
203 list_for_each_entry(op, &bo->rx_ops, list) {
204
205 unsigned long reduction;
206
207 /* print only active entries & prevent division by zero */
208 if (!op->frames_abs)
209 continue;
210
211 seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
212 bcm_proc_getifname(net, ifname, op->ifindex));
213
214 if (op->flags & CAN_FD_FRAME)
215 seq_printf(m, "(%u)", op->nframes);
216 else
217 seq_printf(m, "[%u]", op->nframes);
218
219 seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
220
221 if (op->kt_ival1)
222 seq_printf(m, "timeo=%lld ",
223 (long long)ktime_to_us(op->kt_ival1));
224
225 if (op->kt_ival2)
226 seq_printf(m, "thr=%lld ",
227 (long long)ktime_to_us(op->kt_ival2));
228
229 seq_printf(m, "# recv %ld (%ld) => reduction: ",
230 op->frames_filtered, op->frames_abs);
231
232 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
233
234 seq_printf(m, "%s%ld%%\n",
235 (reduction == 100) ? "near " : "", reduction);
236 }
237
238 list_for_each_entry(op, &bo->tx_ops, list) {
239
240 seq_printf(m, "tx_op: %03X %s ", op->can_id,
241 bcm_proc_getifname(net, ifname, op->ifindex));
242
243 if (op->flags & CAN_FD_FRAME)
244 seq_printf(m, "(%u) ", op->nframes);
245 else
246 seq_printf(m, "[%u] ", op->nframes);
247
248 if (op->kt_ival1)
249 seq_printf(m, "t1=%lld ",
250 (long long)ktime_to_us(op->kt_ival1));
251
252 if (op->kt_ival2)
253 seq_printf(m, "t2=%lld ",
254 (long long)ktime_to_us(op->kt_ival2));
255
256 seq_printf(m, "# sent %ld\n", op->frames_abs);
257 }
258 seq_putc(m, '\n');
259 return 0;
260 }
261 #endif /* CONFIG_PROC_FS */
262
263 /*
264 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
265 * of the given bcm tx op
266 */
267 static void bcm_can_tx(struct bcm_op *op)
268 {
269 struct sk_buff *skb;
270 struct net_device *dev;
271 struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;
272
273 /* no target device? => exit */
274 if (!op->ifindex)
275 return;
276
277 dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
278 if (!dev) {
279 /* RFC: should this bcm_op remove itself here? */
280 return;
281 }
282
283 skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
284 if (!skb)
285 goto out;
286
287 can_skb_reserve(skb);
288 can_skb_prv(skb)->ifindex = dev->ifindex;
289 can_skb_prv(skb)->skbcnt = 0;
290
291 skb_put_data(skb, cf, op->cfsiz);
292
293 /* send with loopback */
294 skb->dev = dev;
295 can_skb_set_owner(skb, op->sk);
296 can_send(skb, 1);
297
298 /* update statistics */
299 op->currframe++;
300 op->frames_abs++;
301
302 /* reached last frame? */
303 if (op->currframe >= op->nframes)
304 op->currframe = 0;
305 out:
306 dev_put(dev);
307 }
308
309 /*
310  * bcm_send_to_user - send a BCM message to userspace
311 * (consisting of bcm_msg_head + x CAN frames)
312 */
313 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
314 struct canfd_frame *frames, int has_timestamp)
315 {
316 struct sk_buff *skb;
317 struct canfd_frame *firstframe;
318 struct sockaddr_can *addr;
319 struct sock *sk = op->sk;
320 unsigned int datalen = head->nframes * op->cfsiz;
321 int err;
322
323 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
324 if (!skb)
325 return;
326
327 skb_put_data(skb, head, sizeof(*head));
328
329 if (head->nframes) {
330 /* CAN frames starting here */
331 firstframe = (struct canfd_frame *)skb_tail_pointer(skb);
332
333 skb_put_data(skb, frames, datalen);
334
335 /*
336 * the BCM uses the flags-element of the canfd_frame
337 * structure for internal purposes. This is only
338 * relevant for updates that are generated by the
339 * BCM, where nframes is 1
340 */
341 if (head->nframes == 1)
342 firstframe->flags &= BCM_CAN_FLAGS_MASK;
343 }
344
345 if (has_timestamp) {
346 /* restore rx timestamp */
347 skb->tstamp = op->rx_stamp;
348 }
349
350 /*
351 * Put the datagram to the queue so that bcm_recvmsg() can
352 * get it from there. We need to pass the interface index to
353 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
354 * containing the interface index.
355 */
356
357 sock_skb_cb_check_size(sizeof(struct sockaddr_can));
358 addr = (struct sockaddr_can *)skb->cb;
359 memset(addr, 0, sizeof(*addr));
360 addr->can_family = AF_CAN;
361 addr->can_ifindex = op->rx_ifindex;
362
363 err = sock_queue_rcv_skb(sk, skb);
364 if (err < 0) {
365 struct bcm_sock *bo = bcm_sk(sk);
366
367 kfree_skb(skb);
368 /* don't care about overflows in this statistic */
369 bo->dropped_usr_msgs++;
370 }
371 }
372
373 static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
374 {
375 ktime_t ival;
376
377 if (op->kt_ival1 && op->count)
378 ival = op->kt_ival1;
379 else if (op->kt_ival2)
380 ival = op->kt_ival2;
381 else
382 return false;
383
384 hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
385 return true;
386 }
387
388 static void bcm_tx_start_timer(struct bcm_op *op)
389 {
390 if (bcm_tx_set_expiry(op, &op->timer))
391 hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
392 }
393
394 /* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
395 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
396 {
397 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
398 struct bcm_msg_head msg_head;
399
400 if (op->kt_ival1 && (op->count > 0)) {
401 op->count--;
402 if (!op->count && (op->flags & TX_COUNTEVT)) {
403
404 /* create notification to user */
405 msg_head.opcode = TX_EXPIRED;
406 msg_head.flags = op->flags;
407 msg_head.count = op->count;
408 msg_head.ival1 = op->ival1;
409 msg_head.ival2 = op->ival2;
410 msg_head.can_id = op->can_id;
411 msg_head.nframes = 0;
412
413 bcm_send_to_user(op, &msg_head, NULL, 0);
414 }
415 bcm_can_tx(op);
416
417 } else if (op->kt_ival2) {
418 bcm_can_tx(op);
419 }
420
421 return bcm_tx_set_expiry(op, &op->timer) ?
422 HRTIMER_RESTART : HRTIMER_NORESTART;
423 }
424
425 /*
426 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
427 */
428 static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
429 {
430 struct bcm_msg_head head;
431
432 /* update statistics */
433 op->frames_filtered++;
434
435 /* prevent statistics overflow */
436 if (op->frames_filtered > ULONG_MAX/100)
437 op->frames_filtered = op->frames_abs = 0;
438
439 /* this element is not throttled anymore */
440 data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
441
442 head.opcode = RX_CHANGED;
443 head.flags = op->flags;
444 head.count = op->count;
445 head.ival1 = op->ival1;
446 head.ival2 = op->ival2;
447 head.can_id = op->can_id;
448 head.nframes = 1;
449
450 bcm_send_to_user(op, &head, data, 1);
451 }
452
453 /*
454 * bcm_rx_update_and_send - process a detected relevant receive content change
455 * 1. update the last received data
456 * 2. send a notification to the user (if possible)
457 */
458 static void bcm_rx_update_and_send(struct bcm_op *op,
459 struct canfd_frame *lastdata,
460 const struct canfd_frame *rxdata)
461 {
462 memcpy(lastdata, rxdata, op->cfsiz);
463
464 /* mark as used and throttled by default */
465 lastdata->flags |= (RX_RECV|RX_THR);
466
467 /* throttling mode inactive ? */
468 if (!op->kt_ival2) {
469 /* send RX_CHANGED to the user immediately */
470 bcm_rx_changed(op, lastdata);
471 return;
472 }
473
474 /* with active throttling timer we are just done here */
475 if (hrtimer_active(&op->thrtimer))
476 return;
477
478 /* first reception with enabled throttling mode */
479 if (!op->kt_lastmsg)
480 goto rx_changed_settime;
481
482 /* got a second frame inside a potential throttle period? */
483 if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
484 ktime_to_us(op->kt_ival2)) {
485 /* do not send the saved data - only start throttle timer */
486 hrtimer_start(&op->thrtimer,
487 ktime_add(op->kt_lastmsg, op->kt_ival2),
488 HRTIMER_MODE_ABS_SOFT);
489 return;
490 }
491
492         /* the gap was big enough that throttling was not needed here */
493 rx_changed_settime:
494 bcm_rx_changed(op, lastdata);
495 op->kt_lastmsg = ktime_get();
496 }
497
498 /*
499 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
500 * received data stored in op->last_frames[]
501 */
502 static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
503 const struct canfd_frame *rxdata)
504 {
505 struct canfd_frame *cf = op->frames + op->cfsiz * index;
506 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
507 int i;
508
509 /*
510 * no one uses the MSBs of flags for comparison,
511          * so we use them here to detect the first reception
512 */
513
514 if (!(lcf->flags & RX_RECV)) {
515 /* received data for the first time => send update to user */
516 bcm_rx_update_and_send(op, lcf, rxdata);
517 return;
518 }
519
520 /* do a real check in CAN frame data section */
521 for (i = 0; i < rxdata->len; i += 8) {
522 if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
523 (get_u64(cf, i) & get_u64(lcf, i))) {
524 bcm_rx_update_and_send(op, lcf, rxdata);
525 return;
526 }
527 }
528
529 if (op->flags & RX_CHECK_DLC) {
530 /* do a real check in CAN frame length */
531 if (rxdata->len != lcf->len) {
532 bcm_rx_update_and_send(op, lcf, rxdata);
533 return;
534 }
535 }
536 }
537
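/*
 * Editorial example (not part of the original file): the userspace view of
 * the content filtering done above, as a minimal sketch assuming a CAN
 * interface named "can0". The single frame passed with RX_SETUP acts as
 * the relevant-bits mask used by bcm_rx_cmp_to_index(); here only changes
 * in data[0] of can_id 0x123 produce RX_CHANGED messages, and ival1 arms
 * the RX_TIMEOUT monitoring once frames have been seen.
 */
#include <stdio.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct {
		struct bcm_msg_head head;
		struct can_frame frame;
	} msg = { 0 }, rx;
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);

	addr.can_ifindex = if_nametoindex("can0");	/* assumed interface */
	if (s < 0 || connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	msg.head.opcode   = RX_SETUP;
	msg.head.flags    = SETTIMER;
	msg.head.ival1.tv_sec = 5;	/* RX_TIMEOUT if 0x123 goes silent */
	msg.head.can_id   = 0x123;
	msg.head.nframes  = 1;
	msg.frame.can_id  = 0x123;
	msg.frame.can_dlc = 1;
	msg.frame.data[0] = 0xff;	/* only these bits are relevant */

	if (write(s, &msg, sizeof(msg)) < 0)
		return 1;

	/* every read() delivers a bcm_msg_head, e.g. RX_CHANGED or RX_TIMEOUT */
	while (read(s, &rx, sizeof(rx)) > 0) {
		if (rx.head.opcode == RX_CHANGED)
			printf("data[0] of 0x%X changed to 0x%02X\n",
			       rx.head.can_id, rx.frame.data[0]);
	}
	close(s);
	return 0;
}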
538 /*
539 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
540 */
541 static void bcm_rx_starttimer(struct bcm_op *op)
542 {
543 if (op->flags & RX_NO_AUTOTIMER)
544 return;
545
546 if (op->kt_ival1)
547 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
548 }
549
550 /* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
551 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
552 {
553 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
554 struct bcm_msg_head msg_head;
555
556         /* if the user wants to be informed when cyclic CAN messages come back */
557 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
558 /* clear received CAN frames to indicate 'nothing received' */
559 memset(op->last_frames, 0, op->nframes * op->cfsiz);
560 }
561
562 /* create notification to user */
563 msg_head.opcode = RX_TIMEOUT;
564 msg_head.flags = op->flags;
565 msg_head.count = op->count;
566 msg_head.ival1 = op->ival1;
567 msg_head.ival2 = op->ival2;
568 msg_head.can_id = op->can_id;
569 msg_head.nframes = 0;
570
571 bcm_send_to_user(op, &msg_head, NULL, 0);
572
573 return HRTIMER_NORESTART;
574 }
575
576 /*
577 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
578 */
579 static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
580 {
581 struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
582
583 if ((op->last_frames) && (lcf->flags & RX_THR)) {
584 bcm_rx_changed(op, lcf);
585 return 1;
586 }
587 return 0;
588 }
589
590 /*
591  * bcm_rx_thr_flush - Check for throttled data and send it to userspace
592 */
593 static int bcm_rx_thr_flush(struct bcm_op *op)
594 {
595 int updated = 0;
596
597 if (op->nframes > 1) {
598 unsigned int i;
599
600 /* for MUX filter we start at index 1 */
601 for (i = 1; i < op->nframes; i++)
602 updated += bcm_rx_do_flush(op, i);
603
604 } else {
605 /* for RX_FILTER_ID and simple filter */
606 updated += bcm_rx_do_flush(op, 0);
607 }
608
609 return updated;
610 }
611
612 /*
613 * bcm_rx_thr_handler - the time for blocked content updates is over now:
614  *                      Check for throttled data and send it to userspace
615 */
616 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
617 {
618 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
619
620 if (bcm_rx_thr_flush(op)) {
621 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
622 return HRTIMER_RESTART;
623 } else {
624 /* rearm throttle handling */
625 op->kt_lastmsg = 0;
626 return HRTIMER_NORESTART;
627 }
628 }
629
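/*
 * Editorial example: with an RX_SETUP using SETTIMER and ival2 = 100 ms,
 * the first content change is delivered immediately and kt_lastmsg is set
 * (bcm_rx_update_and_send). A second change arriving, say, 30 ms later is
 * only stored in last_frames with RX_THR set; thrtimer fires at
 * kt_lastmsg + 100 ms and bcm_rx_thr_flush() delivers it as one RX_CHANGED.
 * Userspace therefore sees at most one update per filter element per
 * 100 ms, however fast the content toggles on the bus.
 */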
630 /*
631 * bcm_rx_handler - handle a CAN frame reception
632 */
633 static void bcm_rx_handler(struct sk_buff *skb, void *data)
634 {
635 struct bcm_op *op = (struct bcm_op *)data;
636 const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
637 unsigned int i;
638
639 if (op->can_id != rxframe->can_id)
640 return;
641
642 /* make sure to handle the correct frame type (CAN / CAN FD) */
643 if (skb->len != op->cfsiz)
644 return;
645
646 /* disable timeout */
647 hrtimer_cancel(&op->timer);
648
649 /* save rx timestamp */
650 op->rx_stamp = skb->tstamp;
651 /* save originator for recvfrom() */
652 op->rx_ifindex = skb->dev->ifindex;
653 /* update statistics */
654 op->frames_abs++;
655
656 if (op->flags & RX_RTR_FRAME) {
657 /* send reply for RTR-request (placed in op->frames[0]) */
658 bcm_can_tx(op);
659 return;
660 }
661
662 if (op->flags & RX_FILTER_ID) {
663 /* the easiest case */
664 bcm_rx_update_and_send(op, op->last_frames, rxframe);
665 goto rx_starttimer;
666 }
667
668 if (op->nframes == 1) {
669 /* simple compare with index 0 */
670 bcm_rx_cmp_to_index(op, 0, rxframe);
671 goto rx_starttimer;
672 }
673
674 if (op->nframes > 1) {
675 /*
676 * multiplex compare
677 *
678 * find the first multiplex mask that fits.
679 * Remark: The MUX-mask is stored in index 0 - but only the
680 * first 64 bits of the frame data[] are relevant (CAN FD)
681 */
682
683 for (i = 1; i < op->nframes; i++) {
684 if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
685 (get_u64(op->frames, 0) &
686 get_u64(op->frames + op->cfsiz * i, 0))) {
687 bcm_rx_cmp_to_index(op, i, rxframe);
688 break;
689 }
690 }
691 }
692
693 rx_starttimer:
694 bcm_rx_starttimer(op);
695 }
696
697 /*
698 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
699 */
700 static struct bcm_op *bcm_find_op(struct list_head *ops,
701 struct bcm_msg_head *mh, int ifindex)
702 {
703 struct bcm_op *op;
704
705 list_for_each_entry(op, ops, list) {
706 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
707 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
708 return op;
709 }
710
711 return NULL;
712 }
713
714 static void bcm_remove_op(struct bcm_op *op)
715 {
716 hrtimer_cancel(&op->timer);
717 hrtimer_cancel(&op->thrtimer);
718
719 if ((op->frames) && (op->frames != &op->sframe))
720 kfree(op->frames);
721
722 if ((op->last_frames) && (op->last_frames != &op->last_sframe))
723 kfree(op->last_frames);
724
725 kfree(op);
726 }
727
728 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
729 {
730 if (op->rx_reg_dev == dev) {
731 can_rx_unregister(dev_net(dev), dev, op->can_id,
732 REGMASK(op->can_id), bcm_rx_handler, op);
733
734 /* mark as removed subscription */
735 op->rx_reg_dev = NULL;
736 } else
737 printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
738 "mismatch %p %p\n", op->rx_reg_dev, dev);
739 }
740
741 /*
742 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
743 */
744 static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
745 int ifindex)
746 {
747 struct bcm_op *op, *n;
748
749 list_for_each_entry_safe(op, n, ops, list) {
750 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
751 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
752
753 /*
754                          * Don't care if we're bound or not (due to netdev
755                          * problems); can_rx_unregister() is always a safe
756                          * thing to do here.
757 */
758 if (op->ifindex) {
759 /*
760 * Only remove subscriptions that had not
761 * been removed due to NETDEV_UNREGISTER
762 * in bcm_notifier()
763 */
764 if (op->rx_reg_dev) {
765 struct net_device *dev;
766
767 dev = dev_get_by_index(sock_net(op->sk),
768 op->ifindex);
769 if (dev) {
770 bcm_rx_unreg(dev, op);
771 dev_put(dev);
772 }
773 }
774 } else
775 can_rx_unregister(sock_net(op->sk), NULL,
776 op->can_id,
777 REGMASK(op->can_id),
778 bcm_rx_handler, op);
779
780 list_del(&op->list);
781 bcm_remove_op(op);
782 return 1; /* done */
783 }
784 }
785
786 return 0; /* not found */
787 }
788
789 /*
790 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
791 */
792 static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
793 int ifindex)
794 {
795 struct bcm_op *op, *n;
796
797 list_for_each_entry_safe(op, n, ops, list) {
798 if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
799 (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
800 list_del(&op->list);
801 bcm_remove_op(op);
802 return 1; /* done */
803 }
804 }
805
806 return 0; /* not found */
807 }
808
809 /*
810 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
811 */
812 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
813 int ifindex)
814 {
815 struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);
816
817 if (!op)
818 return -EINVAL;
819
820 /* put current values into msg_head */
821 msg_head->flags = op->flags;
822 msg_head->count = op->count;
823 msg_head->ival1 = op->ival1;
824 msg_head->ival2 = op->ival2;
825 msg_head->nframes = op->nframes;
826
827 bcm_send_to_user(op, msg_head, op->frames, 0);
828
829 return MHSIZ;
830 }
831
832 /*
833 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
834 */
835 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
836 int ifindex, struct sock *sk)
837 {
838 struct bcm_sock *bo = bcm_sk(sk);
839 struct bcm_op *op;
840 struct canfd_frame *cf;
841 unsigned int i;
842 int err;
843
844 /* we need a real device to send frames */
845 if (!ifindex)
846 return -ENODEV;
847
848 /* check nframes boundaries - we need at least one CAN frame */
849 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
850 return -EINVAL;
851
852 /* check timeval limitations */
853 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
854 return -EINVAL;
855
856 /* check the given can_id */
857 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
858 if (op) {
859 /* update existing BCM operation */
860
861 /*
862 * Do we need more space for the CAN frames than currently
863 * allocated? -> This is a _really_ unusual use-case and
864 * therefore (complexity / locking) it is not supported.
865 */
866 if (msg_head->nframes > op->nframes)
867 return -E2BIG;
868
869 /* update CAN frames content */
870 for (i = 0; i < msg_head->nframes; i++) {
871
872 cf = op->frames + op->cfsiz * i;
873 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
874
875 if (op->flags & CAN_FD_FRAME) {
876 if (cf->len > 64)
877 err = -EINVAL;
878 } else {
879 if (cf->len > 8)
880 err = -EINVAL;
881 }
882
883 if (err < 0)
884 return err;
885
886 if (msg_head->flags & TX_CP_CAN_ID) {
887 /* copy can_id into frame */
888 cf->can_id = msg_head->can_id;
889 }
890 }
891 op->flags = msg_head->flags;
892
893 } else {
894 /* insert new BCM operation for the given can_id */
895
896 op = kzalloc(OPSIZ, GFP_KERNEL);
897 if (!op)
898 return -ENOMEM;
899
900 op->can_id = msg_head->can_id;
901 op->cfsiz = CFSIZ(msg_head->flags);
902 op->flags = msg_head->flags;
903
904 /* create array for CAN frames and copy the data */
905 if (msg_head->nframes > 1) {
906 op->frames = kmalloc_array(msg_head->nframes,
907 op->cfsiz,
908 GFP_KERNEL);
909 if (!op->frames) {
910 kfree(op);
911 return -ENOMEM;
912 }
913 } else
914 op->frames = &op->sframe;
915
916 for (i = 0; i < msg_head->nframes; i++) {
917
918 cf = op->frames + op->cfsiz * i;
919 err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
920
921 if (op->flags & CAN_FD_FRAME) {
922 if (cf->len > 64)
923 err = -EINVAL;
924 } else {
925 if (cf->len > 8)
926 err = -EINVAL;
927 }
928
929 if (err < 0) {
930 if (op->frames != &op->sframe)
931 kfree(op->frames);
932 kfree(op);
933 return err;
934 }
935
936 if (msg_head->flags & TX_CP_CAN_ID) {
937 /* copy can_id into frame */
938 cf->can_id = msg_head->can_id;
939 }
940 }
941
942                 /* tx_ops never compare with previously received messages */
943 op->last_frames = NULL;
944
945 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
946 op->sk = sk;
947 op->ifindex = ifindex;
948
949 /* initialize uninitialized (kzalloc) structure */
950 hrtimer_init(&op->timer, CLOCK_MONOTONIC,
951 HRTIMER_MODE_REL_SOFT);
952 op->timer.function = bcm_tx_timeout_handler;
953
954 /* currently unused in tx_ops */
955 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
956 HRTIMER_MODE_REL_SOFT);
957
958 /* add this bcm_op to the list of the tx_ops */
959 list_add(&op->list, &bo->tx_ops);
960
961 } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
962
963 if (op->nframes != msg_head->nframes) {
964 op->nframes = msg_head->nframes;
965 /* start multiple frame transmission with index 0 */
966 op->currframe = 0;
967 }
968
969 /* check flags */
970
971 if (op->flags & TX_RESET_MULTI_IDX) {
972 /* start multiple frame transmission with index 0 */
973 op->currframe = 0;
974 }
975
976 if (op->flags & SETTIMER) {
977 /* set timer values */
978 op->count = msg_head->count;
979 op->ival1 = msg_head->ival1;
980 op->ival2 = msg_head->ival2;
981 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
982 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
983
984 /* disable an active timer due to zero values? */
985 if (!op->kt_ival1 && !op->kt_ival2)
986 hrtimer_cancel(&op->timer);
987 }
988
989 if (op->flags & STARTTIMER) {
990 hrtimer_cancel(&op->timer);
991 /* spec: send CAN frame when starting timer */
992 op->flags |= TX_ANNOUNCE;
993 }
994
995 if (op->flags & TX_ANNOUNCE) {
996 bcm_can_tx(op);
997 if (op->count)
998 op->count--;
999 }
1000
1001 if (op->flags & STARTTIMER)
1002 bcm_tx_start_timer(op);
1003
1004 return msg_head->nframes * op->cfsiz + MHSIZ;
1005 }
1006
1007 /*
1008 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
1009 */
1010 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1011 int ifindex, struct sock *sk)
1012 {
1013 struct bcm_sock *bo = bcm_sk(sk);
1014 struct bcm_op *op;
1015 int do_rx_register;
1016 int err = 0;
1017
1018 if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
1019 /* be robust against wrong usage ... */
1020 msg_head->flags |= RX_FILTER_ID;
1021 /* ignore trailing garbage */
1022 msg_head->nframes = 0;
1023 }
1024
1025 /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
1026 if (msg_head->nframes > MAX_NFRAMES + 1)
1027 return -EINVAL;
1028
1029 if ((msg_head->flags & RX_RTR_FRAME) &&
1030 ((msg_head->nframes != 1) ||
1031 (!(msg_head->can_id & CAN_RTR_FLAG))))
1032 return -EINVAL;
1033
1034 /* check timeval limitations */
1035 if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
1036 return -EINVAL;
1037
1038 /* check the given can_id */
1039 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
1040 if (op) {
1041 /* update existing BCM operation */
1042
1043 /*
1044 * Do we need more space for the CAN frames than currently
1045 * allocated? -> This is a _really_ unusual use-case and
1046 * therefore (complexity / locking) it is not supported.
1047 */
1048 if (msg_head->nframes > op->nframes)
1049 return -E2BIG;
1050
1051 if (msg_head->nframes) {
1052 /* update CAN frames content */
1053 err = memcpy_from_msg(op->frames, msg,
1054 msg_head->nframes * op->cfsiz);
1055 if (err < 0)
1056 return err;
1057
1058 /* clear last_frames to indicate 'nothing received' */
1059 memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
1060 }
1061
1062 op->nframes = msg_head->nframes;
1063 op->flags = msg_head->flags;
1064
1065 /* Only an update -> do not call can_rx_register() */
1066 do_rx_register = 0;
1067
1068 } else {
1069 /* insert new BCM operation for the given can_id */
1070 op = kzalloc(OPSIZ, GFP_KERNEL);
1071 if (!op)
1072 return -ENOMEM;
1073
1074 op->can_id = msg_head->can_id;
1075 op->nframes = msg_head->nframes;
1076 op->cfsiz = CFSIZ(msg_head->flags);
1077 op->flags = msg_head->flags;
1078
1079 if (msg_head->nframes > 1) {
1080 /* create array for CAN frames and copy the data */
1081 op->frames = kmalloc_array(msg_head->nframes,
1082 op->cfsiz,
1083 GFP_KERNEL);
1084 if (!op->frames) {
1085 kfree(op);
1086 return -ENOMEM;
1087 }
1088
1089 /* create and init array for received CAN frames */
1090 op->last_frames = kcalloc(msg_head->nframes,
1091 op->cfsiz,
1092 GFP_KERNEL);
1093 if (!op->last_frames) {
1094 kfree(op->frames);
1095 kfree(op);
1096 return -ENOMEM;
1097 }
1098
1099 } else {
1100 op->frames = &op->sframe;
1101 op->last_frames = &op->last_sframe;
1102 }
1103
1104 if (msg_head->nframes) {
1105 err = memcpy_from_msg(op->frames, msg,
1106 msg_head->nframes * op->cfsiz);
1107 if (err < 0) {
1108 if (op->frames != &op->sframe)
1109 kfree(op->frames);
1110 if (op->last_frames != &op->last_sframe)
1111 kfree(op->last_frames);
1112 kfree(op);
1113 return err;
1114 }
1115 }
1116
1117 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1118 op->sk = sk;
1119 op->ifindex = ifindex;
1120
1121 /* ifindex for timeout events w/o previous frame reception */
1122 op->rx_ifindex = ifindex;
1123
1124 /* initialize uninitialized (kzalloc) structure */
1125 hrtimer_init(&op->timer, CLOCK_MONOTONIC,
1126 HRTIMER_MODE_REL_SOFT);
1127 op->timer.function = bcm_rx_timeout_handler;
1128
1129 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
1130 HRTIMER_MODE_REL_SOFT);
1131 op->thrtimer.function = bcm_rx_thr_handler;
1132
1133 /* add this bcm_op to the list of the rx_ops */
1134 list_add(&op->list, &bo->rx_ops);
1135
1136 /* call can_rx_register() */
1137 do_rx_register = 1;
1138
1139 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1140
1141 /* check flags */
1142
1143 if (op->flags & RX_RTR_FRAME) {
1144 struct canfd_frame *frame0 = op->frames;
1145
1146 /* no timers in RTR-mode */
1147 hrtimer_cancel(&op->thrtimer);
1148 hrtimer_cancel(&op->timer);
1149
1150 /*
1151 * funny feature in RX(!)_SETUP only for RTR-mode:
1152 * copy can_id into frame BUT without RTR-flag to
1153 * prevent a full-load-loopback-test ... ;-]
1154 */
1155 if ((op->flags & TX_CP_CAN_ID) ||
1156 (frame0->can_id == op->can_id))
1157 frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
1158
1159 } else {
1160 if (op->flags & SETTIMER) {
1161
1162 /* set timer value */
1163 op->ival1 = msg_head->ival1;
1164 op->ival2 = msg_head->ival2;
1165 op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
1166 op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
1167
1168 /* disable an active timer due to zero value? */
1169 if (!op->kt_ival1)
1170 hrtimer_cancel(&op->timer);
1171
1172 /*
1173 * In any case cancel the throttle timer, flush
1174 * potentially blocked msgs and reset throttle handling
1175 */
1176 op->kt_lastmsg = 0;
1177 hrtimer_cancel(&op->thrtimer);
1178 bcm_rx_thr_flush(op);
1179 }
1180
1181 if ((op->flags & STARTTIMER) && op->kt_ival1)
1182 hrtimer_start(&op->timer, op->kt_ival1,
1183 HRTIMER_MODE_REL_SOFT);
1184 }
1185
1186 /* now we can register for can_ids, if we added a new bcm_op */
1187 if (do_rx_register) {
1188 if (ifindex) {
1189 struct net_device *dev;
1190
1191 dev = dev_get_by_index(sock_net(sk), ifindex);
1192 if (dev) {
1193 err = can_rx_register(sock_net(sk), dev,
1194 op->can_id,
1195 REGMASK(op->can_id),
1196 bcm_rx_handler, op,
1197 "bcm", sk);
1198
1199 op->rx_reg_dev = dev;
1200 dev_put(dev);
1201 }
1202
1203 } else
1204 err = can_rx_register(sock_net(sk), NULL, op->can_id,
1205 REGMASK(op->can_id),
1206 bcm_rx_handler, op, "bcm", sk);
1207 if (err) {
1208 /* this bcm rx op is broken -> remove it */
1209 list_del(&op->list);
1210 bcm_remove_op(op);
1211 return err;
1212 }
1213 }
1214
1215 return msg_head->nframes * op->cfsiz + MHSIZ;
1216 }
1217
1218 /*
1219 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1220 */
1221 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
1222 int cfsiz)
1223 {
1224 struct sk_buff *skb;
1225 struct net_device *dev;
1226 int err;
1227
1228 /* we need a real device to send frames */
1229 if (!ifindex)
1230 return -ENODEV;
1231
1232 skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
1233 if (!skb)
1234 return -ENOMEM;
1235
1236 can_skb_reserve(skb);
1237
1238 err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
1239 if (err < 0) {
1240 kfree_skb(skb);
1241 return err;
1242 }
1243
1244 dev = dev_get_by_index(sock_net(sk), ifindex);
1245 if (!dev) {
1246 kfree_skb(skb);
1247 return -ENODEV;
1248 }
1249
1250 can_skb_prv(skb)->ifindex = dev->ifindex;
1251 can_skb_prv(skb)->skbcnt = 0;
1252 skb->dev = dev;
1253 can_skb_set_owner(skb, sk);
1254 err = can_send(skb, 1); /* send with loopback */
1255 dev_put(dev);
1256
1257 if (err)
1258 return err;
1259
1260 return cfsiz + MHSIZ;
1261 }
1262
1263 /*
1264  * bcm_sendmsg - process BCM commands (opcodes) from userspace
1265 */
1266 static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1267 {
1268 struct sock *sk = sock->sk;
1269 struct bcm_sock *bo = bcm_sk(sk);
1270 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1271 struct bcm_msg_head msg_head;
1272 int cfsiz;
1273 int ret; /* read bytes or error codes as return value */
1274
1275 if (!bo->bound)
1276 return -ENOTCONN;
1277
1278 /* check for valid message length from userspace */
1279 if (size < MHSIZ)
1280 return -EINVAL;
1281
1282 /* read message head information */
1283 ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
1284 if (ret < 0)
1285 return ret;
1286
1287 cfsiz = CFSIZ(msg_head.flags);
1288 if ((size - MHSIZ) % cfsiz)
1289 return -EINVAL;
1290
1291 /* check for alternative ifindex for this bcm_op */
1292
1293 if (!ifindex && msg->msg_name) {
1294 /* no bound device as default => check msg_name */
1295 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
1296
1297 if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
1298 return -EINVAL;
1299
1300 if (addr->can_family != AF_CAN)
1301 return -EINVAL;
1302
1303 /* ifindex from sendto() */
1304 ifindex = addr->can_ifindex;
1305
1306 if (ifindex) {
1307 struct net_device *dev;
1308
1309 dev = dev_get_by_index(sock_net(sk), ifindex);
1310 if (!dev)
1311 return -ENODEV;
1312
1313 if (dev->type != ARPHRD_CAN) {
1314 dev_put(dev);
1315 return -ENODEV;
1316 }
1317
1318 dev_put(dev);
1319 }
1320 }
1321
1322 lock_sock(sk);
1323
1324 switch (msg_head.opcode) {
1325
1326 case TX_SETUP:
1327 ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1328 break;
1329
1330 case RX_SETUP:
1331 ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1332 break;
1333
1334 case TX_DELETE:
1335 if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
1336 ret = MHSIZ;
1337 else
1338 ret = -EINVAL;
1339 break;
1340
1341 case RX_DELETE:
1342 if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
1343 ret = MHSIZ;
1344 else
1345 ret = -EINVAL;
1346 break;
1347
1348 case TX_READ:
1349 /* reuse msg_head for the reply to TX_READ */
1350 msg_head.opcode = TX_STATUS;
1351 ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1352 break;
1353
1354 case RX_READ:
1355 /* reuse msg_head for the reply to RX_READ */
1356 msg_head.opcode = RX_STATUS;
1357 ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1358 break;
1359
1360 case TX_SEND:
1361 /* we need exactly one CAN frame behind the msg head */
1362 if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
1363 ret = -EINVAL;
1364 else
1365 ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
1366 break;
1367
1368 default:
1369 ret = -EINVAL;
1370 break;
1371 }
1372
1373 release_sock(sk);
1374
1375 return ret;
1376 }
1377
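/*
 * Editorial sketch (not part of the original file): besides the cyclic and
 * filter ops above, a single frame can be pushed through a connected
 * CAN_BCM socket with the TX_SEND opcode handled in bcm_sendmsg(). The
 * helper name and the 'sock' argument are hypothetical; the socket is set
 * up as in the earlier sketches.
 */
#include <unistd.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

static int bcm_send_once(int sock, canid_t can_id)
{
	struct {
		struct bcm_msg_head head;
		struct can_frame frame;
	} msg = { 0 };

	msg.head.opcode   = TX_SEND;
	msg.head.can_id   = can_id;
	msg.head.nframes  = 1;		/* TX_SEND takes exactly one frame */
	msg.frame.can_id  = can_id;
	msg.frame.can_dlc = 0;

	return write(sock, &msg, sizeof(msg)) < 0 ? -1 : 0;
}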
1378 /*
1379 * notification handler for netdevice status changes
1380 */
1381 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1382 void *ptr)
1383 {
1384 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1385 struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1386 struct sock *sk = &bo->sk;
1387 struct bcm_op *op;
1388 int notify_enodev = 0;
1389
1390 if (!net_eq(dev_net(dev), sock_net(sk)))
1391 return NOTIFY_DONE;
1392
1393 if (dev->type != ARPHRD_CAN)
1394 return NOTIFY_DONE;
1395
1396 switch (msg) {
1397
1398 case NETDEV_UNREGISTER:
1399 lock_sock(sk);
1400
1401 /* remove device specific receive entries */
1402 list_for_each_entry(op, &bo->rx_ops, list)
1403 if (op->rx_reg_dev == dev)
1404 bcm_rx_unreg(dev, op);
1405
1406 /* remove device reference, if this is our bound device */
1407 if (bo->bound && bo->ifindex == dev->ifindex) {
1408 bo->bound = 0;
1409 bo->ifindex = 0;
1410 notify_enodev = 1;
1411 }
1412
1413 release_sock(sk);
1414
1415 if (notify_enodev) {
1416 sk->sk_err = ENODEV;
1417 if (!sock_flag(sk, SOCK_DEAD))
1418 sk->sk_error_report(sk);
1419 }
1420 break;
1421
1422 case NETDEV_DOWN:
1423 if (bo->bound && bo->ifindex == dev->ifindex) {
1424 sk->sk_err = ENETDOWN;
1425 if (!sock_flag(sk, SOCK_DEAD))
1426 sk->sk_error_report(sk);
1427 }
1428 }
1429
1430 return NOTIFY_DONE;
1431 }
1432
1433 /*
1434 * initial settings for all BCM sockets to be set at socket creation time
1435 */
1436 static int bcm_init(struct sock *sk)
1437 {
1438 struct bcm_sock *bo = bcm_sk(sk);
1439
1440 bo->bound = 0;
1441 bo->ifindex = 0;
1442 bo->dropped_usr_msgs = 0;
1443 bo->bcm_proc_read = NULL;
1444
1445 INIT_LIST_HEAD(&bo->tx_ops);
1446 INIT_LIST_HEAD(&bo->rx_ops);
1447
1448 /* set notifier */
1449 bo->notifier.notifier_call = bcm_notifier;
1450
1451 register_netdevice_notifier(&bo->notifier);
1452
1453 return 0;
1454 }
1455
1456 /*
1457 * standard socket functions
1458 */
1459 static int bcm_release(struct socket *sock)
1460 {
1461 struct sock *sk = sock->sk;
1462 struct net *net;
1463 struct bcm_sock *bo;
1464 struct bcm_op *op, *next;
1465
1466 if (!sk)
1467 return 0;
1468
1469 net = sock_net(sk);
1470 bo = bcm_sk(sk);
1471
1472 /* remove bcm_ops, timer, rx_unregister(), etc. */
1473
1474 unregister_netdevice_notifier(&bo->notifier);
1475
1476 lock_sock(sk);
1477
1478 list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1479 bcm_remove_op(op);
1480
1481 list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1482 /*
1483                  * Don't care if we're bound or not (due to netdev problems);
1484                  * can_rx_unregister() is always a safe thing to do here.
1485 */
1486 if (op->ifindex) {
1487 /*
1488 * Only remove subscriptions that had not
1489 * been removed due to NETDEV_UNREGISTER
1490 * in bcm_notifier()
1491 */
1492 if (op->rx_reg_dev) {
1493 struct net_device *dev;
1494
1495 dev = dev_get_by_index(net, op->ifindex);
1496 if (dev) {
1497 bcm_rx_unreg(dev, op);
1498 dev_put(dev);
1499 }
1500 }
1501 } else
1502 can_rx_unregister(net, NULL, op->can_id,
1503 REGMASK(op->can_id),
1504 bcm_rx_handler, op);
1505
1506 bcm_remove_op(op);
1507 }
1508
1509 #if IS_ENABLED(CONFIG_PROC_FS)
1510 /* remove procfs entry */
1511 if (net->can.bcmproc_dir && bo->bcm_proc_read)
1512 remove_proc_entry(bo->procname, net->can.bcmproc_dir);
1513 #endif /* CONFIG_PROC_FS */
1514
1515 /* remove device reference */
1516 if (bo->bound) {
1517 bo->bound = 0;
1518 bo->ifindex = 0;
1519 }
1520
1521 sock_orphan(sk);
1522 sock->sk = NULL;
1523
1524 release_sock(sk);
1525 sock_put(sk);
1526
1527 return 0;
1528 }
1529
1530 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1531 int flags)
1532 {
1533 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1534 struct sock *sk = sock->sk;
1535 struct bcm_sock *bo = bcm_sk(sk);
1536 struct net *net = sock_net(sk);
1537 int ret = 0;
1538
1539 if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
1540 return -EINVAL;
1541
1542 lock_sock(sk);
1543
1544 if (bo->bound) {
1545 ret = -EISCONN;
1546 goto fail;
1547 }
1548
1549 /* bind a device to this socket */
1550 if (addr->can_ifindex) {
1551 struct net_device *dev;
1552
1553 dev = dev_get_by_index(net, addr->can_ifindex);
1554 if (!dev) {
1555 ret = -ENODEV;
1556 goto fail;
1557 }
1558 if (dev->type != ARPHRD_CAN) {
1559 dev_put(dev);
1560 ret = -ENODEV;
1561 goto fail;
1562 }
1563
1564 bo->ifindex = dev->ifindex;
1565 dev_put(dev);
1566
1567 } else {
1568 /* no interface reference for ifindex = 0 ('any' CAN device) */
1569 bo->ifindex = 0;
1570 }
1571
1572 #if IS_ENABLED(CONFIG_PROC_FS)
1573 if (net->can.bcmproc_dir) {
1574 /* unique socket address as filename */
1575 sprintf(bo->procname, "%lu", sock_i_ino(sk));
1576 bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
1577 net->can.bcmproc_dir,
1578 bcm_proc_show, sk);
1579 if (!bo->bcm_proc_read) {
1580 ret = -ENOMEM;
1581 goto fail;
1582 }
1583 }
1584 #endif /* CONFIG_PROC_FS */
1585
1586 bo->bound = 1;
1587
1588 fail:
1589 release_sock(sk);
1590
1591 return ret;
1592 }
1593
1594 static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1595 int flags)
1596 {
1597 struct sock *sk = sock->sk;
1598 struct sk_buff *skb;
1599 int error = 0;
1600 int noblock;
1601 int err;
1602
1603 noblock = flags & MSG_DONTWAIT;
1604 flags &= ~MSG_DONTWAIT;
1605 skb = skb_recv_datagram(sk, flags, noblock, &error);
1606 if (!skb)
1607 return error;
1608
1609 if (skb->len < size)
1610 size = skb->len;
1611
1612 err = memcpy_to_msg(msg, skb->data, size);
1613 if (err < 0) {
1614 skb_free_datagram(sk, skb);
1615 return err;
1616 }
1617
1618 sock_recv_ts_and_drops(msg, sk, skb);
1619
1620 if (msg->msg_name) {
1621 __sockaddr_check_size(sizeof(struct sockaddr_can));
1622 msg->msg_namelen = sizeof(struct sockaddr_can);
1623 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1624 }
1625
1626 skb_free_datagram(sk, skb);
1627
1628 return size;
1629 }
1630
1631 static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
1632 unsigned long arg)
1633 {
1634 /* no ioctls for socket layer -> hand it down to NIC layer */
1635 return -ENOIOCTLCMD;
1636 }
1637
1638 static const struct proto_ops bcm_ops = {
1639 .family = PF_CAN,
1640 .release = bcm_release,
1641 .bind = sock_no_bind,
1642 .connect = bcm_connect,
1643 .socketpair = sock_no_socketpair,
1644 .accept = sock_no_accept,
1645 .getname = sock_no_getname,
1646 .poll = datagram_poll,
1647 .ioctl = bcm_sock_no_ioctlcmd,
1648 .gettstamp = sock_gettstamp,
1649 .listen = sock_no_listen,
1650 .shutdown = sock_no_shutdown,
1651 .sendmsg = bcm_sendmsg,
1652 .recvmsg = bcm_recvmsg,
1653 .mmap = sock_no_mmap,
1654 .sendpage = sock_no_sendpage,
1655 };
1656
1657 static struct proto bcm_proto __read_mostly = {
1658 .name = "CAN_BCM",
1659 .owner = THIS_MODULE,
1660 .obj_size = sizeof(struct bcm_sock),
1661 .init = bcm_init,
1662 };
1663
1664 static const struct can_proto bcm_can_proto = {
1665 .type = SOCK_DGRAM,
1666 .protocol = CAN_BCM,
1667 .ops = &bcm_ops,
1668 .prot = &bcm_proto,
1669 };
1670
1671 static int canbcm_pernet_init(struct net *net)
1672 {
1673 #if IS_ENABLED(CONFIG_PROC_FS)
1674 /* create /proc/net/can-bcm directory */
1675 net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
1676 #endif /* CONFIG_PROC_FS */
1677
1678 return 0;
1679 }
1680
1681 static void canbcm_pernet_exit(struct net *net)
1682 {
1683 #if IS_ENABLED(CONFIG_PROC_FS)
1684 /* remove /proc/net/can-bcm directory */
1685 if (net->can.bcmproc_dir)
1686 remove_proc_entry("can-bcm", net->proc_net);
1687 #endif /* CONFIG_PROC_FS */
1688 }
1689
1690 static struct pernet_operations canbcm_pernet_ops __read_mostly = {
1691 .init = canbcm_pernet_init,
1692 .exit = canbcm_pernet_exit,
1693 };
1694
1695 static int __init bcm_module_init(void)
1696 {
1697 int err;
1698
1699 pr_info("can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n");
1700
1701 err = can_proto_register(&bcm_can_proto);
1702 if (err < 0) {
1703 printk(KERN_ERR "can: registration of bcm protocol failed\n");
1704 return err;
1705 }
1706
1707 register_pernet_subsys(&canbcm_pernet_ops);
1708 return 0;
1709 }
1710
1711 static void __exit bcm_module_exit(void)
1712 {
1713 can_proto_unregister(&bcm_can_proto);
1714 unregister_pernet_subsys(&canbcm_pernet_ops);
1715 }
1716
1717 module_init(bcm_module_init);
1718 module_exit(bcm_module_exit);