1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
8 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
9 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
10 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
11 */
12
13 #include <linux/capability.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/init.h>
17 #include <linux/errno.h>
18 #include <linux/types.h>
19 #include <linux/socket.h>
20 #include <linux/in.h>
21 #include <linux/kernel.h>
22 #include <linux/sched.h>
23 #include <linux/spinlock.h>
24 #include <linux/timer.h>
25 #include <linux/string.h>
26 #include <linux/sockios.h>
27 #include <linux/net.h>
28 #include <linux/stat.h>
29 #include <net/net_namespace.h>
30 #include <net/ax25.h>
31 #include <linux/inet.h>
32 #include <linux/netdevice.h>
33 #include <linux/if_arp.h>
34 #include <linux/skbuff.h>
35 #include <net/sock.h>
36 #include <asm/system.h>
37 #include <asm/uaccess.h>
38 #include <linux/fcntl.h>
39 #include <linux/termios.h>
40 #include <linux/mm.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <net/rose.h>
44 #include <linux/proc_fs.h>
45 #include <linux/seq_file.h>
46 #include <net/tcp_states.h>
47 #include <net/ip.h>
48 #include <net/arp.h>
49
50 static int rose_ndevs = 10;
51
52 int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
53 int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
54 int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2;
55 int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3;
56 int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE;
57 int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB;
58 int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING;
59 int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
60 int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
61 int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
62
63 static HLIST_HEAD(rose_list);
64 static DEFINE_SPINLOCK(rose_list_lock);
65
66 static struct proto_ops rose_proto_ops;
67
68 ax25_address rose_callsign;
69
70 /*
71 * ROSE network devices are virtual network devices encapsulating ROSE
72 * frames into AX.25 which will be sent through an AX.25 device, so form a
73 * special "super class" of normal net devices; split their locks off into a
74 * separate class since they always nest.
75 */
76 static struct lock_class_key rose_netdev_xmit_lock_key;
77 static struct lock_class_key rose_netdev_addr_lock_key;
78
79 static void rose_set_lockdep_one(struct net_device *dev,
80 struct netdev_queue *txq,
81 void *_unused)
82 {
83 lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
84 }
85
86 static void rose_set_lockdep_key(struct net_device *dev)
87 {
88 lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
89 netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
90 }
91
92 /*
93 * Convert a ROSE address into text.
94 */
95 const char *rose2asc(const rose_address *addr)
96 {
97 static char buffer[11];
98
99 if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
100 addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
101 addr->rose_addr[4] == 0x00) {
102 strcpy(buffer, "*");
103 } else {
104 sprintf(buffer, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
105 addr->rose_addr[1] & 0xFF,
106 addr->rose_addr[2] & 0xFF,
107 addr->rose_addr[3] & 0xFF,
108 addr->rose_addr[4] & 0xFF);
109 }
110
111 return buffer;
112 }
113
114 /*
115 * Compare two ROSE addresses, 0 == equal.
116 */
117 int rosecmp(rose_address *addr1, rose_address *addr2)
118 {
119 int i;
120
121 for (i = 0; i < 5; i++)
122 if (addr1->rose_addr[i] != addr2->rose_addr[i])
123 return 1;
124
125 return 0;
126 }
127
128 /*
129 * Compare two ROSE addresses for only mask digits, 0 == equal.
130 */
131 int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
132 {
133 unsigned int i, j;
134
135 if (mask > 10)
136 return 1;
137
138 for (i = 0; i < mask; i++) {
139 j = i / 2;
140
141 if ((i % 2) != 0) {
142 if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
143 return 1;
144 } else {
145 if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
146 return 1;
147 }
148 }
149
150 return 0;
151 }
152
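/*
 *	The helpers above treat a ROSE address as ten BCD digits packed two
 *	per octet, most significant digit in the high nibble.  A minimal
 *	userspace-style sketch of the inverse of rose2asc(), assuming
 *	<string.h> and the rose_address type from the ROSE uapi header; the
 *	helper name is made up for the example:
 *
 *		static int example_rose_aton(const char *text, rose_address *addr)
 *		{
 *			int i;
 *
 *			if (strlen(text) != 10)
 *				return -1;
 *			memset(addr, 0, sizeof(*addr));
 *			for (i = 0; i < 10; i++) {
 *				if (text[i] < '0' || text[i] > '9')
 *					return -1;
 *				addr->rose_addr[i / 2] |=
 *					(text[i] - '0') << ((i % 2) ? 0 : 4);
 *			}
 *			return 0;
 *		}
 *
 *	With this packing rosecmpm(a, b, 4) succeeds when the first four
 *	digits (typically the DNIC) agree, which is how the routing code
 *	matches address prefixes.
 */
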
153 /*
154 * Socket removal during an interrupt is now safe.
155 */
156 static void rose_remove_socket(struct sock *sk)
157 {
158 spin_lock_bh(&rose_list_lock);
159 sk_del_node_init(sk);
160 spin_unlock_bh(&rose_list_lock);
161 }
162
163 /*
164 * Kill all bound sockets on a broken link layer connection to a
165 * particular neighbour.
166 */
167 void rose_kill_by_neigh(struct rose_neigh *neigh)
168 {
169 struct sock *s;
170 struct hlist_node *node;
171
172 spin_lock_bh(&rose_list_lock);
173 sk_for_each(s, node, &rose_list) {
174 struct rose_sock *rose = rose_sk(s);
175
176 if (rose->neighbour == neigh) {
177 rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
178 rose->neighbour->use--;
179 rose->neighbour = NULL;
180 }
181 }
182 spin_unlock_bh(&rose_list_lock);
183 }
184
185 /*
186 * Kill all bound sockets on a dropped device.
187 */
188 static void rose_kill_by_device(struct net_device *dev)
189 {
190 struct sock *s;
191 struct hlist_node *node;
192
193 spin_lock_bh(&rose_list_lock);
194 sk_for_each(s, node, &rose_list) {
195 struct rose_sock *rose = rose_sk(s);
196
197 if (rose->device == dev) {
198 rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
199 if (rose->neighbour) rose->neighbour->use--;
200 rose->device = NULL;
201 }
202 }
203 spin_unlock_bh(&rose_list_lock);
204 }
205
206 /*
207 * Handle device status changes.
208 */
209 static int rose_device_event(struct notifier_block *this, unsigned long event,
210 void *ptr)
211 {
212 struct net_device *dev = (struct net_device *)ptr;
213
214 if (!net_eq(dev_net(dev), &init_net))
215 return NOTIFY_DONE;
216
217 if (event != NETDEV_DOWN)
218 return NOTIFY_DONE;
219
220 switch (dev->type) {
221 case ARPHRD_ROSE:
222 rose_kill_by_device(dev);
223 break;
224 case ARPHRD_AX25:
225 rose_link_device_down(dev);
226 rose_rt_device_down(dev);
227 break;
228 }
229
230 return NOTIFY_DONE;
231 }
232
233 /*
234 * Add a socket to the bound sockets list.
235 */
236 static void rose_insert_socket(struct sock *sk)
237 {
238
239 spin_lock_bh(&rose_list_lock);
240 sk_add_node(sk, &rose_list);
241 spin_unlock_bh(&rose_list_lock);
242 }
243
244 /*
245 * Find a socket that wants to accept the Call Request we just
246 * received.
247 */
248 static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
249 {
250 struct sock *s;
251 struct hlist_node *node;
252
253 spin_lock_bh(&rose_list_lock);
254 sk_for_each(s, node, &rose_list) {
255 struct rose_sock *rose = rose_sk(s);
256
257 if (!rosecmp(&rose->source_addr, addr) &&
258 !ax25cmp(&rose->source_call, call) &&
259 !rose->source_ndigis && s->sk_state == TCP_LISTEN)
260 goto found;
261 }
262
263 sk_for_each(s, node, &rose_list) {
264 struct rose_sock *rose = rose_sk(s);
265
266 if (!rosecmp(&rose->source_addr, addr) &&
267 !ax25cmp(&rose->source_call, &null_ax25_address) &&
268 s->sk_state == TCP_LISTEN)
269 goto found;
270 }
271 s = NULL;
272 found:
273 spin_unlock_bh(&rose_list_lock);
274 return s;
275 }
276
277 /*
278 * Find a connected ROSE socket given my LCI and device.
279 */
280 struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
281 {
282 struct sock *s;
283 struct hlist_node *node;
284
285 spin_lock_bh(&rose_list_lock);
286 sk_for_each(s, node, &rose_list) {
287 struct rose_sock *rose = rose_sk(s);
288
289 if (rose->lci == lci && rose->neighbour == neigh)
290 goto found;
291 }
292 s = NULL;
293 found:
294 spin_unlock_bh(&rose_list_lock);
295 return s;
296 }
297
298 /*
299  * Find an unused LCI on a given neighbour; a DCE searches up from 1, a DTE down from the maximum, so both ends rarely pick the same LCI.
300  */
301 unsigned int rose_new_lci(struct rose_neigh *neigh)
302 {
303 int lci;
304
305 if (neigh->dce_mode) {
306 for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
307 if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
308 return lci;
309 } else {
310 for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
311 if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
312 return lci;
313 }
314
315 return 0;
316 }
317
318 /*
319 * Deferred destroy.
320 */
321 void rose_destroy_socket(struct sock *);
322
323 /*
324 * Handler for deferred kills.
325 */
326 static void rose_destroy_timer(unsigned long data)
327 {
328 rose_destroy_socket((struct sock *)data);
329 }
330
331 /*
332 * This is called from user mode and the timers. Thus it protects itself
333 * against interrupt users but doesn't worry about being called during
334 * work. Once it is removed from the queue no interrupt or bottom half
335 * will touch it and we are (fairly 8-) ) safe.
336 */
337 void rose_destroy_socket(struct sock *sk)
338 {
339 struct sk_buff *skb;
340
341 rose_remove_socket(sk);
342 rose_stop_heartbeat(sk);
343 rose_stop_idletimer(sk);
344 rose_stop_timer(sk);
345
346 rose_clear_queues(sk); /* Flush the queues */
347
348 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
349 if (skb->sk != sk) { /* A pending connection */
350 /* Queue the unaccepted socket for death */
351 sock_set_flag(skb->sk, SOCK_DEAD);
352 rose_start_heartbeat(skb->sk);
353 rose_sk(skb->sk)->state = ROSE_STATE_0;
354 }
355
356 kfree_skb(skb);
357 }
358
359 if (atomic_read(&sk->sk_wmem_alloc) ||
360 atomic_read(&sk->sk_rmem_alloc)) {
361 /* Defer: outstanding buffers */
362 setup_timer(&sk->sk_timer, rose_destroy_timer,
363 (unsigned long)sk);
364 sk->sk_timer.expires = jiffies + 10 * HZ;
365 add_timer(&sk->sk_timer);
366 } else
367 sock_put(sk);
368 }
369
370 /*
371 * Handling for system calls applied via the various interfaces to a
372 * ROSE socket object.
373 */
374
375 static int rose_setsockopt(struct socket *sock, int level, int optname,
376 char __user *optval, int optlen)
377 {
378 struct sock *sk = sock->sk;
379 struct rose_sock *rose = rose_sk(sk);
380 int opt;
381
382 if (level != SOL_ROSE)
383 return -ENOPROTOOPT;
384
385 if (optlen < sizeof(int))
386 return -EINVAL;
387
388 if (get_user(opt, (int __user *)optval))
389 return -EFAULT;
390
391 switch (optname) {
392 case ROSE_DEFER:
393 rose->defer = opt ? 1 : 0;
394 return 0;
395
396 case ROSE_T1:
397 if (opt < 1)
398 return -EINVAL;
399 rose->t1 = opt * HZ;
400 return 0;
401
402 case ROSE_T2:
403 if (opt < 1)
404 return -EINVAL;
405 rose->t2 = opt * HZ;
406 return 0;
407
408 case ROSE_T3:
409 if (opt < 1)
410 return -EINVAL;
411 rose->t3 = opt * HZ;
412 return 0;
413
414 case ROSE_HOLDBACK:
415 if (opt < 1)
416 return -EINVAL;
417 rose->hb = opt * HZ;
418 return 0;
419
420 case ROSE_IDLE:
421 if (opt < 0)
422 return -EINVAL;
423 rose->idle = opt * 60 * HZ;
424 return 0;
425
426 case ROSE_QBITINCL:
427 rose->qbitincl = opt ? 1 : 0;
428 return 0;
429
430 default:
431 return -ENOPROTOOPT;
432 }
433 }
434
435 static int rose_getsockopt(struct socket *sock, int level, int optname,
436 char __user *optval, int __user *optlen)
437 {
438 struct sock *sk = sock->sk;
439 struct rose_sock *rose = rose_sk(sk);
440 int val = 0;
441 int len;
442
443 if (level != SOL_ROSE)
444 return -ENOPROTOOPT;
445
446 if (get_user(len, optlen))
447 return -EFAULT;
448
449 if (len < 0)
450 return -EINVAL;
451
452 switch (optname) {
453 case ROSE_DEFER:
454 val = rose->defer;
455 break;
456
457 case ROSE_T1:
458 val = rose->t1 / HZ;
459 break;
460
461 case ROSE_T2:
462 val = rose->t2 / HZ;
463 break;
464
465 case ROSE_T3:
466 val = rose->t3 / HZ;
467 break;
468
469 case ROSE_HOLDBACK:
470 val = rose->hb / HZ;
471 break;
472
473 case ROSE_IDLE:
474 val = rose->idle / (60 * HZ);
475 break;
476
477 case ROSE_QBITINCL:
478 val = rose->qbitincl;
479 break;
480
481 default:
482 return -ENOPROTOOPT;
483 }
484
485 len = min_t(unsigned int, len, sizeof(int));
486
487 if (put_user(len, optlen))
488 return -EFAULT;
489
490 return copy_to_user(optval, &val, len) ? -EFAULT : 0;
491 }
492
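/*
 *	Userspace sketch (illustrative, error handling omitted): ROSE_T1, T2,
 *	T3 and ROSE_HOLDBACK are given in whole seconds, ROSE_IDLE in minutes,
 *	all at level SOL_ROSE.  The includes are assumptions; SOL_ROSE and the
 *	option names come from glibc's <netrose/rose.h> / the kernel uapi
 *	headers.
 *
 *		#include <sys/socket.h>
 *		#include <netrose/rose.h>
 *
 *		int s = socket(AF_ROSE, SOCK_SEQPACKET, 0);
 *		int t1 = 200, idle = 20, on = 1;
 *
 *		setsockopt(s, SOL_ROSE, ROSE_T1, &t1, sizeof(t1));
 *		setsockopt(s, SOL_ROSE, ROSE_IDLE, &idle, sizeof(idle));
 *		setsockopt(s, SOL_ROSE, ROSE_QBITINCL, &on, sizeof(on));
 */
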
493 static int rose_listen(struct socket *sock, int backlog)
494 {
495 struct sock *sk = sock->sk;
496
497 if (sk->sk_state != TCP_LISTEN) {
498 struct rose_sock *rose = rose_sk(sk);
499
500 rose->dest_ndigis = 0;
501 memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
502 memset(&rose->dest_call, 0, AX25_ADDR_LEN);
503 memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
504 sk->sk_max_ack_backlog = backlog;
505 sk->sk_state = TCP_LISTEN;
506 return 0;
507 }
508
509 return -EOPNOTSUPP;
510 }
511
512 static struct proto rose_proto = {
513 .name = "ROSE",
514 .owner = THIS_MODULE,
515 .obj_size = sizeof(struct rose_sock),
516 };
517
518 static int rose_create(struct net *net, struct socket *sock, int protocol)
519 {
520 struct sock *sk;
521 struct rose_sock *rose;
522
523 if (net != &init_net)
524 return -EAFNOSUPPORT;
525
526 if (sock->type != SOCK_SEQPACKET || protocol != 0)
527 return -ESOCKTNOSUPPORT;
528
529 sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
530 if (sk == NULL)
531 return -ENOMEM;
532
533 rose = rose_sk(sk);
534
535 sock_init_data(sock, sk);
536
537 skb_queue_head_init(&rose->ack_queue);
538 #ifdef M_BIT
539 skb_queue_head_init(&rose->frag_queue);
540 rose->fraglen = 0;
541 #endif
542
543 sock->ops = &rose_proto_ops;
544 sk->sk_protocol = protocol;
545
546 init_timer(&rose->timer);
547 init_timer(&rose->idletimer);
548
549 rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
550 rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
551 rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
552 rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
553 rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
554
555 rose->state = ROSE_STATE_0;
556
557 return 0;
558 }
559
560 static struct sock *rose_make_new(struct sock *osk)
561 {
562 struct sock *sk;
563 struct rose_sock *rose, *orose;
564
565 if (osk->sk_type != SOCK_SEQPACKET)
566 return NULL;
567
568 sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
569 if (sk == NULL)
570 return NULL;
571
572 rose = rose_sk(sk);
573
574 sock_init_data(NULL, sk);
575
576 skb_queue_head_init(&rose->ack_queue);
577 #ifdef M_BIT
578 skb_queue_head_init(&rose->frag_queue);
579 rose->fraglen = 0;
580 #endif
581
582 sk->sk_type = osk->sk_type;
583 sk->sk_priority = osk->sk_priority;
584 sk->sk_protocol = osk->sk_protocol;
585 sk->sk_rcvbuf = osk->sk_rcvbuf;
586 sk->sk_sndbuf = osk->sk_sndbuf;
587 sk->sk_state = TCP_ESTABLISHED;
588 sock_copy_flags(sk, osk);
589
590 init_timer(&rose->timer);
591 init_timer(&rose->idletimer);
592
593 orose = rose_sk(osk);
594 rose->t1 = orose->t1;
595 rose->t2 = orose->t2;
596 rose->t3 = orose->t3;
597 rose->hb = orose->hb;
598 rose->idle = orose->idle;
599 rose->defer = orose->defer;
600 rose->device = orose->device;
601 rose->qbitincl = orose->qbitincl;
602
603 return sk;
604 }
605
606 static int rose_release(struct socket *sock)
607 {
608 struct sock *sk = sock->sk;
609 struct rose_sock *rose;
610
611 if (sk == NULL) return 0;
612
613 sock_hold(sk);
614 sock_orphan(sk);
615 lock_sock(sk);
616 rose = rose_sk(sk);
617
618 switch (rose->state) {
619 case ROSE_STATE_0:
620 release_sock(sk);
621 rose_disconnect(sk, 0, -1, -1);
622 lock_sock(sk);
623 rose_destroy_socket(sk);
624 break;
625
626 case ROSE_STATE_2:
627 rose->neighbour->use--;
628 release_sock(sk);
629 rose_disconnect(sk, 0, -1, -1);
630 lock_sock(sk);
631 rose_destroy_socket(sk);
632 break;
633
634 case ROSE_STATE_1:
635 case ROSE_STATE_3:
636 case ROSE_STATE_4:
637 case ROSE_STATE_5:
638 rose_clear_queues(sk);
639 rose_stop_idletimer(sk);
640 rose_write_internal(sk, ROSE_CLEAR_REQUEST);
641 rose_start_t3timer(sk);
642 rose->state = ROSE_STATE_2;
643 sk->sk_state = TCP_CLOSE;
644 sk->sk_shutdown |= SEND_SHUTDOWN;
645 sk->sk_state_change(sk);
646 sock_set_flag(sk, SOCK_DEAD);
647 sock_set_flag(sk, SOCK_DESTROY);
648 break;
649
650 default:
651 break;
652 }
653
654 sock->sk = NULL;
655 release_sock(sk);
656 sock_put(sk);
657
658 return 0;
659 }
660
661 static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
662 {
663 struct sock *sk = sock->sk;
664 struct rose_sock *rose = rose_sk(sk);
665 struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
666 struct net_device *dev;
667 ax25_address *source;
668 ax25_uid_assoc *user;
669 int n;
670
671 if (!sock_flag(sk, SOCK_ZAPPED))
672 return -EINVAL;
673
674 if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
675 return -EINVAL;
676
677 if (addr->srose_family != AF_ROSE)
678 return -EINVAL;
679
680 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
681 return -EINVAL;
682
683 if (addr->srose_ndigis > ROSE_MAX_DIGIS)
684 return -EINVAL;
685
686 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
687 SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
688 return -EADDRNOTAVAIL;
689 }
690
691 source = &addr->srose_call;
692
693 user = ax25_findbyuid(current->euid);
694 if (user) {
695 rose->source_call = user->call;
696 ax25_uid_put(user);
697 } else {
698 if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
699 return -EACCES;
700 rose->source_call = *source;
701 }
702
703 rose->source_addr = addr->srose_addr;
704 rose->device = dev;
705 rose->source_ndigis = addr->srose_ndigis;
706
707 if (addr_len == sizeof(struct full_sockaddr_rose)) {
708 struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
709 for (n = 0 ; n < addr->srose_ndigis ; n++)
710 rose->source_digis[n] = full_addr->srose_digis[n];
711 } else {
712 if (rose->source_ndigis == 1) {
713 rose->source_digis[0] = addr->srose_digi;
714 }
715 }
716
717 rose_insert_socket(sk);
718
719 sock_reset_flag(sk, SOCK_ZAPPED);
720 SOCK_DEBUG(sk, "ROSE: socket is bound\n");
721 return 0;
722 }
723
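/*
 *	Illustrative bind sketch, assuming struct full_sockaddr_rose from the
 *	ROSE uapi header.  srose_addr is the 10-digit BCD address of a local
 *	roseN interface (see the packing sketch near rosecmpm()); srose_call
 *	is an AX.25 encoded callsign, e.g. from libax25's ax25_aton_entry().
 *	When AX.25 uid policy is in force the callsign may be replaced by the
 *	one mapped to the caller's uid, as rose_bind() above shows.
 *
 *		struct full_sockaddr_rose loc;
 *
 *		memset(&loc, 0, sizeof(loc));
 *		loc.srose_family = AF_ROSE;
 *		loc.srose_ndigis = 0;
 *		... pack loc.srose_addr and loc.srose_call here ...
 *
 *		if (bind(s, (struct sockaddr *)&loc, sizeof(loc)) < 0)
 *			perror("bind");
 */
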
724 static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
725 {
726 struct sock *sk = sock->sk;
727 struct rose_sock *rose = rose_sk(sk);
728 struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
729 unsigned char cause, diagnostic;
730 struct net_device *dev;
731 ax25_uid_assoc *user;
732 int n, err = 0;
733
734 if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
735 return -EINVAL;
736
737 if (addr->srose_family != AF_ROSE)
738 return -EINVAL;
739
740 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
741 return -EINVAL;
742
743 if (addr->srose_ndigis > ROSE_MAX_DIGIS)
744 return -EINVAL;
745
746 /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
747 if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
748 return -EINVAL;
749
750 lock_sock(sk);
751
752 if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
753 /* Connect completed during an ERESTARTSYS event */
754 sock->state = SS_CONNECTED;
755 goto out_release;
756 }
757
758 if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
759 sock->state = SS_UNCONNECTED;
760 err = -ECONNREFUSED;
761 goto out_release;
762 }
763
764 if (sk->sk_state == TCP_ESTABLISHED) {
765 /* No reconnect on a seqpacket socket */
766 err = -EISCONN;
767 goto out_release;
768 }
769
770 sk->sk_state = TCP_CLOSE;
771 sock->state = SS_UNCONNECTED;
772
773 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
774 &diagnostic, 0);
775 if (!rose->neighbour) {
776 err = -ENETUNREACH;
777 goto out_release;
778 }
779
780 rose->lci = rose_new_lci(rose->neighbour);
781 if (!rose->lci) {
782 err = -ENETUNREACH;
783 goto out_release;
784 }
785
786 if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */
787 sock_reset_flag(sk, SOCK_ZAPPED);
788
789 if ((dev = rose_dev_first()) == NULL) {
790 err = -ENETUNREACH;
791 goto out_release;
792 }
793
794 user = ax25_findbyuid(current->euid);
795 if (!user) {
796 err = -EINVAL;
797 goto out_release;
798 }
799
800 memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
801 rose->source_call = user->call;
802 rose->device = dev;
803 ax25_uid_put(user);
804
805 rose_insert_socket(sk); /* Finish the bind */
806 }
807 rose_try_next_neigh:
808 rose->dest_addr = addr->srose_addr;
809 rose->dest_call = addr->srose_call;
810 rose->rand = ((long)rose & 0xFFFF) + rose->lci;
811 rose->dest_ndigis = addr->srose_ndigis;
812
813 if (addr_len == sizeof(struct full_sockaddr_rose)) {
814 struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
815 for (n = 0 ; n < addr->srose_ndigis ; n++)
816 rose->dest_digis[n] = full_addr->srose_digis[n];
817 } else {
818 if (rose->dest_ndigis == 1) {
819 rose->dest_digis[0] = addr->srose_digi;
820 }
821 }
822
823 /* Move to connecting socket, start sending the Call Request */
824 sock->state = SS_CONNECTING;
825 sk->sk_state = TCP_SYN_SENT;
826
827 rose->state = ROSE_STATE_1;
828
829 rose->neighbour->use++;
830
831 rose_write_internal(sk, ROSE_CALL_REQUEST);
832 rose_start_heartbeat(sk);
833 rose_start_t1timer(sk);
834
835 /* Now the loop */
836 if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
837 err = -EINPROGRESS;
838 goto out_release;
839 }
840
841 /*
842  * A Clear Request, a timeout or a routing failure will take the socket
843  * back to closed.
844 */
845 if (sk->sk_state == TCP_SYN_SENT) {
846 DEFINE_WAIT(wait);
847
848 for (;;) {
849 prepare_to_wait(sk->sk_sleep, &wait,
850 TASK_INTERRUPTIBLE);
851 if (sk->sk_state != TCP_SYN_SENT)
852 break;
853 if (!signal_pending(current)) {
854 release_sock(sk);
855 schedule();
856 lock_sock(sk);
857 continue;
858 }
859 err = -ERESTARTSYS;
860 break;
861 }
862 finish_wait(sk->sk_sleep, &wait);
863
864 if (err)
865 goto out_release;
866 }
867
868 if (sk->sk_state != TCP_ESTABLISHED) {
869 /* Try next neighbour */
870 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0);
871 if (rose->neighbour)
872 goto rose_try_next_neigh;
873
874 /* No more neighbours */
875 sock->state = SS_UNCONNECTED;
876 err = sock_error(sk); /* Always set at this point */
877 goto out_release;
878 }
879
880 sock->state = SS_CONNECTED;
881
882 out_release:
883 release_sock(sk);
884
885 return err;
886 }
887
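/*
 *	Illustrative connect sketch.  An explicit bind() is optional: as the
 *	code above shows, rose_connect() autobinds an unbound socket to the
 *	first ROSE device and to the callsign mapped to the caller's uid.
 *	With O_NONBLOCK set the call returns -EINPROGRESS while the Call
 *	Request is outstanding.
 *
 *		struct full_sockaddr_rose peer;
 *
 *		memset(&peer, 0, sizeof(peer));
 *		peer.srose_family = AF_ROSE;
 *		peer.srose_ndigis = 0;
 *		... pack peer.srose_addr and peer.srose_call as for bind() ...
 *
 *		if (connect(s, (struct sockaddr *)&peer, sizeof(peer)) < 0)
 *			perror("connect");
 */
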
888 static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
889 {
890 struct sk_buff *skb;
891 struct sock *newsk;
892 DEFINE_WAIT(wait);
893 struct sock *sk;
894 int err = 0;
895
896 if ((sk = sock->sk) == NULL)
897 return -EINVAL;
898
899 lock_sock(sk);
900 if (sk->sk_type != SOCK_SEQPACKET) {
901 err = -EOPNOTSUPP;
902 goto out_release;
903 }
904
905 if (sk->sk_state != TCP_LISTEN) {
906 err = -EINVAL;
907 goto out_release;
908 }
909
910 /*
911  * The receive queue at this point holds the skbs queued by
912  * rose_rx_call_request(); each carries the new socket for an incoming call
913 */
914 for (;;) {
915 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
916
917 skb = skb_dequeue(&sk->sk_receive_queue);
918 if (skb)
919 break;
920
921 if (flags & O_NONBLOCK) {
922 err = -EWOULDBLOCK;
923 break;
924 }
925 if (!signal_pending(current)) {
926 release_sock(sk);
927 schedule();
928 lock_sock(sk);
929 continue;
930 }
931 err = -ERESTARTSYS;
932 break;
933 }
934 finish_wait(sk->sk_sleep, &wait);
935 if (err)
936 goto out_release;
937
938 newsk = skb->sk;
939 sock_graft(newsk, newsock);
940
941 /* Now attach up the new socket */
942 skb->sk = NULL;
943 kfree_skb(skb);
944 sk->sk_ack_backlog--;
945
946 out_release:
947 release_sock(sk);
948
949 return err;
950 }
951
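/*
 *	Illustrative server-side sketch.  With ROSE_DEFER set the incoming
 *	call is parked in state 5 and only answered once the application
 *	issues SIOCRSACCEPT on the accepted socket (see rose_ioctl() below);
 *	without it the Call Accepted goes out as soon as the request arrives.
 *
 *		int on = 1, c;
 *
 *		setsockopt(s, SOL_ROSE, ROSE_DEFER, &on, sizeof(on));
 *		listen(s, 5);
 *
 *		while ((c = accept(s, NULL, NULL)) >= 0) {
 *			ioctl(c, SIOCRSACCEPT, NULL);
 *			... one virtual circuit per accepted socket ...
 *			close(c);
 *		}
 */
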
952 static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
953 int *uaddr_len, int peer)
954 {
955 struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
956 struct sock *sk = sock->sk;
957 struct rose_sock *rose = rose_sk(sk);
958 int n;
959 	memset(srose, 0, sizeof(*srose));	/* don't leak uninitialised padding to userspace */
960 if (peer != 0) {
961 if (sk->sk_state != TCP_ESTABLISHED)
962 return -ENOTCONN;
963 srose->srose_family = AF_ROSE;
964 srose->srose_addr = rose->dest_addr;
965 srose->srose_call = rose->dest_call;
966 srose->srose_ndigis = rose->dest_ndigis;
967 for (n = 0; n < rose->dest_ndigis; n++)
968 srose->srose_digis[n] = rose->dest_digis[n];
969 } else {
970 srose->srose_family = AF_ROSE;
971 srose->srose_addr = rose->source_addr;
972 srose->srose_call = rose->source_call;
973 srose->srose_ndigis = rose->source_ndigis;
974 for (n = 0; n < rose->source_ndigis; n++)
975 srose->srose_digis[n] = rose->source_digis[n];
976 }
977
978 *uaddr_len = sizeof(struct full_sockaddr_rose);
979 return 0;
980 }
981
982 int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
983 {
984 struct sock *sk;
985 struct sock *make;
986 struct rose_sock *make_rose;
987 struct rose_facilities_struct facilities;
988 int n, len;
989
990 skb->sk = NULL; /* Initially we don't know who it's for */
991
992 /*
993 * skb->data points to the rose frame start
994 */
995 memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
996
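	/*
	 *	The octet after the three header octets carries the called and
	 *	calling address digit counts in its two nibbles; rounding each
	 *	up to whole octets gives the offset at which the facilities
	 *	start.
	 */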
997 len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1;
998 len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1;
999 if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
1000 rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
1001 return 0;
1002 }
1003
1004 sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
1005
1006 /*
1007 * We can't accept the Call Request.
1008 */
1009 if (sk == NULL || sk_acceptq_is_full(sk) ||
1010 (make = rose_make_new(sk)) == NULL) {
1011 rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
1012 return 0;
1013 }
1014
1015 skb->sk = make;
1016 make->sk_state = TCP_ESTABLISHED;
1017 make_rose = rose_sk(make);
1018
1019 make_rose->lci = lci;
1020 make_rose->dest_addr = facilities.dest_addr;
1021 make_rose->dest_call = facilities.dest_call;
1022 make_rose->dest_ndigis = facilities.dest_ndigis;
1023 for (n = 0 ; n < facilities.dest_ndigis ; n++)
1024 make_rose->dest_digis[n] = facilities.dest_digis[n];
1025 make_rose->source_addr = facilities.source_addr;
1026 make_rose->source_call = facilities.source_call;
1027 make_rose->source_ndigis = facilities.source_ndigis;
1028 for (n = 0 ; n < facilities.source_ndigis ; n++)
1029 make_rose->source_digis[n]= facilities.source_digis[n];
1030 make_rose->neighbour = neigh;
1031 make_rose->device = dev;
1032 make_rose->facilities = facilities;
1033
1034 make_rose->neighbour->use++;
1035
1036 if (rose_sk(sk)->defer) {
1037 make_rose->state = ROSE_STATE_5;
1038 } else {
1039 rose_write_internal(make, ROSE_CALL_ACCEPTED);
1040 make_rose->state = ROSE_STATE_3;
1041 rose_start_idletimer(make);
1042 }
1043
1044 make_rose->condition = 0x00;
1045 make_rose->vs = 0;
1046 make_rose->va = 0;
1047 make_rose->vr = 0;
1048 make_rose->vl = 0;
1049 sk->sk_ack_backlog++;
1050
1051 rose_insert_socket(make);
1052
1053 skb_queue_head(&sk->sk_receive_queue, skb);
1054
1055 rose_start_heartbeat(make);
1056
1057 if (!sock_flag(sk, SOCK_DEAD))
1058 sk->sk_data_ready(sk, skb->len);
1059
1060 return 1;
1061 }
1062
1063 static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1064 struct msghdr *msg, size_t len)
1065 {
1066 struct sock *sk = sock->sk;
1067 struct rose_sock *rose = rose_sk(sk);
1068 struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
1069 int err;
1070 struct full_sockaddr_rose srose;
1071 struct sk_buff *skb;
1072 unsigned char *asmptr;
1073 int n, size, qbit = 0;
1074
1075 /* ROSE empty frame has no meaning : don't send */
1076 if (len == 0)
1077 return 0;
1078 if (len > 65535) return -EMSGSIZE;	/* keep size, an int, from overflowing */

1079 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1080 return -EINVAL;
1081
1082 if (sock_flag(sk, SOCK_ZAPPED))
1083 return -EADDRNOTAVAIL;
1084
1085 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1086 send_sig(SIGPIPE, current, 0);
1087 return -EPIPE;
1088 }
1089
1090 if (rose->neighbour == NULL || rose->device == NULL)
1091 return -ENETUNREACH;
1092
1093 if (usrose != NULL) {
1094 if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
1095 return -EINVAL;
1096 memset(&srose, 0, sizeof(struct full_sockaddr_rose));
1097 memcpy(&srose, usrose, msg->msg_namelen);
1098 if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
1099 ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
1100 return -EISCONN;
1101 if (srose.srose_ndigis != rose->dest_ndigis)
1102 return -EISCONN;
1103 if (srose.srose_ndigis == rose->dest_ndigis) {
1104 for (n = 0 ; n < srose.srose_ndigis ; n++)
1105 if (ax25cmp(&rose->dest_digis[n],
1106 &srose.srose_digis[n]))
1107 return -EISCONN;
1108 }
1109 if (srose.srose_family != AF_ROSE)
1110 return -EINVAL;
1111 } else {
1112 if (sk->sk_state != TCP_ESTABLISHED)
1113 return -ENOTCONN;
1114
1115 srose.srose_family = AF_ROSE;
1116 srose.srose_addr = rose->dest_addr;
1117 srose.srose_call = rose->dest_call;
1118 srose.srose_ndigis = rose->dest_ndigis;
1119 for (n = 0 ; n < rose->dest_ndigis ; n++)
1120 srose.srose_digis[n] = rose->dest_digis[n];
1121 }
1122
1123 SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");
1124
1125 /* Build a packet */
1126 SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
1127 size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1128
1129 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
1130 return err;
1131
1132 skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
1133
1134 /*
1135 * Put the data on the end
1136 */
1137 SOCK_DEBUG(sk, "ROSE: Appending user data\n");
1138
1139 skb_reset_transport_header(skb);
1140 skb_put(skb, len);
1141
1142 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1143 if (err) {
1144 kfree_skb(skb);
1145 return err;
1146 }
1147
1148 /*
1149 * If the Q BIT Include socket option is in force, the first
1150 * byte of the user data is the logical value of the Q Bit.
1151 */
1152 if (rose->qbitincl) {
1153 qbit = skb->data[0];
1154 skb_pull(skb, 1);
1155 }
1156
1157 /*
1158 * Push down the ROSE header
1159 */
1160 asmptr = skb_push(skb, ROSE_MIN_LEN);
1161
1162 SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");
1163
1164 /* Build a ROSE Network header */
1165 asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1166 asmptr[1] = (rose->lci >> 0) & 0xFF;
1167 asmptr[2] = ROSE_DATA;
1168
1169 if (qbit)
1170 asmptr[0] |= ROSE_Q_BIT;
1171
1172 SOCK_DEBUG(sk, "ROSE: Built header.\n");
1173
1174 SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");
1175
1176 if (sk->sk_state != TCP_ESTABLISHED) {
1177 kfree_skb(skb);
1178 return -ENOTCONN;
1179 }
1180
1181 #ifdef M_BIT
1182 #define ROSE_PACLEN (256-ROSE_MIN_LEN)
1183 if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
1184 unsigned char header[ROSE_MIN_LEN];
1185 struct sk_buff *skbn;
1186 int frontlen;
1187 int lg;
1188
1189 /* Save a copy of the Header */
1190 skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
1191 skb_pull(skb, ROSE_MIN_LEN);
1192
1193 frontlen = skb_headroom(skb);
1194
1195 while (skb->len > 0) {
1196 if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
1197 kfree_skb(skb);
1198 return err;
1199 }
1200
1201 skbn->sk = sk;
1202 skbn->free = 1;
1203 skbn->arp = 1;
1204
1205 skb_reserve(skbn, frontlen);
1206
1207 lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
1208
1209 /* Copy the user data */
1210 skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
1211 skb_pull(skb, lg);
1212
1213 /* Duplicate the Header */
1214 skb_push(skbn, ROSE_MIN_LEN);
1215 skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
1216
1217 if (skb->len > 0)
1218 skbn->data[2] |= M_BIT;
1219
1220 skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
1221 }
1222
1223 skb->free = 1;
1224 kfree_skb(skb);
1225 } else {
1226 skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
1227 }
1228 #else
1229 skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */
1230 #endif
1231
1232 rose_kick(sk);
1233
1234 return len;
1235 }
1236
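/*
 *	Illustrative userspace sketch: with ROSE_QBITINCL set, the first byte
 *	handed to send() is consumed as the logical Q bit rather than sent as
 *	data, mirroring the skb_pull() above.  The helper name is made up.
 *
 *		static ssize_t example_send_qbit(int s, const void *data,
 *						 size_t len, int qbit)
 *		{
 *			unsigned char buf[256];
 *
 *			if (len + 1 > sizeof(buf))
 *				return -1;
 *			buf[0] = qbit ? 1 : 0;
 *			memcpy(buf + 1, data, len);
 *			return send(s, buf, len + 1, 0);
 *		}
 */
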
1237
1238 static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1239 struct msghdr *msg, size_t size, int flags)
1240 {
1241 struct sock *sk = sock->sk;
1242 struct rose_sock *rose = rose_sk(sk);
1243 struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
1244 size_t copied;
1245 unsigned char *asmptr;
1246 struct sk_buff *skb;
1247 int n, er, qbit;
1248
1249 /*
1250 * This works for seqpacket too. The receiver has ordered the queue for
1251 * us! We do one quick check first though
1252 */
1253 if (sk->sk_state != TCP_ESTABLISHED)
1254 return -ENOTCONN;
1255
1256 /* Now we can treat all alike */
1257 if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
1258 return er;
1259
1260 qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
1261
1262 skb_pull(skb, ROSE_MIN_LEN);
1263
1264 if (rose->qbitincl) {
1265 asmptr = skb_push(skb, 1);
1266 *asmptr = qbit;
1267 }
1268
1269 skb_reset_transport_header(skb);
1270 copied = skb->len;
1271
1272 /* ROSE empty frame has no meaning : ignore it */
1273 if (copied == 0) {
1274 skb_free_datagram(sk, skb);
1275 return copied;
1276 }
1277
1278 if (copied > size) {
1279 copied = size;
1280 msg->msg_flags |= MSG_TRUNC;
1281 }
1282
1283 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1284
1285 if (srose != NULL) {
1286 srose->srose_family = AF_ROSE;
1287 srose->srose_addr = rose->dest_addr;
1288 srose->srose_call = rose->dest_call;
1289 srose->srose_ndigis = rose->dest_ndigis;
1290 if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
1291 struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
1292 for (n = 0 ; n < rose->dest_ndigis ; n++)
1293 full_srose->srose_digis[n] = rose->dest_digis[n];
1294 msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1295 } else {
1296 if (rose->dest_ndigis >= 1) {
1297 srose->srose_ndigis = 1;
1298 srose->srose_digi = rose->dest_digis[0];
1299 }
1300 msg->msg_namelen = sizeof(struct sockaddr_rose);
1301 }
1302 }
1303
1304 skb_free_datagram(sk, skb);
1305
1306 return copied;
1307 }
1308
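/*
 *	The matching receive-side sketch: when ROSE_QBITINCL is set the first
 *	byte of every frame read back is the received Q bit, pushed in front
 *	of the data by rose_recvmsg() above.
 *
 *		unsigned char buf[512];
 *		ssize_t n = recv(s, buf, sizeof(buf), 0);
 *
 *		if (n > 0) {
 *			int qbit = buf[0];
 *			... payload is buf + 1, length n - 1 ...
 *		}
 */
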
1309
1310 static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1311 {
1312 struct sock *sk = sock->sk;
1313 struct rose_sock *rose = rose_sk(sk);
1314 void __user *argp = (void __user *)arg;
1315
1316 switch (cmd) {
1317 case TIOCOUTQ: {
1318 long amount;
1319 amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
1320 if (amount < 0)
1321 amount = 0;
1322 return put_user(amount, (unsigned int __user *) argp);
1323 }
1324
1325 case TIOCINQ: {
1326 struct sk_buff *skb;
1327 long amount = 0L;
1328 /* These two are safe on a single CPU system as only user tasks fiddle here */
1329 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1330 amount = skb->len;
1331 return put_user(amount, (unsigned int __user *) argp);
1332 }
1333
1334 case SIOCGSTAMP:
1335 return sock_get_timestamp(sk, (struct timeval __user *) argp);
1336
1337 case SIOCGSTAMPNS:
1338 return sock_get_timestampns(sk, (struct timespec __user *) argp);
1339
1340 case SIOCGIFADDR:
1341 case SIOCSIFADDR:
1342 case SIOCGIFDSTADDR:
1343 case SIOCSIFDSTADDR:
1344 case SIOCGIFBRDADDR:
1345 case SIOCSIFBRDADDR:
1346 case SIOCGIFNETMASK:
1347 case SIOCSIFNETMASK:
1348 case SIOCGIFMETRIC:
1349 case SIOCSIFMETRIC:
1350 return -EINVAL;
1351
1352 case SIOCADDRT:
1353 case SIOCDELRT:
1354 case SIOCRSCLRRT:
1355 if (!capable(CAP_NET_ADMIN))
1356 return -EPERM;
1357 return rose_rt_ioctl(cmd, argp);
1358
1359 case SIOCRSGCAUSE: {
1360 struct rose_cause_struct rose_cause;
1361 rose_cause.cause = rose->cause;
1362 rose_cause.diagnostic = rose->diagnostic;
1363 return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
1364 }
1365
1366 case SIOCRSSCAUSE: {
1367 struct rose_cause_struct rose_cause;
1368 if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
1369 return -EFAULT;
1370 rose->cause = rose_cause.cause;
1371 rose->diagnostic = rose_cause.diagnostic;
1372 return 0;
1373 }
1374
1375 case SIOCRSSL2CALL:
1376 if (!capable(CAP_NET_ADMIN)) return -EPERM;
1377 if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1378 ax25_listen_release(&rose_callsign, NULL);
1379 if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
1380 return -EFAULT;
1381 if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1382 return ax25_listen_register(&rose_callsign, NULL);
1383
1384 return 0;
1385
1386 case SIOCRSGL2CALL:
1387 return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
1388
1389 case SIOCRSACCEPT:
1390 if (rose->state == ROSE_STATE_5) {
1391 rose_write_internal(sk, ROSE_CALL_ACCEPTED);
1392 rose_start_idletimer(sk);
1393 rose->condition = 0x00;
1394 rose->vs = 0;
1395 rose->va = 0;
1396 rose->vr = 0;
1397 rose->vl = 0;
1398 rose->state = ROSE_STATE_3;
1399 }
1400 return 0;
1401
1402 default:
1403 return -ENOIOCTLCMD;
1404 }
1405
1406 return 0;
1407 }
1408
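/*
 *	Illustrative sketch: once a connection has been cleared, the cause and
 *	diagnostic bytes can be read back with SIOCRSGCAUSE (and preset with
 *	SIOCRSSCAUSE), using struct rose_cause_struct from the ROSE uapi
 *	header.
 *
 *		struct rose_cause_struct rc;
 *
 *		if (ioctl(s, SIOCRSGCAUSE, &rc) == 0)
 *			printf("cleared: cause %02X diagnostic %02X\n",
 *			       rc.cause, rc.diagnostic);
 */
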
1409 #ifdef CONFIG_PROC_FS
1410 static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1411 __acquires(rose_list_lock)
1412 {
1413 int i;
1414 struct sock *s;
1415 struct hlist_node *node;
1416
1417 spin_lock_bh(&rose_list_lock);
1418 if (*pos == 0)
1419 return SEQ_START_TOKEN;
1420
1421 i = 1;
1422 sk_for_each(s, node, &rose_list) {
1423 if (i == *pos)
1424 return s;
1425 ++i;
1426 }
1427 return NULL;
1428 }
1429
1430 static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1431 {
1432 ++*pos;
1433
1434 return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
1435 : sk_next((struct sock *)v);
1436 }
1437
1438 static void rose_info_stop(struct seq_file *seq, void *v)
1439 __releases(rose_list_lock)
1440 {
1441 spin_unlock_bh(&rose_list_lock);
1442 }
1443
1444 static int rose_info_show(struct seq_file *seq, void *v)
1445 {
1446 char buf[11];
1447
1448 if (v == SEQ_START_TOKEN)
1449 seq_puts(seq,
1450 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
1451
1452 else {
1453 struct sock *s = v;
1454 struct rose_sock *rose = rose_sk(s);
1455 const char *devname, *callsign;
1456 const struct net_device *dev = rose->device;
1457
1458 if (!dev)
1459 devname = "???";
1460 else
1461 devname = dev->name;
1462
1463 seq_printf(seq, "%-10s %-9s ",
1464 rose2asc(&rose->dest_addr),
1465 ax2asc(buf, &rose->dest_call));
1466
1467 if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1468 callsign = "??????-?";
1469 else
1470 callsign = ax2asc(buf, &rose->source_call);
1471
1472 seq_printf(seq,
1473 "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1474 rose2asc(&rose->source_addr),
1475 callsign,
1476 devname,
1477 rose->lci & 0x0FFF,
1478 (rose->neighbour) ? rose->neighbour->number : 0,
1479 rose->state,
1480 rose->vs,
1481 rose->vr,
1482 rose->va,
1483 ax25_display_timer(&rose->timer) / HZ,
1484 rose->t1 / HZ,
1485 rose->t2 / HZ,
1486 rose->t3 / HZ,
1487 rose->hb / HZ,
1488 ax25_display_timer(&rose->idletimer) / (60 * HZ),
1489 rose->idle / (60 * HZ),
1490 atomic_read(&s->sk_wmem_alloc),
1491 atomic_read(&s->sk_rmem_alloc),
1492 s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1493 }
1494
1495 return 0;
1496 }
1497
1498 static const struct seq_operations rose_info_seqops = {
1499 .start = rose_info_start,
1500 .next = rose_info_next,
1501 .stop = rose_info_stop,
1502 .show = rose_info_show,
1503 };
1504
1505 static int rose_info_open(struct inode *inode, struct file *file)
1506 {
1507 return seq_open(file, &rose_info_seqops);
1508 }
1509
1510 static const struct file_operations rose_info_fops = {
1511 .owner = THIS_MODULE,
1512 .open = rose_info_open,
1513 .read = seq_read,
1514 .llseek = seq_lseek,
1515 .release = seq_release,
1516 };
1517 #endif /* CONFIG_PROC_FS */
1518
1519 static struct net_proto_family rose_family_ops = {
1520 .family = PF_ROSE,
1521 .create = rose_create,
1522 .owner = THIS_MODULE,
1523 };
1524
1525 static struct proto_ops rose_proto_ops = {
1526 .family = PF_ROSE,
1527 .owner = THIS_MODULE,
1528 .release = rose_release,
1529 .bind = rose_bind,
1530 .connect = rose_connect,
1531 .socketpair = sock_no_socketpair,
1532 .accept = rose_accept,
1533 .getname = rose_getname,
1534 .poll = datagram_poll,
1535 .ioctl = rose_ioctl,
1536 .listen = rose_listen,
1537 .shutdown = sock_no_shutdown,
1538 .setsockopt = rose_setsockopt,
1539 .getsockopt = rose_getsockopt,
1540 .sendmsg = rose_sendmsg,
1541 .recvmsg = rose_recvmsg,
1542 .mmap = sock_no_mmap,
1543 .sendpage = sock_no_sendpage,
1544 };
1545
1546 static struct notifier_block rose_dev_notifier = {
1547 .notifier_call = rose_device_event,
1548 };
1549
1550 static struct net_device **dev_rose;
1551
1552 static struct ax25_protocol rose_pid = {
1553 .pid = AX25_P_ROSE,
1554 .func = rose_route_frame
1555 };
1556
1557 static struct ax25_linkfail rose_linkfail_notifier = {
1558 .func = rose_link_failed
1559 };
1560
1561 static int __init rose_proto_init(void)
1562 {
1563 int i;
1564 int rc;
1565
1566 if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
567 printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
1568 rc = -EINVAL;
1569 goto out;
1570 }
1571
1572 rc = proto_register(&rose_proto, 0);
1573 if (rc != 0)
1574 goto out;
1575
1576 rose_callsign = null_ax25_address;
1577
1578 dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
1579 if (dev_rose == NULL) {
1580 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1581 rc = -ENOMEM;
1582 goto out_proto_unregister;
1583 }
1584
1585 for (i = 0; i < rose_ndevs; i++) {
1586 struct net_device *dev;
1587 char name[IFNAMSIZ];
1588
1589 sprintf(name, "rose%d", i);
1590 dev = alloc_netdev(sizeof(struct net_device_stats),
1591 name, rose_setup);
1592 if (!dev) {
1593 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1594 rc = -ENOMEM;
1595 goto fail;
1596 }
1597 rc = register_netdev(dev);
1598 if (rc) {
1599 printk(KERN_ERR "ROSE: netdevice registration failed\n");
1600 free_netdev(dev);
1601 goto fail;
1602 }
1603 rose_set_lockdep_key(dev);
1604 dev_rose[i] = dev;
1605 }
1606
1607 sock_register(&rose_family_ops);
1608 register_netdevice_notifier(&rose_dev_notifier);
1609
1610 ax25_register_pid(&rose_pid);
1611 ax25_linkfail_register(&rose_linkfail_notifier);
1612
1613 #ifdef CONFIG_SYSCTL
1614 rose_register_sysctl();
1615 #endif
1616 rose_loopback_init();
1617
1618 rose_add_loopback_neigh();
1619
1620 proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops);
1621 proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops);
1622 proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops);
1623 proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops);
1624 out:
1625 return rc;
1626 fail:
1627 while (--i >= 0) {
1628 unregister_netdev(dev_rose[i]);
1629 free_netdev(dev_rose[i]);
1630 }
1631 kfree(dev_rose);
1632 out_proto_unregister:
1633 proto_unregister(&rose_proto);
1634 goto out;
1635 }
1636 module_init(rose_proto_init);
1637
1638 module_param(rose_ndevs, int, 0);
1639 MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
1640
1641 MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
1642 MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
1643 MODULE_LICENSE("GPL");
1644 MODULE_ALIAS_NETPROTO(PF_ROSE);
1645
1646 static void __exit rose_exit(void)
1647 {
1648 int i;
1649
1650 proc_net_remove(&init_net, "rose");
1651 proc_net_remove(&init_net, "rose_neigh");
1652 proc_net_remove(&init_net, "rose_nodes");
1653 proc_net_remove(&init_net, "rose_routes");
1654 rose_loopback_clear();
1655
1656 rose_rt_free();
1657
1658 ax25_protocol_release(AX25_P_ROSE);
1659 ax25_linkfail_release(&rose_linkfail_notifier);
1660
1661 if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1662 ax25_listen_release(&rose_callsign, NULL);
1663
1664 #ifdef CONFIG_SYSCTL
1665 rose_unregister_sysctl();
1666 #endif
1667 unregister_netdevice_notifier(&rose_dev_notifier);
1668
1669 sock_unregister(PF_ROSE);
1670
1671 for (i = 0; i < rose_ndevs; i++) {
1672 struct net_device *dev = dev_rose[i];
1673
1674 if (dev) {
1675 unregister_netdev(dev);
1676 free_netdev(dev);
1677 }
1678 }
1679
1680 kfree(dev_rose);
1681 proto_unregister(&rose_proto);
1682 }
1683
1684 module_exit(rose_exit);