1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
8 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
9 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
10 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
11 */
12
13 #include <linux/capability.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/init.h>
17 #include <linux/errno.h>
18 #include <linux/types.h>
19 #include <linux/socket.h>
20 #include <linux/in.h>
21 #include <linux/kernel.h>
22 #include <linux/sched.h>
23 #include <linux/spinlock.h>
24 #include <linux/timer.h>
25 #include <linux/string.h>
26 #include <linux/sockios.h>
27 #include <linux/net.h>
28 #include <linux/stat.h>
29 #include <net/net_namespace.h>
30 #include <net/ax25.h>
31 #include <linux/inet.h>
32 #include <linux/netdevice.h>
33 #include <linux/if_arp.h>
34 #include <linux/skbuff.h>
35 #include <net/sock.h>
36 #include <asm/system.h>
37 #include <asm/uaccess.h>
38 #include <linux/fcntl.h>
39 #include <linux/termios.h>
40 #include <linux/mm.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <net/rose.h>
44 #include <linux/proc_fs.h>
45 #include <linux/seq_file.h>
46 #include <net/tcp_states.h>
47 #include <net/ip.h>
48 #include <net/arp.h>
49
50 static int rose_ndevs = 10;
51
52 int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
53 int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
54 int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2;
55 int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3;
56 int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE;
57 int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB;
58 int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING;
59 int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
60 int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
61 int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
62
63 static HLIST_HEAD(rose_list);
64 static DEFINE_SPINLOCK(rose_list_lock);
65
66 static struct proto_ops rose_proto_ops;
67
68 ax25_address rose_callsign;
69
70 /*
71 * ROSE network devices are virtual network devices encapsulating ROSE
72 * frames into AX.25 which will be sent through an AX.25 device, so form a
73 * special "super class" of normal net devices; split their locks off into a
74 * separate class since they always nest.
75 */
76 static struct lock_class_key rose_netdev_xmit_lock_key;
77
78 /*
79 * Convert a ROSE address into text.
80 */
81 const char *rose2asc(const rose_address *addr)
82 {
83 static char buffer[11];
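	/* Note: the static buffer makes rose2asc() non-reentrant; an address is five BCD-coded bytes (ten digits), so ten characters plus the NUL. */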
84
85 if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
86 addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
87 addr->rose_addr[4] == 0x00) {
88 strcpy(buffer, "*");
89 } else {
90 sprintf(buffer, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
91 addr->rose_addr[1] & 0xFF,
92 addr->rose_addr[2] & 0xFF,
93 addr->rose_addr[3] & 0xFF,
94 addr->rose_addr[4] & 0xFF);
95 }
96
97 return buffer;
98 }
99
100 /*
101 * Compare two ROSE addresses, 0 == equal.
102 */
103 int rosecmp(rose_address *addr1, rose_address *addr2)
104 {
105 int i;
106
107 for (i = 0; i < 5; i++)
108 if (addr1->rose_addr[i] != addr2->rose_addr[i])
109 return 1;
110
111 return 0;
112 }
113
114 /*
115  *	Compare the first 'mask' digits of two ROSE addresses, 0 == equal.
116 */
117 int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
118 {
119 int i, j;
120
121 if (mask > 10)
122 return 1;
123
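	/* Addresses are BCD coded: each byte holds two digits, the even-numbered digit in the high nibble and the odd-numbered digit in the low nibble. */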
124 for (i = 0; i < mask; i++) {
125 j = i / 2;
126
127 if ((i % 2) != 0) {
128 if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
129 return 1;
130 } else {
131 if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
132 return 1;
133 }
134 }
135
136 return 0;
137 }
138
139 /*
140 * Socket removal during an interrupt is now safe.
141 */
142 static void rose_remove_socket(struct sock *sk)
143 {
144 spin_lock_bh(&rose_list_lock);
145 sk_del_node_init(sk);
146 spin_unlock_bh(&rose_list_lock);
147 }
148
149 /*
150 * Kill all bound sockets on a broken link layer connection to a
151 * particular neighbour.
152 */
153 void rose_kill_by_neigh(struct rose_neigh *neigh)
154 {
155 struct sock *s;
156 struct hlist_node *node;
157
158 spin_lock_bh(&rose_list_lock);
159 sk_for_each(s, node, &rose_list) {
160 struct rose_sock *rose = rose_sk(s);
161
162 if (rose->neighbour == neigh) {
163 rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
164 rose->neighbour->use--;
165 rose->neighbour = NULL;
166 }
167 }
168 spin_unlock_bh(&rose_list_lock);
169 }
170
171 /*
172 * Kill all bound sockets on a dropped device.
173 */
174 static void rose_kill_by_device(struct net_device *dev)
175 {
176 struct sock *s;
177 struct hlist_node *node;
178
179 spin_lock_bh(&rose_list_lock);
180 sk_for_each(s, node, &rose_list) {
181 struct rose_sock *rose = rose_sk(s);
182
183 if (rose->device == dev) {
184 rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
185 			if (rose->neighbour)
				rose->neighbour->use--;
186 rose->device = NULL;
187 }
188 }
189 spin_unlock_bh(&rose_list_lock);
190 }
191
192 /*
193 * Handle device status changes.
194 */
195 static int rose_device_event(struct notifier_block *this, unsigned long event,
196 void *ptr)
197 {
198 struct net_device *dev = (struct net_device *)ptr;
199
200 if (dev->nd_net != &init_net)
201 return NOTIFY_DONE;
202
203 if (event != NETDEV_DOWN)
204 return NOTIFY_DONE;
205
206 switch (dev->type) {
207 case ARPHRD_ROSE:
208 rose_kill_by_device(dev);
209 break;
210 case ARPHRD_AX25:
211 rose_link_device_down(dev);
212 rose_rt_device_down(dev);
213 break;
214 }
215
216 return NOTIFY_DONE;
217 }
218
219 /*
220 * Add a socket to the bound sockets list.
221 */
222 static void rose_insert_socket(struct sock *sk)
223 {
224
225 spin_lock_bh(&rose_list_lock);
226 sk_add_node(sk, &rose_list);
227 spin_unlock_bh(&rose_list_lock);
228 }
229
230 /*
231 * Find a socket that wants to accept the Call Request we just
232 * received.
233 */
234 static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
235 {
236 struct sock *s;
237 struct hlist_node *node;
238
239 spin_lock_bh(&rose_list_lock);
240 sk_for_each(s, node, &rose_list) {
241 struct rose_sock *rose = rose_sk(s);
242
243 if (!rosecmp(&rose->source_addr, addr) &&
244 !ax25cmp(&rose->source_call, call) &&
245 !rose->source_ndigis && s->sk_state == TCP_LISTEN)
246 goto found;
247 }
248
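	/* No exact callsign match: fall back to a listener bound to the null callsign, which acts as a wildcard. */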
249 sk_for_each(s, node, &rose_list) {
250 struct rose_sock *rose = rose_sk(s);
251
252 if (!rosecmp(&rose->source_addr, addr) &&
253 !ax25cmp(&rose->source_call, &null_ax25_address) &&
254 s->sk_state == TCP_LISTEN)
255 goto found;
256 }
257 s = NULL;
258 found:
259 spin_unlock_bh(&rose_list_lock);
260 return s;
261 }
262
263 /*
264  *	Find a connected ROSE socket given my LCI and neighbour.
265 */
266 struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
267 {
268 struct sock *s;
269 struct hlist_node *node;
270
271 spin_lock_bh(&rose_list_lock);
272 sk_for_each(s, node, &rose_list) {
273 struct rose_sock *rose = rose_sk(s);
274
275 if (rose->lci == lci && rose->neighbour == neigh)
276 goto found;
277 }
278 s = NULL;
279 found:
280 spin_unlock_bh(&rose_list_lock);
281 return s;
282 }
283
284 /*
285 * Find a unique LCI for a given device.
286 */
287 unsigned int rose_new_lci(struct rose_neigh *neigh)
288 {
289 int lci;
290
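	/* A DCE searches up from 1 and a DTE down from the maximum (the usual X.25 convention), so the two ends of a link allocate LCIs from opposite ends of the range and rarely collide. */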
291 if (neigh->dce_mode) {
292 for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
293 if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
294 return lci;
295 } else {
296 for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
297 if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
298 return lci;
299 }
300
301 return 0;
302 }
303
304 /*
305 * Deferred destroy.
306 */
307 void rose_destroy_socket(struct sock *);
308
309 /*
310 * Handler for deferred kills.
311 */
312 static void rose_destroy_timer(unsigned long data)
313 {
314 rose_destroy_socket((struct sock *)data);
315 }
316
317 /*
318 * This is called from user mode and the timers. Thus it protects itself
319 * against interrupt users but doesn't worry about being called during
320 * work. Once it is removed from the queue no interrupt or bottom half
321 * will touch it and we are (fairly 8-) ) safe.
322 */
323 void rose_destroy_socket(struct sock *sk)
324 {
325 struct sk_buff *skb;
326
327 rose_remove_socket(sk);
328 rose_stop_heartbeat(sk);
329 rose_stop_idletimer(sk);
330 rose_stop_timer(sk);
331
332 rose_clear_queues(sk); /* Flush the queues */
333
334 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
335 if (skb->sk != sk) { /* A pending connection */
336 /* Queue the unaccepted socket for death */
337 sock_set_flag(skb->sk, SOCK_DEAD);
338 rose_start_heartbeat(skb->sk);
339 rose_sk(skb->sk)->state = ROSE_STATE_0;
340 }
341
342 kfree_skb(skb);
343 }
344
345 if (atomic_read(&sk->sk_wmem_alloc) ||
346 atomic_read(&sk->sk_rmem_alloc)) {
347 /* Defer: outstanding buffers */
348 init_timer(&sk->sk_timer);
349 sk->sk_timer.expires = jiffies + 10 * HZ;
350 sk->sk_timer.function = rose_destroy_timer;
351 sk->sk_timer.data = (unsigned long)sk;
352 add_timer(&sk->sk_timer);
353 } else
354 sock_put(sk);
355 }
356
357 /*
358 * Handling for system calls applied via the various interfaces to a
359 * ROSE socket object.
360 */
361
362 static int rose_setsockopt(struct socket *sock, int level, int optname,
363 char __user *optval, int optlen)
364 {
365 struct sock *sk = sock->sk;
366 struct rose_sock *rose = rose_sk(sk);
367 int opt;
368
369 if (level != SOL_ROSE)
370 return -ENOPROTOOPT;
371
372 if (optlen < sizeof(int))
373 return -EINVAL;
374
375 if (get_user(opt, (int __user *)optval))
376 return -EFAULT;
377
378 switch (optname) {
379 case ROSE_DEFER:
380 rose->defer = opt ? 1 : 0;
381 return 0;
382
383 case ROSE_T1:
384 if (opt < 1)
385 return -EINVAL;
386 rose->t1 = opt * HZ;
387 return 0;
388
389 case ROSE_T2:
390 if (opt < 1)
391 return -EINVAL;
392 rose->t2 = opt * HZ;
393 return 0;
394
395 case ROSE_T3:
396 if (opt < 1)
397 return -EINVAL;
398 rose->t3 = opt * HZ;
399 return 0;
400
401 case ROSE_HOLDBACK:
402 if (opt < 1)
403 return -EINVAL;
404 rose->hb = opt * HZ;
405 return 0;
406
407 case ROSE_IDLE:
408 if (opt < 0)
409 return -EINVAL;
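		/* ROSE_IDLE is given in minutes; the other timers above are in seconds. */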
410 rose->idle = opt * 60 * HZ;
411 return 0;
412
413 case ROSE_QBITINCL:
414 rose->qbitincl = opt ? 1 : 0;
415 return 0;
416
417 default:
418 return -ENOPROTOOPT;
419 }
420 }
421
422 static int rose_getsockopt(struct socket *sock, int level, int optname,
423 char __user *optval, int __user *optlen)
424 {
425 struct sock *sk = sock->sk;
426 struct rose_sock *rose = rose_sk(sk);
427 int val = 0;
428 int len;
429
430 if (level != SOL_ROSE)
431 return -ENOPROTOOPT;
432
433 if (get_user(len, optlen))
434 return -EFAULT;
435
436 if (len < 0)
437 return -EINVAL;
438
439 switch (optname) {
440 case ROSE_DEFER:
441 val = rose->defer;
442 break;
443
444 case ROSE_T1:
445 val = rose->t1 / HZ;
446 break;
447
448 case ROSE_T2:
449 val = rose->t2 / HZ;
450 break;
451
452 case ROSE_T3:
453 val = rose->t3 / HZ;
454 break;
455
456 case ROSE_HOLDBACK:
457 val = rose->hb / HZ;
458 break;
459
460 case ROSE_IDLE:
461 val = rose->idle / (60 * HZ);
462 break;
463
464 case ROSE_QBITINCL:
465 val = rose->qbitincl;
466 break;
467
468 default:
469 return -ENOPROTOOPT;
470 }
471
472 len = min_t(unsigned int, len, sizeof(int));
473
474 if (put_user(len, optlen))
475 return -EFAULT;
476
477 return copy_to_user(optval, &val, len) ? -EFAULT : 0;
478 }
479
480 static int rose_listen(struct socket *sock, int backlog)
481 {
482 struct sock *sk = sock->sk;
483
484 if (sk->sk_state != TCP_LISTEN) {
485 struct rose_sock *rose = rose_sk(sk);
486
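		/* Clear any stale destination details before entering the listening state. */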
487 rose->dest_ndigis = 0;
488 memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
489 memset(&rose->dest_call, 0, AX25_ADDR_LEN);
490 memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
491 sk->sk_max_ack_backlog = backlog;
492 sk->sk_state = TCP_LISTEN;
493 return 0;
494 }
495
496 return -EOPNOTSUPP;
497 }
498
499 static struct proto rose_proto = {
500 .name = "ROSE",
501 .owner = THIS_MODULE,
502 .obj_size = sizeof(struct rose_sock),
503 };
504
505 static int rose_create(struct net *net, struct socket *sock, int protocol)
506 {
507 struct sock *sk;
508 struct rose_sock *rose;
509
510 if (net != &init_net)
511 return -EAFNOSUPPORT;
512
513 if (sock->type != SOCK_SEQPACKET || protocol != 0)
514 return -ESOCKTNOSUPPORT;
515
516 sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
517 if (sk == NULL)
518 return -ENOMEM;
519
520 rose = rose_sk(sk);
521
522 sock_init_data(sock, sk);
523
524 skb_queue_head_init(&rose->ack_queue);
525 #ifdef M_BIT
526 skb_queue_head_init(&rose->frag_queue);
527 rose->fraglen = 0;
528 #endif
529
530 sock->ops = &rose_proto_ops;
531 sk->sk_protocol = protocol;
532
533 init_timer(&rose->timer);
534 init_timer(&rose->idletimer);
535
536 rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
537 rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
538 rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
539 rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
540 rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
541
542 rose->state = ROSE_STATE_0;
543
544 return 0;
545 }
546
547 static struct sock *rose_make_new(struct sock *osk)
548 {
549 struct sock *sk;
550 struct rose_sock *rose, *orose;
551
552 if (osk->sk_type != SOCK_SEQPACKET)
553 return NULL;
554
555 sk = sk_alloc(osk->sk_net, PF_ROSE, GFP_ATOMIC, &rose_proto);
556 if (sk == NULL)
557 return NULL;
558
559 rose = rose_sk(sk);
560
561 sock_init_data(NULL, sk);
562
563 skb_queue_head_init(&rose->ack_queue);
564 #ifdef M_BIT
565 skb_queue_head_init(&rose->frag_queue);
566 rose->fraglen = 0;
567 #endif
568
569 sk->sk_type = osk->sk_type;
570 sk->sk_socket = osk->sk_socket;
571 sk->sk_priority = osk->sk_priority;
572 sk->sk_protocol = osk->sk_protocol;
573 sk->sk_rcvbuf = osk->sk_rcvbuf;
574 sk->sk_sndbuf = osk->sk_sndbuf;
575 sk->sk_state = TCP_ESTABLISHED;
576 sk->sk_sleep = osk->sk_sleep;
577 sock_copy_flags(sk, osk);
578
579 init_timer(&rose->timer);
580 init_timer(&rose->idletimer);
581
582 orose = rose_sk(osk);
583 rose->t1 = orose->t1;
584 rose->t2 = orose->t2;
585 rose->t3 = orose->t3;
586 rose->hb = orose->hb;
587 rose->idle = orose->idle;
588 rose->defer = orose->defer;
589 rose->device = orose->device;
590 rose->qbitincl = orose->qbitincl;
591
592 return sk;
593 }
594
595 static int rose_release(struct socket *sock)
596 {
597 struct sock *sk = sock->sk;
598 struct rose_sock *rose;
599
600 if (sk == NULL) return 0;
601
602 rose = rose_sk(sk);
603
604 switch (rose->state) {
605 case ROSE_STATE_0:
606 rose_disconnect(sk, 0, -1, -1);
607 rose_destroy_socket(sk);
608 break;
609
610 case ROSE_STATE_2:
611 rose->neighbour->use--;
612 rose_disconnect(sk, 0, -1, -1);
613 rose_destroy_socket(sk);
614 break;
615
616 case ROSE_STATE_1:
617 case ROSE_STATE_3:
618 case ROSE_STATE_4:
619 case ROSE_STATE_5:
620 rose_clear_queues(sk);
621 rose_stop_idletimer(sk);
622 rose_write_internal(sk, ROSE_CLEAR_REQUEST);
623 rose_start_t3timer(sk);
624 rose->state = ROSE_STATE_2;
625 sk->sk_state = TCP_CLOSE;
626 sk->sk_shutdown |= SEND_SHUTDOWN;
627 sk->sk_state_change(sk);
628 sock_set_flag(sk, SOCK_DEAD);
629 sock_set_flag(sk, SOCK_DESTROY);
630 break;
631
632 default:
633 break;
634 }
635
636 sock->sk = NULL;
637
638 return 0;
639 }
640
641 static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
642 {
643 struct sock *sk = sock->sk;
644 struct rose_sock *rose = rose_sk(sk);
645 struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
646 struct net_device *dev;
647 ax25_address *source;
648 ax25_uid_assoc *user;
649 int n;
650
651 if (!sock_flag(sk, SOCK_ZAPPED))
652 return -EINVAL;
653
654 if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
655 return -EINVAL;
656
657 if (addr->srose_family != AF_ROSE)
658 return -EINVAL;
659
660 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
661 return -EINVAL;
662
663 if (addr->srose_ndigis > ROSE_MAX_DIGIS)
664 return -EINVAL;
665
666 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
667 SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
668 return -EADDRNOTAVAIL;
669 }
670
671 source = &addr->srose_call;
672
673 user = ax25_findbyuid(current->euid);
674 if (user) {
675 rose->source_call = user->call;
676 ax25_uid_put(user);
677 } else {
678 if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
679 return -EACCES;
680 rose->source_call = *source;
681 }
682
683 rose->source_addr = addr->srose_addr;
684 rose->device = dev;
685 rose->source_ndigis = addr->srose_ndigis;
686
687 if (addr_len == sizeof(struct full_sockaddr_rose)) {
688 struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
689 for (n = 0 ; n < addr->srose_ndigis ; n++)
690 rose->source_digis[n] = full_addr->srose_digis[n];
691 } else {
692 if (rose->source_ndigis == 1) {
693 rose->source_digis[0] = addr->srose_digi;
694 }
695 }
696
697 rose_insert_socket(sk);
698
699 sock_reset_flag(sk, SOCK_ZAPPED);
700 SOCK_DEBUG(sk, "ROSE: socket is bound\n");
701 return 0;
702 }
703
704 static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
705 {
706 struct sock *sk = sock->sk;
707 struct rose_sock *rose = rose_sk(sk);
708 struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
709 unsigned char cause, diagnostic;
710 struct net_device *dev;
711 ax25_uid_assoc *user;
712 int n, err = 0;
713
714 if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
715 return -EINVAL;
716
717 if (addr->srose_family != AF_ROSE)
718 return -EINVAL;
719
720 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
721 return -EINVAL;
722
723 if (addr->srose_ndigis > ROSE_MAX_DIGIS)
724 return -EINVAL;
725
726 /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
727 if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
728 return -EINVAL;
729
730 lock_sock(sk);
731
732 if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
733 		/* Connect completed during an ERESTARTSYS event */
734 sock->state = SS_CONNECTED;
735 goto out_release;
736 }
737
738 if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
739 sock->state = SS_UNCONNECTED;
740 err = -ECONNREFUSED;
741 goto out_release;
742 }
743
744 if (sk->sk_state == TCP_ESTABLISHED) {
745 /* No reconnect on a seqpacket socket */
746 err = -EISCONN;
747 goto out_release;
748 }
749
750 sk->sk_state = TCP_CLOSE;
751 sock->state = SS_UNCONNECTED;
752
753 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
754 &diagnostic);
755 	if (!rose->neighbour) {
		err = -ENETUNREACH;
		goto out_release;
	}
757
758 rose->lci = rose_new_lci(rose->neighbour);
759 if (!rose->lci) {
760 err = -ENETUNREACH;
761 goto out_release;
762 }
763
764 if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */
765 sock_reset_flag(sk, SOCK_ZAPPED);
766
767 if ((dev = rose_dev_first()) == NULL) {
768 err = -ENETUNREACH;
769 goto out_release;
770 }
771
772 user = ax25_findbyuid(current->euid);
773 if (!user) {
774 err = -EINVAL;
775 goto out_release;
776 }
777
778 memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
779 rose->source_call = user->call;
780 rose->device = dev;
781 ax25_uid_put(user);
782
783 rose_insert_socket(sk); /* Finish the bind */
784 }
785 rose_try_next_neigh:
786 rose->dest_addr = addr->srose_addr;
787 rose->dest_call = addr->srose_call;
788 rose->rand = ((long)rose & 0xFFFF) + rose->lci;
789 rose->dest_ndigis = addr->srose_ndigis;
790
791 if (addr_len == sizeof(struct full_sockaddr_rose)) {
792 struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
793 for (n = 0 ; n < addr->srose_ndigis ; n++)
794 rose->dest_digis[n] = full_addr->srose_digis[n];
795 } else {
796 if (rose->dest_ndigis == 1) {
797 rose->dest_digis[0] = addr->srose_digi;
798 }
799 }
800
801 /* Move to connecting socket, start sending Connect Requests */
802 sock->state = SS_CONNECTING;
803 sk->sk_state = TCP_SYN_SENT;
804
805 rose->state = ROSE_STATE_1;
806
807 rose->neighbour->use++;
808
809 rose_write_internal(sk, ROSE_CALL_REQUEST);
810 rose_start_heartbeat(sk);
811 rose_start_t1timer(sk);
812
813 /* Now the loop */
814 if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
815 err = -EINPROGRESS;
816 goto out_release;
817 }
818
819 /*
820 * A Connect Ack with Choke or timeout or failed routing will go to
821 * closed.
822 */
823 if (sk->sk_state == TCP_SYN_SENT) {
824 DEFINE_WAIT(wait);
825
826 for (;;) {
827 prepare_to_wait(sk->sk_sleep, &wait,
828 TASK_INTERRUPTIBLE);
829 if (sk->sk_state != TCP_SYN_SENT)
830 break;
831 if (!signal_pending(current)) {
832 release_sock(sk);
833 schedule();
834 lock_sock(sk);
835 continue;
836 }
837 err = -ERESTARTSYS;
838 break;
839 }
840 finish_wait(sk->sk_sleep, &wait);
841
842 if (err)
843 goto out_release;
844 }
845
846 if (sk->sk_state != TCP_ESTABLISHED) {
847 /* Try next neighbour */
848 rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic);
849 if (rose->neighbour)
850 goto rose_try_next_neigh;
851
852 /* No more neighbours */
853 sock->state = SS_UNCONNECTED;
854 err = sock_error(sk); /* Always set at this point */
855 goto out_release;
856 }
857
858 sock->state = SS_CONNECTED;
859
860 out_release:
861 release_sock(sk);
862
863 return err;
864 }
865
866 static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
867 {
868 struct sk_buff *skb;
869 struct sock *newsk;
870 DEFINE_WAIT(wait);
871 struct sock *sk;
872 int err = 0;
873
874 if ((sk = sock->sk) == NULL)
875 return -EINVAL;
876
877 lock_sock(sk);
878 if (sk->sk_type != SOCK_SEQPACKET) {
879 err = -EOPNOTSUPP;
880 goto out_release;
881 }
882
883 if (sk->sk_state != TCP_LISTEN) {
884 err = -EINVAL;
885 goto out_release;
886 }
887
888 /*
889 	 *	The receive queue this time is holding sockets ready to use,
890 	 *	hooked into the Call Request we saved
891 */
892 for (;;) {
893 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
894
895 skb = skb_dequeue(&sk->sk_receive_queue);
896 if (skb)
897 break;
898
899 if (flags & O_NONBLOCK) {
900 err = -EWOULDBLOCK;
901 break;
902 }
903 if (!signal_pending(current)) {
904 release_sock(sk);
905 schedule();
906 lock_sock(sk);
907 continue;
908 }
909 err = -ERESTARTSYS;
910 break;
911 }
912 finish_wait(sk->sk_sleep, &wait);
913 if (err)
914 goto out_release;
915
916 newsk = skb->sk;
917 newsk->sk_socket = newsock;
918 newsk->sk_sleep = &newsock->wait;
919
920 /* Now attach up the new socket */
921 skb->sk = NULL;
922 kfree_skb(skb);
923 sk->sk_ack_backlog--;
924 newsock->sk = newsk;
925
926 out_release:
927 release_sock(sk);
928
929 return err;
930 }
931
932 static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
933 int *uaddr_len, int peer)
934 {
935 struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
936 struct sock *sk = sock->sk;
937 struct rose_sock *rose = rose_sk(sk);
938 int n;
939
940 if (peer != 0) {
941 if (sk->sk_state != TCP_ESTABLISHED)
942 return -ENOTCONN;
943 srose->srose_family = AF_ROSE;
944 srose->srose_addr = rose->dest_addr;
945 srose->srose_call = rose->dest_call;
946 srose->srose_ndigis = rose->dest_ndigis;
947 for (n = 0; n < rose->dest_ndigis; n++)
948 srose->srose_digis[n] = rose->dest_digis[n];
949 } else {
950 srose->srose_family = AF_ROSE;
951 srose->srose_addr = rose->source_addr;
952 srose->srose_call = rose->source_call;
953 srose->srose_ndigis = rose->source_ndigis;
954 for (n = 0; n < rose->source_ndigis; n++)
955 srose->srose_digis[n] = rose->source_digis[n];
956 }
957
958 *uaddr_len = sizeof(struct full_sockaddr_rose);
959 return 0;
960 }
961
962 int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
963 {
964 struct sock *sk;
965 struct sock *make;
966 struct rose_sock *make_rose;
967 struct rose_facilities_struct facilities;
968 int n, len;
969
970 skb->sk = NULL; /* Initially we don't know who it's for */
971
972 /*
973 * skb->data points to the rose frame start
974 */
975 memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
976
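	/*
	 *	The byte at offset 3 holds the called and calling address lengths
	 *	in digits; two digits fit in each byte, so round up to locate the
	 *	start of the facilities field.
	 */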
977 len = (((skb->data[3] >> 4) & 0x0F) + 1) / 2;
978 len += (((skb->data[3] >> 0) & 0x0F) + 1) / 2;
979 if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
980 rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
981 return 0;
982 }
983
984 sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
985
986 /*
987 * We can't accept the Call Request.
988 */
989 if (sk == NULL || sk_acceptq_is_full(sk) ||
990 (make = rose_make_new(sk)) == NULL) {
991 rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
992 return 0;
993 }
994
995 skb->sk = make;
996 make->sk_state = TCP_ESTABLISHED;
997 make_rose = rose_sk(make);
998
999 make_rose->lci = lci;
1000 make_rose->dest_addr = facilities.dest_addr;
1001 make_rose->dest_call = facilities.dest_call;
1002 make_rose->dest_ndigis = facilities.dest_ndigis;
1003 for (n = 0 ; n < facilities.dest_ndigis ; n++)
1004 make_rose->dest_digis[n] = facilities.dest_digis[n];
1005 make_rose->source_addr = facilities.source_addr;
1006 make_rose->source_call = facilities.source_call;
1007 make_rose->source_ndigis = facilities.source_ndigis;
1008 for (n = 0 ; n < facilities.source_ndigis ; n++)
1009 make_rose->source_digis[n]= facilities.source_digis[n];
1010 make_rose->neighbour = neigh;
1011 make_rose->device = dev;
1012 make_rose->facilities = facilities;
1013
1014 make_rose->neighbour->use++;
1015
1016 if (rose_sk(sk)->defer) {
1017 make_rose->state = ROSE_STATE_5;
1018 } else {
1019 rose_write_internal(make, ROSE_CALL_ACCEPTED);
1020 make_rose->state = ROSE_STATE_3;
1021 rose_start_idletimer(make);
1022 }
1023
1024 make_rose->condition = 0x00;
1025 make_rose->vs = 0;
1026 make_rose->va = 0;
1027 make_rose->vr = 0;
1028 make_rose->vl = 0;
1029 sk->sk_ack_backlog++;
1030
1031 rose_insert_socket(make);
1032
1033 skb_queue_head(&sk->sk_receive_queue, skb);
1034
1035 rose_start_heartbeat(make);
1036
1037 if (!sock_flag(sk, SOCK_DEAD))
1038 sk->sk_data_ready(sk, skb->len);
1039
1040 return 1;
1041 }
1042
1043 static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1044 struct msghdr *msg, size_t len)
1045 {
1046 struct sock *sk = sock->sk;
1047 struct rose_sock *rose = rose_sk(sk);
1048 struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
1049 int err;
1050 struct full_sockaddr_rose srose;
1051 struct sk_buff *skb;
1052 unsigned char *asmptr;
1053 int n, size, qbit = 0;
1054
1055 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1056 return -EINVAL;
1057
1058 if (sock_flag(sk, SOCK_ZAPPED))
1059 return -EADDRNOTAVAIL;
1060
1061 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1062 send_sig(SIGPIPE, current, 0);
1063 return -EPIPE;
1064 }
1065
1066 if (rose->neighbour == NULL || rose->device == NULL)
1067 return -ENETUNREACH;
1068
1069 if (usrose != NULL) {
1070 if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
1071 return -EINVAL;
1072 memset(&srose, 0, sizeof(struct full_sockaddr_rose));
1073 memcpy(&srose, usrose, msg->msg_namelen);
1074 if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
1075 ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
1076 return -EISCONN;
1077 if (srose.srose_ndigis != rose->dest_ndigis)
1078 return -EISCONN;
1079 if (srose.srose_ndigis == rose->dest_ndigis) {
1080 for (n = 0 ; n < srose.srose_ndigis ; n++)
1081 if (ax25cmp(&rose->dest_digis[n],
1082 &srose.srose_digis[n]))
1083 return -EISCONN;
1084 }
1085 if (srose.srose_family != AF_ROSE)
1086 return -EINVAL;
1087 } else {
1088 if (sk->sk_state != TCP_ESTABLISHED)
1089 return -ENOTCONN;
1090
1091 srose.srose_family = AF_ROSE;
1092 srose.srose_addr = rose->dest_addr;
1093 srose.srose_call = rose->dest_call;
1094 srose.srose_ndigis = rose->dest_ndigis;
1095 for (n = 0 ; n < rose->dest_ndigis ; n++)
1096 srose.srose_digis[n] = rose->dest_digis[n];
1097 }
1098
1099 SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");
1100
1101 /* Build a packet */
1102 SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
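	/* Leave headroom for the AX.25 link-level headers plus the ROSE network header in front of the user data. */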
1103 size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1104
1105 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
1106 return err;
1107
1108 skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
1109
1110 /*
1111 * Put the data on the end
1112 */
1113 SOCK_DEBUG(sk, "ROSE: Appending user data\n");
1114
1115 skb_reset_transport_header(skb);
1116 skb_put(skb, len);
1117
1118 err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
1119 if (err) {
1120 kfree_skb(skb);
1121 return err;
1122 }
1123
1124 /*
1125 * If the Q BIT Include socket option is in force, the first
1126 * byte of the user data is the logical value of the Q Bit.
1127 */
1128 if (rose->qbitincl) {
1129 qbit = skb->data[0];
1130 skb_pull(skb, 1);
1131 }
1132
1133 /*
1134 * Push down the ROSE header
1135 */
1136 asmptr = skb_push(skb, ROSE_MIN_LEN);
1137
1138 SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");
1139
1140 /* Build a ROSE Network header */
1141 asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1142 asmptr[1] = (rose->lci >> 0) & 0xFF;
1143 asmptr[2] = ROSE_DATA;
1144
1145 if (qbit)
1146 asmptr[0] |= ROSE_Q_BIT;
1147
1148 SOCK_DEBUG(sk, "ROSE: Built header.\n");
1149
1150 SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");
1151
1152 if (sk->sk_state != TCP_ESTABLISHED) {
1153 kfree_skb(skb);
1154 return -ENOTCONN;
1155 }
1156
1157 #ifdef M_BIT
1158 #define ROSE_PACLEN (256-ROSE_MIN_LEN)
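	/*
	 *	Fragment long writes: split the data into ROSE_PACLEN sized pieces,
	 *	copy the ROSE header onto each piece, and set the M (more data) bit
	 *	on every fragment except the last.
	 */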
1159 if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
1160 unsigned char header[ROSE_MIN_LEN];
1161 struct sk_buff *skbn;
1162 int frontlen;
1163 int lg;
1164
1165 /* Save a copy of the Header */
1166 skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
1167 skb_pull(skb, ROSE_MIN_LEN);
1168
1169 frontlen = skb_headroom(skb);
1170
1171 while (skb->len > 0) {
1172 if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
1173 kfree_skb(skb);
1174 return err;
1175 }
1176
1177 skbn->sk = sk;
1178 skbn->free = 1;
1179 skbn->arp = 1;
1180
1181 skb_reserve(skbn, frontlen);
1182
1183 lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
1184
1185 /* Copy the user data */
1186 skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
1187 skb_pull(skb, lg);
1188
1189 /* Duplicate the Header */
1190 skb_push(skbn, ROSE_MIN_LEN);
1191 skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
1192
1193 if (skb->len > 0)
1194 skbn->data[2] |= M_BIT;
1195
1196 skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
1197 }
1198
1199 skb->free = 1;
1200 kfree_skb(skb);
1201 } else {
1202 skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
1203 }
1204 #else
1205 skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */
1206 #endif
1207
1208 rose_kick(sk);
1209
1210 return len;
1211 }
1212
1213
1214 static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
1215 struct msghdr *msg, size_t size, int flags)
1216 {
1217 struct sock *sk = sock->sk;
1218 struct rose_sock *rose = rose_sk(sk);
1219 struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
1220 size_t copied;
1221 unsigned char *asmptr;
1222 struct sk_buff *skb;
1223 int n, er, qbit;
1224
1225 /*
1226 * This works for seqpacket too. The receiver has ordered the queue for
1227 * us! We do one quick check first though
1228 */
1229 if (sk->sk_state != TCP_ESTABLISHED)
1230 return -ENOTCONN;
1231
1232 /* Now we can treat all alike */
1233 if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
1234 return er;
1235
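	/* Remember the Q bit before stripping the ROSE header; with ROSE_QBITINCL set it is delivered to the user as the first data byte. */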
1236 qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
1237
1238 skb_pull(skb, ROSE_MIN_LEN);
1239
1240 if (rose->qbitincl) {
1241 asmptr = skb_push(skb, 1);
1242 *asmptr = qbit;
1243 }
1244
1245 skb_reset_transport_header(skb);
1246 copied = skb->len;
1247
1248 if (copied > size) {
1249 copied = size;
1250 msg->msg_flags |= MSG_TRUNC;
1251 }
1252
1253 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1254
1255 if (srose != NULL) {
1256 srose->srose_family = AF_ROSE;
1257 srose->srose_addr = rose->dest_addr;
1258 srose->srose_call = rose->dest_call;
1259 srose->srose_ndigis = rose->dest_ndigis;
1260 if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
1261 struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
1262 for (n = 0 ; n < rose->dest_ndigis ; n++)
1263 full_srose->srose_digis[n] = rose->dest_digis[n];
1264 msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1265 } else {
1266 if (rose->dest_ndigis >= 1) {
1267 srose->srose_ndigis = 1;
1268 srose->srose_digi = rose->dest_digis[0];
1269 }
1270 msg->msg_namelen = sizeof(struct sockaddr_rose);
1271 }
1272 }
1273
1274 skb_free_datagram(sk, skb);
1275
1276 return copied;
1277 }
1278
1279
1280 static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1281 {
1282 struct sock *sk = sock->sk;
1283 struct rose_sock *rose = rose_sk(sk);
1284 void __user *argp = (void __user *)arg;
1285
1286 switch (cmd) {
1287 case TIOCOUTQ: {
1288 long amount;
1289 amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
1290 if (amount < 0)
1291 amount = 0;
1292 return put_user(amount, (unsigned int __user *) argp);
1293 }
1294
1295 case TIOCINQ: {
1296 struct sk_buff *skb;
1297 long amount = 0L;
1298 /* These two are safe on a single CPU system as only user tasks fiddle here */
1299 if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1300 amount = skb->len;
1301 return put_user(amount, (unsigned int __user *) argp);
1302 }
1303
1304 case SIOCGSTAMP:
1305 return sock_get_timestamp(sk, (struct timeval __user *) argp);
1306
1307 case SIOCGSTAMPNS:
1308 return sock_get_timestampns(sk, (struct timespec __user *) argp);
1309
1310 case SIOCGIFADDR:
1311 case SIOCSIFADDR:
1312 case SIOCGIFDSTADDR:
1313 case SIOCSIFDSTADDR:
1314 case SIOCGIFBRDADDR:
1315 case SIOCSIFBRDADDR:
1316 case SIOCGIFNETMASK:
1317 case SIOCSIFNETMASK:
1318 case SIOCGIFMETRIC:
1319 case SIOCSIFMETRIC:
1320 return -EINVAL;
1321
1322 case SIOCADDRT:
1323 case SIOCDELRT:
1324 case SIOCRSCLRRT:
1325 if (!capable(CAP_NET_ADMIN))
1326 return -EPERM;
1327 return rose_rt_ioctl(cmd, argp);
1328
1329 case SIOCRSGCAUSE: {
1330 struct rose_cause_struct rose_cause;
1331 rose_cause.cause = rose->cause;
1332 rose_cause.diagnostic = rose->diagnostic;
1333 return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
1334 }
1335
1336 case SIOCRSSCAUSE: {
1337 struct rose_cause_struct rose_cause;
1338 if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
1339 return -EFAULT;
1340 rose->cause = rose_cause.cause;
1341 rose->diagnostic = rose_cause.diagnostic;
1342 return 0;
1343 }
1344
1345 case SIOCRSSL2CALL:
1346 if (!capable(CAP_NET_ADMIN)) return -EPERM;
1347 if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1348 ax25_listen_release(&rose_callsign, NULL);
1349 if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
1350 return -EFAULT;
1351 if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1352 return ax25_listen_register(&rose_callsign, NULL);
1353
1354 return 0;
1355
1356 case SIOCRSGL2CALL:
1357 return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
1358
1359 case SIOCRSACCEPT:
1360 if (rose->state == ROSE_STATE_5) {
1361 rose_write_internal(sk, ROSE_CALL_ACCEPTED);
1362 rose_start_idletimer(sk);
1363 rose->condition = 0x00;
1364 rose->vs = 0;
1365 rose->va = 0;
1366 rose->vr = 0;
1367 rose->vl = 0;
1368 rose->state = ROSE_STATE_3;
1369 }
1370 return 0;
1371
1372 default:
1373 return -ENOIOCTLCMD;
1374 }
1375
1376 return 0;
1377 }
1378
1379 #ifdef CONFIG_PROC_FS
1380 static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1381 {
1382 int i;
1383 struct sock *s;
1384 struct hlist_node *node;
1385
1386 spin_lock_bh(&rose_list_lock);
1387 if (*pos == 0)
1388 return SEQ_START_TOKEN;
1389
1390 i = 1;
1391 sk_for_each(s, node, &rose_list) {
1392 if (i == *pos)
1393 return s;
1394 ++i;
1395 }
1396 return NULL;
1397 }
1398
1399 static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1400 {
1401 ++*pos;
1402
1403 return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
1404 : sk_next((struct sock *)v);
1405 }
1406
1407 static void rose_info_stop(struct seq_file *seq, void *v)
1408 {
1409 spin_unlock_bh(&rose_list_lock);
1410 }
1411
1412 static int rose_info_show(struct seq_file *seq, void *v)
1413 {
1414 char buf[11];
1415
1416 if (v == SEQ_START_TOKEN)
1417 seq_puts(seq,
1418 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
1419
1420 else {
1421 struct sock *s = v;
1422 struct rose_sock *rose = rose_sk(s);
1423 const char *devname, *callsign;
1424 const struct net_device *dev = rose->device;
1425
1426 if (!dev)
1427 devname = "???";
1428 else
1429 devname = dev->name;
1430
1431 seq_printf(seq, "%-10s %-9s ",
1432 rose2asc(&rose->dest_addr),
1433 ax2asc(buf, &rose->dest_call));
1434
1435 if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1436 callsign = "??????-?";
1437 else
1438 callsign = ax2asc(buf, &rose->source_call);
1439
1440 seq_printf(seq,
1441 "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1442 rose2asc(&rose->source_addr),
1443 callsign,
1444 devname,
1445 rose->lci & 0x0FFF,
1446 (rose->neighbour) ? rose->neighbour->number : 0,
1447 rose->state,
1448 rose->vs,
1449 rose->vr,
1450 rose->va,
1451 ax25_display_timer(&rose->timer) / HZ,
1452 rose->t1 / HZ,
1453 rose->t2 / HZ,
1454 rose->t3 / HZ,
1455 rose->hb / HZ,
1456 ax25_display_timer(&rose->idletimer) / (60 * HZ),
1457 rose->idle / (60 * HZ),
1458 atomic_read(&s->sk_wmem_alloc),
1459 atomic_read(&s->sk_rmem_alloc),
1460 s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1461 }
1462
1463 return 0;
1464 }
1465
1466 static const struct seq_operations rose_info_seqops = {
1467 .start = rose_info_start,
1468 .next = rose_info_next,
1469 .stop = rose_info_stop,
1470 .show = rose_info_show,
1471 };
1472
1473 static int rose_info_open(struct inode *inode, struct file *file)
1474 {
1475 return seq_open(file, &rose_info_seqops);
1476 }
1477
1478 static const struct file_operations rose_info_fops = {
1479 .owner = THIS_MODULE,
1480 .open = rose_info_open,
1481 .read = seq_read,
1482 .llseek = seq_lseek,
1483 .release = seq_release,
1484 };
1485 #endif /* CONFIG_PROC_FS */
1486
1487 static struct net_proto_family rose_family_ops = {
1488 .family = PF_ROSE,
1489 .create = rose_create,
1490 .owner = THIS_MODULE,
1491 };
1492
1493 static struct proto_ops rose_proto_ops = {
1494 .family = PF_ROSE,
1495 .owner = THIS_MODULE,
1496 .release = rose_release,
1497 .bind = rose_bind,
1498 .connect = rose_connect,
1499 .socketpair = sock_no_socketpair,
1500 .accept = rose_accept,
1501 .getname = rose_getname,
1502 .poll = datagram_poll,
1503 .ioctl = rose_ioctl,
1504 .listen = rose_listen,
1505 .shutdown = sock_no_shutdown,
1506 .setsockopt = rose_setsockopt,
1507 .getsockopt = rose_getsockopt,
1508 .sendmsg = rose_sendmsg,
1509 .recvmsg = rose_recvmsg,
1510 .mmap = sock_no_mmap,
1511 .sendpage = sock_no_sendpage,
1512 };
1513
1514 static struct notifier_block rose_dev_notifier = {
1515 .notifier_call = rose_device_event,
1516 };
1517
1518 static struct net_device **dev_rose;
1519
1520 static struct ax25_protocol rose_pid = {
1521 .pid = AX25_P_ROSE,
1522 .func = rose_route_frame
1523 };
1524
1525 static struct ax25_linkfail rose_linkfail_notifier = {
1526 .func = rose_link_failed
1527 };
1528
1529 static int __init rose_proto_init(void)
1530 {
1531 int i;
1532 int rc;
1533
1534 if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
1535 		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
1536 rc = -EINVAL;
1537 goto out;
1538 }
1539
1540 rc = proto_register(&rose_proto, 0);
1541 if (rc != 0)
1542 goto out;
1543
1544 rose_callsign = null_ax25_address;
1545
1546 dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
1547 if (dev_rose == NULL) {
1548 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1549 rc = -ENOMEM;
1550 goto out_proto_unregister;
1551 }
1552
1553 for (i = 0; i < rose_ndevs; i++) {
1554 struct net_device *dev;
1555 char name[IFNAMSIZ];
1556
1557 sprintf(name, "rose%d", i);
1558 dev = alloc_netdev(sizeof(struct net_device_stats),
1559 name, rose_setup);
1560 if (!dev) {
1561 printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1562 rc = -ENOMEM;
1563 goto fail;
1564 }
1565 rc = register_netdev(dev);
1566 if (rc) {
1567 printk(KERN_ERR "ROSE: netdevice registration failed\n");
1568 free_netdev(dev);
1569 goto fail;
1570 }
1571 lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key);
1572 dev_rose[i] = dev;
1573 }
1574
1575 sock_register(&rose_family_ops);
1576 register_netdevice_notifier(&rose_dev_notifier);
1577
1578 ax25_register_pid(&rose_pid);
1579 ax25_linkfail_register(&rose_linkfail_notifier);
1580
1581 #ifdef CONFIG_SYSCTL
1582 rose_register_sysctl();
1583 #endif
1584 rose_loopback_init();
1585
1586 rose_add_loopback_neigh();
1587
1588 proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops);
1589 proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops);
1590 proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops);
1591 proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops);
1592 out:
1593 return rc;
1594 fail:
1595 while (--i >= 0) {
1596 unregister_netdev(dev_rose[i]);
1597 free_netdev(dev_rose[i]);
1598 }
1599 kfree(dev_rose);
1600 out_proto_unregister:
1601 proto_unregister(&rose_proto);
1602 goto out;
1603 }
1604 module_init(rose_proto_init);
1605
1606 module_param(rose_ndevs, int, 0);
1607 MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
1608
1609 MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
1610 MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
1611 MODULE_LICENSE("GPL");
1612 MODULE_ALIAS_NETPROTO(PF_ROSE);
1613
1614 static void __exit rose_exit(void)
1615 {
1616 int i;
1617
1618 proc_net_remove(&init_net, "rose");
1619 proc_net_remove(&init_net, "rose_neigh");
1620 proc_net_remove(&init_net, "rose_nodes");
1621 proc_net_remove(&init_net, "rose_routes");
1622 rose_loopback_clear();
1623
1624 rose_rt_free();
1625
1626 ax25_protocol_release(AX25_P_ROSE);
1627 ax25_linkfail_release(&rose_linkfail_notifier);
1628
1629 if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1630 ax25_listen_release(&rose_callsign, NULL);
1631
1632 #ifdef CONFIG_SYSCTL
1633 rose_unregister_sysctl();
1634 #endif
1635 unregister_netdevice_notifier(&rose_dev_notifier);
1636
1637 sock_unregister(PF_ROSE);
1638
1639 for (i = 0; i < rose_ndevs; i++) {
1640 struct net_device *dev = dev_rose[i];
1641
1642 if (dev) {
1643 unregister_netdev(dev);
1644 free_netdev(dev);
1645 }
1646 }
1647
1648 kfree(dev_rose);
1649 proto_unregister(&rose_proto);
1650 }
1651
1652 module_exit(rose_exit);