/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
 * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/rose.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/arp.h>

static int rose_ndevs = 10;

int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;

static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);

static struct proto_ops rose_proto_ops;

ax25_address rose_callsign;

/*
 * Convert a ROSE address into text.
 */
const char *rose2asc(const rose_address *addr)
{
	static char buffer[11];
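	/* Note: all callers share this one static buffer. */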

	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
	    addr->rose_addr[4] == 0x00) {
		strcpy(buffer, "*");
	} else {
		sprintf(buffer, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
			addr->rose_addr[1] & 0xFF,
			addr->rose_addr[2] & 0xFF,
			addr->rose_addr[3] & 0xFF,
			addr->rose_addr[4] & 0xFF);
	}

	return buffer;
}

/*
 * Compare two ROSE addresses, 0 == equal.
 */
int rosecmp(rose_address *addr1, rose_address *addr2)
{
	int i;

	for (i = 0; i < 5; i++)
		if (addr1->rose_addr[i] != addr2->rose_addr[i])
			return 1;

	return 0;
}

/*
 * Compare only the first 'mask' digits of two ROSE addresses, 0 == equal.
 */
int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
{
	int i, j;

	if (mask > 10)
		return 1;

	for (i = 0; i < mask; i++) {
		j = i / 2;

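		/*
		 * Addresses are BCD coded, two digits per octet: even
		 * numbered digits sit in the high nibble, odd ones in
		 * the low nibble.
		 */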
		if ((i % 2) != 0) {
			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
				return 1;
		} else {
			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
				return 1;
		}
	}

	return 0;
}

/*
 * Socket removal during an interrupt is now safe.
 */
static void rose_remove_socket(struct sock *sk)
{
	spin_lock_bh(&rose_list_lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a broken link layer connection to a
 * particular neighbour.
 */
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->neighbour == neigh) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->neighbour = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Kill all bound sockets on a dropped device.
 */
static void rose_kill_by_device(struct net_device *dev)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->device == dev) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->device = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Handle device status changes.
 */
static int rose_device_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = (struct net_device *)ptr;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

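	/*
	 * A ROSE interface going down kills the sockets bound to it; an
	 * AX.25 device going down takes the ROSE links and routes that
	 * run over it down with it.
	 */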
	switch (dev->type) {
	case ARPHRD_ROSE:
		rose_kill_by_device(dev);
		break;
	case ARPHRD_AX25:
		rose_link_device_down(dev);
		rose_rt_device_down(dev);
		break;
	}

	return NOTIFY_DONE;
}

/*
 * Add a socket to the bound sockets list.
 */
static void rose_insert_socket(struct sock *sk)
{

	spin_lock_bh(&rose_list_lock);
	sk_add_node(sk, &rose_list);
	spin_unlock_bh(&rose_list_lock);
}

/*
 * Find a socket that wants to accept the Call Request we just
 * received.
 */
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, call) &&
		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
			goto found;
	}

	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
		    s->sk_state == TCP_LISTEN)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a connected ROSE socket given my LCI and neighbour.
 */
struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, node, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (rose->lci == lci && rose->neighbour == neigh)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}

/*
 * Find a unique LCI for a given neighbour.
 */
unsigned int rose_new_lci(struct rose_neigh *neigh)
{
	int lci;

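	/*
	 * To avoid call collisions the two ends of a link allocate from
	 * opposite ends of the LCI range: ascending when we act as DCE,
	 * descending when we act as DTE (the usual X.25 convention).
	 */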
	if (neigh->dce_mode) {
		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	} else {
		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
				return lci;
	}

	return 0;
}

/*
 * Deferred destroy.
 */
void rose_destroy_socket(struct sock *);

/*
 * Handler for deferred kills.
 */
static void rose_destroy_timer(unsigned long data)
{
	rose_destroy_socket((struct sock *)data);
}

/*
 * This is called from user mode and the timers. Thus it protects itself
 * against interrupt users but doesn't worry about being called during
 * work. Once it is removed from the queue no interrupt or bottom half
 * will touch it and we are (fairly 8-) ) safe.
 */
void rose_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	rose_remove_socket(sk);
	rose_stop_heartbeat(sk);
	rose_stop_idletimer(sk);
	rose_stop_timer(sk);

	rose_clear_queues(sk); /* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) { /* A pending connection */
			/* Queue the unaccepted socket for death */
			sock_set_flag(skb->sk, SOCK_DEAD);
			rose_start_heartbeat(skb->sk);
			rose_sk(skb->sk)->state = ROSE_STATE_0;
		}

		kfree_skb(skb);
	}

	if (atomic_read(&sk->sk_wmem_alloc) ||
	    atomic_read(&sk->sk_rmem_alloc)) {
		/* Defer: outstanding buffers */
		init_timer(&sk->sk_timer);
		sk->sk_timer.expires = jiffies + 10 * HZ;
		sk->sk_timer.function = rose_destroy_timer;
		sk->sk_timer.data = (unsigned long)sk;
		add_timer(&sk->sk_timer);
	} else
		sock_put(sk);
}

/*
 * Handling for system calls applied via the various interfaces to a
 * ROSE socket object.
 */

static int rose_setsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int opt;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(opt, (int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case ROSE_DEFER:
		rose->defer = opt ? 1 : 0;
		return 0;

	case ROSE_T1:
		if (opt < 1)
			return -EINVAL;
		rose->t1 = opt * HZ;
		return 0;

	case ROSE_T2:
		if (opt < 1)
			return -EINVAL;
		rose->t2 = opt * HZ;
		return 0;

	case ROSE_T3:
		if (opt < 1)
			return -EINVAL;
		rose->t3 = opt * HZ;
		return 0;

	case ROSE_HOLDBACK:
		if (opt < 1)
			return -EINVAL;
		rose->hb = opt * HZ;
		return 0;

	case ROSE_IDLE:
		if (opt < 0)
			return -EINVAL;
		rose->idle = opt * 60 * HZ;
		return 0;

	case ROSE_QBITINCL:
		rose->qbitincl = opt ? 1 : 0;
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}

static int rose_getsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int val = 0;
	int len;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case ROSE_DEFER:
		val = rose->defer;
		break;

	case ROSE_T1:
		val = rose->t1 / HZ;
		break;

	case ROSE_T2:
		val = rose->t2 / HZ;
		break;

	case ROSE_T3:
		val = rose->t3 / HZ;
		break;

	case ROSE_HOLDBACK:
		val = rose->hb / HZ;
		break;

	case ROSE_IDLE:
		val = rose->idle / (60 * HZ);
		break;

	case ROSE_QBITINCL:
		val = rose->qbitincl;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, len, sizeof(int));

	if (put_user(len, optlen))
		return -EFAULT;

	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}

static int rose_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_LISTEN) {
		struct rose_sock *rose = rose_sk(sk);

		rose->dest_ndigis = 0;
		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
		sk->sk_max_ack_backlog = backlog;
		sk->sk_state = TCP_LISTEN;
		return 0;
	}

	return -EOPNOTSUPP;
}

static struct proto rose_proto = {
	.name = "ROSE",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct rose_sock),
};

static int rose_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct rose_sock *rose;

	if (sock->type != SOCK_SEQPACKET || protocol != 0)
		return -ESOCKTNOSUPPORT;

	if ((sk = sk_alloc(PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL)
		return -ENOMEM;

	rose = rose_sk(sk);

	sock_init_data(sock, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sock->ops = &rose_proto_ops;
	sk->sk_protocol = protocol;

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	rose->t1 = sysctl_rose_call_request_timeout;
	rose->t2 = sysctl_rose_reset_request_timeout;
	rose->t3 = sysctl_rose_clear_request_timeout;
	rose->hb = sysctl_rose_ack_hold_back_timeout;
	rose->idle = sysctl_rose_no_activity_timeout;

	rose->state = ROSE_STATE_0;

	return 0;
}

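/*
 * Create the new socket for an incoming call, copying the settings
 * from the listening socket.
 */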
static struct sock *rose_make_new(struct sock *osk)
{
	struct sock *sk;
	struct rose_sock *rose, *orose;

	if (osk->sk_type != SOCK_SEQPACKET)
		return NULL;

	if ((sk = sk_alloc(PF_ROSE, GFP_ATOMIC, &rose_proto, 1)) == NULL)
		return NULL;

	rose = rose_sk(sk);

	sock_init_data(NULL, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen = 0;
#endif

	sk->sk_type = osk->sk_type;
	sk->sk_socket = osk->sk_socket;
	sk->sk_priority = osk->sk_priority;
	sk->sk_protocol = osk->sk_protocol;
	sk->sk_rcvbuf = osk->sk_rcvbuf;
	sk->sk_sndbuf = osk->sk_sndbuf;
	sk->sk_state = TCP_ESTABLISHED;
	sk->sk_sleep = osk->sk_sleep;

	if (sock_flag(osk, SOCK_ZAPPED))
		sock_set_flag(sk, SOCK_ZAPPED);

	if (sock_flag(osk, SOCK_DBG))
		sock_set_flag(sk, SOCK_DBG);

	init_timer(&rose->timer);
	init_timer(&rose->idletimer);

	orose = rose_sk(osk);
	rose->t1 = orose->t1;
	rose->t2 = orose->t2;
	rose->t3 = orose->t3;
	rose->hb = orose->hb;
	rose->idle = orose->idle;
	rose->defer = orose->defer;
	rose->device = orose->device;
	rose->qbitincl = orose->qbitincl;

	return sk;
}

static int rose_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose;

	if (sk == NULL) return 0;

	rose = rose_sk(sk);

	switch (rose->state) {
	case ROSE_STATE_0:
		rose_disconnect(sk, 0, -1, -1);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_2:
		rose->neighbour->use--;
		rose_disconnect(sk, 0, -1, -1);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_1:
	case ROSE_STATE_3:
	case ROSE_STATE_4:
	case ROSE_STATE_5:
		rose_clear_queues(sk);
		rose_stop_idletimer(sk);
		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
		rose_start_t3timer(sk);
		rose->state = ROSE_STATE_2;
		sk->sk_state = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
		sock_set_flag(sk, SOCK_DESTROY);
		break;

	default:
		break;
	}

	sock->sk = NULL;

	return 0;
}

static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	struct net_device *dev;
	ax25_address *user, *source;
	int n;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return -EINVAL;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
		SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
		return -EADDRNOTAVAIL;
	}

	source = &addr->srose_call;

	if ((user = ax25_findbyuid(current->euid)) == NULL) {
		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE))
			return -EACCES;
		user = source;
	}

	rose->source_addr = addr->srose_addr;
	rose->source_call = *user;
	rose->device = dev;
	rose->source_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->source_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->source_ndigis == 1) {
			rose->source_digis[0] = addr->srose_digi;
		}
	}

	rose_insert_socket(sk);

	sock_reset_flag(sk, SOCK_ZAPPED);
	SOCK_DEBUG(sk, "ROSE: socket is bound\n");
	return 0;
}

static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	unsigned char cause, diagnostic;
	ax25_address *user;
	struct net_device *dev;
	int n;

	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		sock->state = SS_CONNECTED;
		return 0; /* Connect completed during an ERESTARTSYS event */
	}

	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		sock->state = SS_UNCONNECTED;
		return -ECONNREFUSED;
	}

	if (sk->sk_state == TCP_ESTABLISHED)
		return -EISCONN; /* No reconnect on a seqpacket socket */

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if (addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
		return -EINVAL;

	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
					 &diagnostic);
	if (!rose->neighbour)
		return -ENETUNREACH;

	rose->lci = rose_new_lci(rose->neighbour);
	if (!rose->lci)
		return -ENETUNREACH;

	if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */
		sock_reset_flag(sk, SOCK_ZAPPED);

		if ((dev = rose_dev_first()) == NULL)
			return -ENETUNREACH;

		if ((user = ax25_findbyuid(current->euid)) == NULL)
			return -EINVAL;

		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
		rose->source_call = *user;
		rose->device = dev;

		rose_insert_socket(sk); /* Finish the bind */
	}

	rose->dest_addr = addr->srose_addr;
	rose->dest_call = addr->srose_call;
	rose->rand = ((long)rose & 0xFFFF) + rose->lci;
	rose->dest_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->dest_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->dest_ndigis == 1) {
			rose->dest_digis[0] = addr->srose_digi;
		}
	}

	/* Move to connecting socket, start sending Connect Requests */
	sock->state = SS_CONNECTING;
	sk->sk_state = TCP_SYN_SENT;

	rose->state = ROSE_STATE_1;

	rose->neighbour->use++;

	rose_write_internal(sk, ROSE_CALL_REQUEST);
	rose_start_heartbeat(sk);
	rose_start_t1timer(sk);

	/* Now the loop */
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
		return -EINPROGRESS;

	/*
	 * A Connect Ack with Choke or timeout or failed routing will go to
	 * closed.
	 */
	if (sk->sk_state == TCP_SYN_SENT) {
		struct task_struct *tsk = current;
		DECLARE_WAITQUEUE(wait, tsk);

		add_wait_queue(sk->sk_sleep, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (sk->sk_state != TCP_SYN_SENT)
				break;
			if (!signal_pending(tsk)) {
				schedule();
				continue;
			}
			current->state = TASK_RUNNING;
			remove_wait_queue(sk->sk_sleep, &wait);
			return -ERESTARTSYS;
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(sk->sk_sleep, &wait);
	}

	if (sk->sk_state != TCP_ESTABLISHED) {
		sock->state = SS_UNCONNECTED;
		return sock_error(sk); /* Always set at this point */
	}

	sock->state = SS_CONNECTED;

	return 0;
}

static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	struct sk_buff *skb;
	struct sock *newsk;
	struct sock *sk;
	int err = 0;

	if ((sk = sock->sk) == NULL)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_type != SOCK_SEQPACKET) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * The receive queue this time is holding sockets ready to use,
	 * hooked into the Call Request we saved.
	 */
	add_wait_queue(sk->sk_sleep, &wait);
	for (;;) {
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;

		current->state = TASK_INTERRUPTIBLE;
		release_sock(sk);
		if (flags & O_NONBLOCK) {
			current->state = TASK_RUNNING;
			remove_wait_queue(sk->sk_sleep, &wait);
			return -EWOULDBLOCK;
		}
		if (!signal_pending(tsk)) {
			schedule();
			lock_sock(sk);
			continue;
		}
		return -ERESTARTSYS;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sk_sleep, &wait);

	newsk = skb->sk;
	newsk->sk_socket = newsock;
	newsk->sk_sleep = &newsock->wait;

	/* Now attach up the new socket */
	skb->sk = NULL;
	kfree_skb(skb);
	sk->sk_ack_backlog--;
	newsock->sk = newsk;

out:
	release_sock(sk);

	return err;
}

static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
	int *uaddr_len, int peer)
{
	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int n;

	if (peer != 0) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->dest_addr;
		srose->srose_call = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		for (n = 0; n < rose->dest_ndigis; n++)
			srose->srose_digis[n] = rose->dest_digis[n];
	} else {
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->source_addr;
		srose->srose_call = rose->source_call;
		srose->srose_ndigis = rose->source_ndigis;
		for (n = 0; n < rose->source_ndigis; n++)
			srose->srose_digis[n] = rose->source_digis[n];
	}

	*uaddr_len = sizeof(struct full_sockaddr_rose);
	return 0;
}

int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
	struct sock *sk;
	struct sock *make;
	struct rose_sock *make_rose;
	struct rose_facilities_struct facilities;
	int n, len;

	skb->sk = NULL; /* Initially we don't know who it's for */

	/*
	 * skb->data points to the rose frame start
	 */
	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));

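	/*
	 * skb->data[3] carries the called and calling address lengths in
	 * semi-octets (BCD digits, two per octet); round each up to whole
	 * octets to find where the facilities field starts.
	 */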
	len = (((skb->data[3] >> 4) & 0x0F) + 1) / 2;
	len += (((skb->data[3] >> 0) & 0x0F) + 1) / 2;
	if (!rose_parse_facilities(skb->data + len + 4, &facilities)) {
		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
		return 0;
	}

	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);

	/*
	 * We can't accept the Call Request.
	 */
	if (sk == NULL || sk_acceptq_is_full(sk) ||
	    (make = rose_make_new(sk)) == NULL) {
		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
		return 0;
	}

	skb->sk = make;
	make->sk_state = TCP_ESTABLISHED;
	make_rose = rose_sk(make);

	make_rose->lci = lci;
	make_rose->dest_addr = facilities.dest_addr;
	make_rose->dest_call = facilities.dest_call;
	make_rose->dest_ndigis = facilities.dest_ndigis;
	for (n = 0 ; n < facilities.dest_ndigis ; n++)
		make_rose->dest_digis[n] = facilities.dest_digis[n];
	make_rose->source_addr = facilities.source_addr;
	make_rose->source_call = facilities.source_call;
	make_rose->source_ndigis = facilities.source_ndigis;
	for (n = 0 ; n < facilities.source_ndigis ; n++)
		make_rose->source_digis[n]= facilities.source_digis[n];
	make_rose->neighbour = neigh;
	make_rose->device = dev;
	make_rose->facilities = facilities;

	make_rose->neighbour->use++;

	if (rose_sk(sk)->defer) {
		make_rose->state = ROSE_STATE_5;
	} else {
		rose_write_internal(make, ROSE_CALL_ACCEPTED);
		make_rose->state = ROSE_STATE_3;
		rose_start_idletimer(make);
	}

	make_rose->condition = 0x00;
	make_rose->vs = 0;
	make_rose->va = 0;
	make_rose->vr = 0;
	make_rose->vl = 0;
	sk->sk_ack_backlog++;

	rose_insert_socket(make);

	skb_queue_head(&sk->sk_receive_queue, skb);

	rose_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);

	return 1;
}

static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
	struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
	int err;
	struct full_sockaddr_rose srose;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int n, size, qbit = 0;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
		return -EINVAL;

	if (sock_flag(sk, SOCK_ZAPPED))
		return -EADDRNOTAVAIL;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		return -EPIPE;
	}

	if (rose->neighbour == NULL || rose->device == NULL)
		return -ENETUNREACH;

	if (usrose != NULL) {
		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
			return -EINVAL;
		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
		memcpy(&srose, usrose, msg->msg_namelen);
		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
			return -EISCONN;
		if (srose.srose_ndigis != rose->dest_ndigis)
			return -EISCONN;
		if (srose.srose_ndigis == rose->dest_ndigis) {
			for (n = 0 ; n < srose.srose_ndigis ; n++)
				if (ax25cmp(&rose->dest_digis[n],
					    &srose.srose_digis[n]))
					return -EISCONN;
		}
		if (srose.srose_family != AF_ROSE)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;

		srose.srose_family = AF_ROSE;
		srose.srose_addr = rose->dest_addr;
		srose.srose_call = rose->dest_call;
		srose.srose_ndigis = rose->dest_ndigis;
		for (n = 0 ; n < rose->dest_ndigis ; n++)
			srose.srose_digis[n] = rose->dest_digis[n];
	}

	SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");

	/* Build a packet */
	SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;

	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
		return err;

	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);

	/*
	 * Put the data on the end
	 */
	SOCK_DEBUG(sk, "ROSE: Appending user data\n");

	asmptr = skb->h.raw = skb_put(skb, len);

	err = memcpy_fromiovec(asmptr, msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/*
	 * If the Q BIT Include socket option is in force, the first
	 * byte of the user data is the logical value of the Q Bit.
	 */
	if (rose->qbitincl) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 * Push down the ROSE header
	 */
	asmptr = skb_push(skb, ROSE_MIN_LEN);

	SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");

	/* Build a ROSE Network header */
	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
	asmptr[1] = (rose->lci >> 0) & 0xFF;
	asmptr[2] = ROSE_DATA;

	if (qbit)
		asmptr[0] |= ROSE_Q_BIT;

	SOCK_DEBUG(sk, "ROSE: Built header.\n");

	SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");

	if (sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		return -ENOTCONN;
	}

#ifdef M_BIT
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
		unsigned char header[ROSE_MIN_LEN];
		struct sk_buff *skbn;
		int frontlen;
		int lg;

		/* Save a copy of the Header */
		memcpy(header, skb->data, ROSE_MIN_LEN);
		skb_pull(skb, ROSE_MIN_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
				kfree_skb(skb);
				return err;
			}

			skbn->sk = sk;
			skbn->free = 1;
			skbn->arp = 1;

			skb_reserve(skbn, frontlen);

			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;

			/* Copy the user data */
			memcpy(skb_put(skbn, lg), skb->data, lg);
			skb_pull(skb, lg);

			/* Duplicate the Header */
			skb_push(skbn, ROSE_MIN_LEN);
			memcpy(skbn->data, header, ROSE_MIN_LEN);

			if (skb->len > 0)
				skbn->data[2] |= M_BIT;

			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
		}

		skb->free = 1;
		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
	}
#else
	skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */
#endif

	rose_kick(sk);

	return len;
}


static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
	struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
	size_t copied;
	unsigned char *asmptr;
	struct sk_buff *skb;
	int n, er, qbit;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* Now we can treat all alike */
	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
		return er;

	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;

	skb_pull(skb, ROSE_MIN_LEN);

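	/*
	 * If the Q Bit Include option is set, hand the received Q bit
	 * value to the user as the first data byte.
	 */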
	if (rose->qbitincl) {
		asmptr = skb_push(skb, 1);
		*asmptr = qbit;
	}

	skb->h.raw = skb->data;
	copied = skb->len;

	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (srose != NULL) {
		srose->srose_family = AF_ROSE;
		srose->srose_addr = rose->dest_addr;
		srose->srose_call = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
			struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
			for (n = 0 ; n < rose->dest_ndigis ; n++)
				full_srose->srose_digis[n] = rose->dest_digis[n];
			msg->msg_namelen = sizeof(struct full_sockaddr_rose);
		} else {
			if (rose->dest_ndigis >= 1) {
				srose->srose_ndigis = 1;
				srose->srose_digi = rose->dest_digis[0];
			}
			msg->msg_namelen = sizeof(struct sockaddr_rose);
		}
	}

	skb_free_datagram(sk, skb);

	return copied;
}


static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case TIOCOUTQ: {
		long amount;
		amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amount < 0)
			amount = 0;
		return put_user(amount, (unsigned int __user *)argp);
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		long amount = 0L;
		/* These two are safe on a single CPU system as only user tasks fiddle here */
		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;
		return put_user(amount, (unsigned int __user *)argp);
	}

	case SIOCGSTAMP:
		if (sk != NULL)
			return sock_get_timestamp(sk, (struct timeval __user *)argp);
		return -EINVAL;

	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		return -EINVAL;

	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCRSCLRRT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return rose_rt_ioctl(cmd, argp);

	case SIOCRSGCAUSE: {
		struct rose_cause_struct rose_cause;
		rose_cause.cause = rose->cause;
		rose_cause.diagnostic = rose->diagnostic;
		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
	}

	case SIOCRSSCAUSE: {
		struct rose_cause_struct rose_cause;
		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
			return -EFAULT;
		rose->cause = rose_cause.cause;
		rose->diagnostic = rose_cause.diagnostic;
		return 0;
	}

	case SIOCRSSL2CALL:
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_release(&rose_callsign, NULL);
		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
			return -EFAULT;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_register(&rose_callsign, NULL);
		return 0;

	case SIOCRSGL2CALL:
		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;

	case SIOCRSACCEPT:
		if (rose->state == ROSE_STATE_5) {
			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
			rose_start_idletimer(sk);
			rose->condition = 0x00;
			rose->vs = 0;
			rose->va = 0;
			rose->vr = 0;
			rose->vl = 0;
			rose->state = ROSE_STATE_3;
		}
		return 0;

	default:
		return dev_ioctl(cmd, argp);
	}

	return 0;
}

#ifdef CONFIG_PROC_FS
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
{
	int i;
	struct sock *s;
	struct hlist_node *node;

	spin_lock_bh(&rose_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	i = 1;
	sk_for_each(s, node, &rose_list) {
		if (i == *pos)
			return s;
		++i;
	}
	return NULL;
}

static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
		: sk_next((struct sock *)v);
}

static void rose_info_stop(struct seq_file *seq, void *v)
{
	spin_unlock_bh(&rose_list_lock);
}

static int rose_info_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");

	else {
		struct sock *s = v;
		struct rose_sock *rose = rose_sk(s);
		const char *devname, *callsign;
		const struct net_device *dev = rose->device;

		if (!dev)
			devname = "???";
		else
			devname = dev->name;

		seq_printf(seq, "%-10s %-9s ",
			   rose2asc(&rose->dest_addr),
			   ax2asc(&rose->dest_call));

		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
			callsign = "??????-?";
		else
			callsign = ax2asc(&rose->source_call);

		seq_printf(seq,
			   "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
			   rose2asc(&rose->source_addr),
			   callsign,
			   devname,
			   rose->lci & 0x0FFF,
			   (rose->neighbour) ? rose->neighbour->number : 0,
			   rose->state,
			   rose->vs,
			   rose->vr,
			   rose->va,
			   ax25_display_timer(&rose->timer) / HZ,
			   rose->t1 / HZ,
			   rose->t2 / HZ,
			   rose->t3 / HZ,
			   rose->hb / HZ,
			   ax25_display_timer(&rose->idletimer) / (60 * HZ),
			   rose->idle / (60 * HZ),
			   atomic_read(&s->sk_wmem_alloc),
			   atomic_read(&s->sk_rmem_alloc),
			   s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
	}

	return 0;
}

static struct seq_operations rose_info_seqops = {
	.start = rose_info_start,
	.next = rose_info_next,
	.stop = rose_info_stop,
	.show = rose_info_show,
};

static int rose_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rose_info_seqops);
}

static struct file_operations rose_info_fops = {
	.owner = THIS_MODULE,
	.open = rose_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif /* CONFIG_PROC_FS */

static struct net_proto_family rose_family_ops = {
	.family = PF_ROSE,
	.create = rose_create,
	.owner = THIS_MODULE,
};

static struct proto_ops rose_proto_ops = {
	.family = PF_ROSE,
	.owner = THIS_MODULE,
	.release = rose_release,
	.bind = rose_bind,
	.connect = rose_connect,
	.socketpair = sock_no_socketpair,
	.accept = rose_accept,
	.getname = rose_getname,
	.poll = datagram_poll,
	.ioctl = rose_ioctl,
	.listen = rose_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = rose_setsockopt,
	.getsockopt = rose_getsockopt,
	.sendmsg = rose_sendmsg,
	.recvmsg = rose_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static struct notifier_block rose_dev_notifier = {
	.notifier_call = rose_device_event,
};

static struct net_device **dev_rose;

static const char banner[] = KERN_INFO "F6FBB/G4KLX ROSE for Linux. Version 0.62 for AX25.037 Linux 2.4\n";

static int __init rose_proto_init(void)
{
	int i;
	int rc = proto_register(&rose_proto, 0);

	if (rc != 0)
		goto out;

	rose_callsign = null_ax25_address;

	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
		return -1;
	}

	dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL);
	if (dev_rose == NULL) {
		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
		return -1;
	}

	memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*));
	for (i = 0; i < rose_ndevs; i++) {
		struct net_device *dev;
		char name[IFNAMSIZ];

		sprintf(name, "rose%d", i);
		dev = alloc_netdev(sizeof(struct net_device_stats),
				   name, rose_setup);
		if (!dev) {
			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
			goto fail;
		}
		if (register_netdev(dev)) {
			printk(KERN_ERR "ROSE: netdevice registration failed\n");
			free_netdev(dev);
			goto fail;
		}
		dev_rose[i] = dev;
	}

	sock_register(&rose_family_ops);
	register_netdevice_notifier(&rose_dev_notifier);
	printk(banner);

	ax25_protocol_register(AX25_P_ROSE, rose_route_frame);
	ax25_linkfail_register(rose_link_failed);

#ifdef CONFIG_SYSCTL
	rose_register_sysctl();
#endif
	rose_loopback_init();

	rose_add_loopback_neigh();

	proc_net_fops_create("rose", S_IRUGO, &rose_info_fops);
	proc_net_fops_create("rose_neigh", S_IRUGO, &rose_neigh_fops);
	proc_net_fops_create("rose_nodes", S_IRUGO, &rose_nodes_fops);
	proc_net_fops_create("rose_routes", S_IRUGO, &rose_routes_fops);
out:
	return rc;
fail:
	while (--i >= 0) {
		unregister_netdev(dev_rose[i]);
		free_netdev(dev_rose[i]);
	}
	kfree(dev_rose);
	proto_unregister(&rose_proto);
	return -ENOMEM;
}
module_init(rose_proto_init);

module_param(rose_ndevs, int, 0);
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");

MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ROSE);

static void __exit rose_exit(void)
{
	int i;

	proc_net_remove("rose");
	proc_net_remove("rose_neigh");
	proc_net_remove("rose_nodes");
	proc_net_remove("rose_routes");
	rose_loopback_clear();

	rose_rt_free();

	ax25_protocol_release(AX25_P_ROSE);
	ax25_linkfail_release(rose_link_failed);

	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
		ax25_listen_release(&rose_callsign, NULL);

#ifdef CONFIG_SYSCTL
	rose_unregister_sysctl();
#endif
	unregister_netdevice_notifier(&rose_dev_notifier);

	sock_unregister(PF_ROSE);

	for (i = 0; i < rose_ndevs; i++) {
		struct net_device *dev = dev_rose[i];

		if (dev) {
			unregister_netdev(dev);
			free_netdev(dev);
		}
	}

	kfree(dev_rose);
	proto_unregister(&rose_proto);
}

module_exit(rose_exit);