1
2 /*
3 * DECnet An implementation of the DECnet protocol suite for the LINUX
4 * operating system. DECnet is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * DECnet Socket Layer Interface
8 *
9 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * Patrick Caulfield <patrick@pandh.demon.co.uk>
11 *
12 * Changes:
13 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
14 * version of the code. Original copyright preserved
15 * below.
16 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
17 * compatible with my routing layer.
18 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
19 * Caulfield.
20 * Steve Whitehouse: Further bug fixes, checking module code still works
21 * with new routing layer.
22 * Steve Whitehouse: Additional set/get_sockopt() calls.
23 * Steve Whitehouse: Fixed TIOCINQ ioctl to be the same as Eduardo's new
24 * code.
25 * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX-like
26 * way. Didn't manage it entirely, but it's better.
27 * Steve Whitehouse: ditto for sendmsg().
28 * Steve Whitehouse: A selection of bug fixes to various things.
29 * Steve Whitehouse: Added TIOCOUTQ ioctl.
30 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
31 * Steve Whitehouse: Fixes to connect() error returns.
32 * Patrick Caulfield: Fixes to delayed acceptance logic.
33 * David S. Miller: New socket locking
34 * Steve Whitehouse: Socket list hashing/locking
35 * Arnaldo C. Melo: use capable, not suser
36 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
37 * when required.
38 * Patrick Caulfield: /proc/net/decnet now has object name/number
39 * Steve Whitehouse: Fixed local port allocation, hashed sk list
40 * Matthew Wilcox: Fixes for dn_ioctl()
41 * Steve Whitehouse: New connect/accept logic to allow timeouts and
42 * prepare for sendpage etc.
43 */
44
45
46 /******************************************************************************
47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
48
49 This program is free software; you can redistribute it and/or modify
50 it under the terms of the GNU General Public License as published by
51 the Free Software Foundation; either version 2 of the License, or
52 any later version.
53
54 This program is distributed in the hope that it will be useful,
55 but WITHOUT ANY WARRANTY; without even the implied warranty of
56 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
57 GNU General Public License for more details.
58
59 HISTORY:
60
61 Version Kernel Date Author/Comments
62 ------- ------ ---- ---------------
63 Version 0.0.1 2.0.30 01-dec-97 Eduardo Marcelo Serrat
64 (emserrat@geocities.com)
65
66 First Development of DECnet Socket La-
67 yer for Linux. Only supports outgoing
68 connections.
69
70 Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
71 (patrick@pandh.demon.co.uk)
72
73 Port to new kernel development version.
74
75 Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com)
77 _
78 Added support for incoming connections
79 so we can start developing server apps
80 on Linux.
81 -
82 Module Support
83 Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
84 (emserrat@geocities.com)
85 _
86 Added support for X11R6.4. Now we can
87 use DECnet transport for X on Linux!!!
88 -
89 Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
90 (emserrat@geocities.com)
91 Removed bugs on flow control
92 Removed bugs on incoming accessdata
93 order
94 -
95 Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
96 dn_recvmsg fixes
97
98 Patrick J. Caulfield
99 dn_bind fixes
100 *******************************************************************************/
101
102 #include <linux/module.h>
103 #include <linux/errno.h>
104 #include <linux/types.h>
105 #include <linux/slab.h>
106 #include <linux/socket.h>
107 #include <linux/in.h>
108 #include <linux/kernel.h>
109 #include <linux/sched/signal.h>
110 #include <linux/timer.h>
111 #include <linux/string.h>
112 #include <linux/sockios.h>
113 #include <linux/net.h>
114 #include <linux/netdevice.h>
115 #include <linux/inet.h>
116 #include <linux/route.h>
117 #include <linux/netfilter.h>
118 #include <linux/seq_file.h>
119 #include <net/sock.h>
120 #include <net/tcp_states.h>
121 #include <net/flow.h>
122 #include <asm/ioctls.h>
123 #include <linux/capability.h>
124 #include <linux/mm.h>
125 #include <linux/interrupt.h>
126 #include <linux/proc_fs.h>
127 #include <linux/stat.h>
128 #include <linux/init.h>
129 #include <linux/poll.h>
130 #include <linux/jiffies.h>
131 #include <net/net_namespace.h>
132 #include <net/neighbour.h>
133 #include <net/dst.h>
134 #include <net/fib_rules.h>
135 #include <net/dn.h>
136 #include <net/dn_nsp.h>
137 #include <net/dn_dev.h>
138 #include <net/dn_route.h>
139 #include <net/dn_fib.h>
140 #include <net/dn_neigh.h>
141
142 struct dn_sock {
143 struct sock sk;
144 struct dn_scp scp;
145 };
146
147 static void dn_keepalive(struct sock *sk);
148
149 #define DN_SK_HASH_SHIFT 8
150 #define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
151 #define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)
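/*
 * Descriptive note: the table below has DN_SK_HASH_SIZE (256) buckets.
 * Bound sockets hash on the low DN_SK_HASH_SHIFT bits of their local port
 * (see dn_find_list() and check_port()), while listening sockets are
 * rehashed on their object name or number via listen_hash().
 */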
152
153
154 static const struct proto_ops dn_proto_ops;
155 static DEFINE_RWLOCK(dn_hash_lock);
156 static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
157 static struct hlist_head dn_wild_sk;
158 static atomic_long_t decnet_memory_allocated;
159
160 static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
161 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
162
163 static struct hlist_head *dn_find_list(struct sock *sk)
164 {
165 struct dn_scp *scp = DN_SK(sk);
166
167 if (scp->addr.sdn_flags & SDF_WILD)
168 return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
169
170 return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
171 }
172
173 /*
174 * Valid ports are those greater than zero and not already in use.
175 */
176 static int check_port(__le16 port)
177 {
178 struct sock *sk;
179
180 if (port == 0)
181 return -1;
182
183 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
184 struct dn_scp *scp = DN_SK(sk);
185 if (scp->addrloc == port)
186 return -1;
187 }
188 return 0;
189 }
190
191 static unsigned short port_alloc(struct sock *sk)
192 {
193 struct dn_scp *scp = DN_SK(sk);
194 static unsigned short port = 0x2000;
195 unsigned short i_port = port;
196
197 while(check_port(cpu_to_le16(++port)) != 0) {
198 if (port == i_port)
199 return 0;
200 }
201
202 scp->addrloc = cpu_to_le16(port);
203
204 return 1;
205 }
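/*
 * Descriptive note: port_alloc() walks a single static counter (starting
 * just above 0x2000 on first use) until check_port() reports a free local
 * port, and only gives up, returning 0, if the counter wraps all the way
 * back to where it started.
 */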
206
207 /*
208 * Since this is only ever called from user
209 * level, we don't need a write_lock() version
210 * of this.
211 */
212 static int dn_hash_sock(struct sock *sk)
213 {
214 struct dn_scp *scp = DN_SK(sk);
215 struct hlist_head *list;
216 int rv = -EUSERS;
217
218 BUG_ON(sk_hashed(sk));
219
220 write_lock_bh(&dn_hash_lock);
221
222 if (!scp->addrloc && !port_alloc(sk))
223 goto out;
224
225 rv = -EADDRINUSE;
226 if ((list = dn_find_list(sk)) == NULL)
227 goto out;
228
229 sk_add_node(sk, list);
230 rv = 0;
231 out:
232 write_unlock_bh(&dn_hash_lock);
233 return rv;
234 }
235
236 static void dn_unhash_sock(struct sock *sk)
237 {
238 write_lock(&dn_hash_lock);
239 sk_del_node_init(sk);
240 write_unlock(&dn_hash_lock);
241 }
242
243 static void dn_unhash_sock_bh(struct sock *sk)
244 {
245 write_lock_bh(&dn_hash_lock);
246 sk_del_node_init(sk);
247 write_unlock_bh(&dn_hash_lock);
248 }
249
250 static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
251 {
252 int i;
253 unsigned int hash = addr->sdn_objnum;
254
255 if (hash == 0) {
256 hash = addr->sdn_objnamel;
257 for(i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) {
258 hash ^= addr->sdn_objname[i];
259 hash ^= (hash << 3);
260 }
261 }
262
263 return &dn_sk_hash[hash & DN_SK_HASH_MASK];
264 }
265
266 /*
267 * Called to transform a socket from bound (i.e. with a local address)
268 * into a listening socket (doesn't need a local port number) and rehashes
269 * based upon the object name/number.
270 */
271 static void dn_rehash_sock(struct sock *sk)
272 {
273 struct hlist_head *list;
274 struct dn_scp *scp = DN_SK(sk);
275
276 if (scp->addr.sdn_flags & SDF_WILD)
277 return;
278
279 write_lock_bh(&dn_hash_lock);
280 sk_del_node_init(sk);
281 DN_SK(sk)->addrloc = 0;
282 list = listen_hash(&DN_SK(sk)->addr);
283 sk_add_node(sk, list);
284 write_unlock_bh(&dn_hash_lock);
285 }
286
287 int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
288 {
289 int len = 2;
290
291 *buf++ = type;
292
293 switch (type) {
294 case 0:
295 *buf++ = sdn->sdn_objnum;
296 break;
297 case 1:
298 *buf++ = 0;
299 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
300 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
301 len = 3 + le16_to_cpu(sdn->sdn_objnamel);
302 break;
303 case 2:
304 memset(buf, 0, 5);
305 buf += 5;
306 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
307 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
308 len = 7 + le16_to_cpu(sdn->sdn_objnamel);
309 break;
310 }
311
312 return len;
313 }
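/*
 * Illustrative sketch of the buffer layouts produced by the switch above
 * (derived from the code here, not quoted from a protocol specification):
 *
 *   type 0: [0][objnum]                                  2 bytes
 *   type 1: [1][0][objnamel][objname ...]                3 + objnamel bytes
 *   type 2: [2][0][0][0][0][0][objnamel][objname ...]    7 + objnamel bytes
 */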
314
315 /*
316 * On reception of usernames, we handle types 1 and 0 for destination
317 * addresses only. Types 2 and 4 are used for source addresses, but the
318 * UIC and GIC are ignored and both are treated the same way. Type 3
319 * is never used as I've no idea what its purpose might be or what its
320 * format is.
321 */
322 int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
323 {
324 unsigned char type;
325 int size = len;
326 int namel = 12;
327
328 sdn->sdn_objnum = 0;
329 sdn->sdn_objnamel = cpu_to_le16(0);
330 memset(sdn->sdn_objname, 0, DN_MAXOBJL);
331
332 if (len < 2)
333 return -1;
334
335 len -= 2;
336 *fmt = *data++;
337 type = *data++;
338
339 switch (*fmt) {
340 case 0:
341 sdn->sdn_objnum = type;
342 return 2;
343 case 1:
344 namel = 16;
345 break;
346 case 2:
347 len -= 4;
348 data += 4;
349 break;
350 case 4:
351 len -= 8;
352 data += 8;
353 break;
354 default:
355 return -1;
356 }
357
358 len -= 1;
359
360 if (len < 0)
361 return -1;
362
363 sdn->sdn_objnamel = cpu_to_le16(*data++);
364 len -= le16_to_cpu(sdn->sdn_objnamel);
365
366 if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel))
367 return -1;
368
369 memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel));
370
371 return size - len;
372 }
373
374 struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
375 {
376 struct hlist_head *list = listen_hash(addr);
377 struct sock *sk;
378
379 read_lock(&dn_hash_lock);
380 sk_for_each(sk, list) {
381 struct dn_scp *scp = DN_SK(sk);
382 if (sk->sk_state != TCP_LISTEN)
383 continue;
384 if (scp->addr.sdn_objnum) {
385 if (scp->addr.sdn_objnum != addr->sdn_objnum)
386 continue;
387 } else {
388 if (addr->sdn_objnum)
389 continue;
390 if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
391 continue;
392 if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, le16_to_cpu(addr->sdn_objnamel)) != 0)
393 continue;
394 }
395 sock_hold(sk);
396 read_unlock(&dn_hash_lock);
397 return sk;
398 }
399
400 sk = sk_head(&dn_wild_sk);
401 if (sk) {
402 if (sk->sk_state == TCP_LISTEN)
403 sock_hold(sk);
404 else
405 sk = NULL;
406 }
407
408 read_unlock(&dn_hash_lock);
409 return sk;
410 }
411
412 struct sock *dn_find_by_skb(struct sk_buff *skb)
413 {
414 struct dn_skb_cb *cb = DN_SKB_CB(skb);
415 struct sock *sk;
416 struct dn_scp *scp;
417
418 read_lock(&dn_hash_lock);
419 sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
420 scp = DN_SK(sk);
421 if (cb->src != dn_saddr2dn(&scp->peer))
422 continue;
423 if (cb->dst_port != scp->addrloc)
424 continue;
425 if (scp->addrrem && (cb->src_port != scp->addrrem))
426 continue;
427 sock_hold(sk);
428 goto found;
429 }
430 sk = NULL;
431 found:
432 read_unlock(&dn_hash_lock);
433 return sk;
434 }
435
436
437
438 static void dn_destruct(struct sock *sk)
439 {
440 struct dn_scp *scp = DN_SK(sk);
441
442 skb_queue_purge(&scp->data_xmit_queue);
443 skb_queue_purge(&scp->other_xmit_queue);
444 skb_queue_purge(&scp->other_receive_queue);
445
446 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
447 }
448
449 static int dn_memory_pressure;
450
451 static void dn_enter_memory_pressure(struct sock *sk)
452 {
453 if (!dn_memory_pressure) {
454 dn_memory_pressure = 1;
455 }
456 }
457
458 static struct proto dn_proto = {
459 .name = "NSP",
460 .owner = THIS_MODULE,
461 .enter_memory_pressure = dn_enter_memory_pressure,
462 .memory_pressure = &dn_memory_pressure,
463 .memory_allocated = &decnet_memory_allocated,
464 .sysctl_mem = sysctl_decnet_mem,
465 .sysctl_wmem = sysctl_decnet_wmem,
466 .sysctl_rmem = sysctl_decnet_rmem,
467 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
468 .obj_size = sizeof(struct dn_sock),
469 };
470
471 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp, int kern)
472 {
473 struct dn_scp *scp;
474 struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, kern);
475
476 if (!sk)
477 goto out;
478
479 if (sock)
480 sock->ops = &dn_proto_ops;
481 sock_init_data(sock, sk);
482
483 sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
484 sk->sk_destruct = dn_destruct;
485 sk->sk_no_check_tx = 1;
486 sk->sk_family = PF_DECnet;
487 sk->sk_protocol = 0;
488 sk->sk_allocation = gfp;
489 sk->sk_sndbuf = sysctl_decnet_wmem[1];
490 sk->sk_rcvbuf = sysctl_decnet_rmem[1];
491
492 /* Initialization of DECnet Session Control Port */
493 scp = DN_SK(sk);
494 scp->state = DN_O; /* Open */
495 scp->numdat = 1; /* Next data seg to tx */
496 scp->numoth = 1; /* Next oth data to tx */
497 scp->ackxmt_dat = 0; /* Last data seg ack'ed */
498 scp->ackxmt_oth = 0; /* Last oth data ack'ed */
499 scp->ackrcv_dat = 0; /* Highest data ack recv*/
500 scp->ackrcv_oth = 0; /* Last oth data ack rec*/
501 scp->flowrem_sw = DN_SEND;
502 scp->flowloc_sw = DN_SEND;
503 scp->flowrem_dat = 0;
504 scp->flowrem_oth = 1;
505 scp->flowloc_dat = 0;
506 scp->flowloc_oth = 1;
507 scp->services_rem = 0;
508 scp->services_loc = 1 | NSP_FC_NONE;
509 scp->info_rem = 0;
510 scp->info_loc = 0x03; /* NSP version 4.1 */
511 scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
512 scp->nonagle = 0;
513 scp->multi_ireq = 1;
514 scp->accept_mode = ACC_IMMED;
515 scp->addr.sdn_family = AF_DECnet;
516 scp->peer.sdn_family = AF_DECnet;
517 scp->accessdata.acc_accl = 5;
518 memcpy(scp->accessdata.acc_acc, "LINUX", 5);
519
520 scp->max_window = NSP_MAX_WINDOW;
521 scp->snd_window = NSP_MIN_WINDOW;
522 scp->nsp_srtt = NSP_INITIAL_SRTT;
523 scp->nsp_rttvar = NSP_INITIAL_RTTVAR;
524 scp->nsp_rxtshift = 0;
525
526 skb_queue_head_init(&scp->data_xmit_queue);
527 skb_queue_head_init(&scp->other_xmit_queue);
528 skb_queue_head_init(&scp->other_receive_queue);
529
530 scp->persist = 0;
531 scp->persist_fxn = NULL;
532 scp->keepalive = 10 * HZ;
533 scp->keepalive_fxn = dn_keepalive;
534
535 init_timer(&scp->delack_timer);
536 scp->delack_pending = 0;
537 scp->delack_fxn = dn_nsp_delayed_ack;
538
539 dn_start_slow_timer(sk);
540 out:
541 return sk;
542 }
543
544 /*
545 * Keepalive timer.
546 * FIXME: Should respond to SO_KEEPALIVE etc.
547 */
548 static void dn_keepalive(struct sock *sk)
549 {
550 struct dn_scp *scp = DN_SK(sk);
551
552 /*
553 * By checking that the other_data transmit queue is empty
554 * we are double-checking that we are not sending too
555 * many of these keepalive frames.
556 */
557 if (skb_queue_empty(&scp->other_xmit_queue))
558 dn_nsp_send_link(sk, DN_NOCHANGE, 0);
559 }
560
561
562 /*
563 * Timer for shutdown/destroyed sockets.
564 * When a socket is dead and no packets have been sent for a
565 * certain amount of time, it is removed by this
566 * routine. Also takes care of sending out DI & DC
567 * frames at the correct times.
568 */
569 int dn_destroy_timer(struct sock *sk)
570 {
571 struct dn_scp *scp = DN_SK(sk);
572
573 scp->persist = dn_nsp_persist(sk);
574
575 switch (scp->state) {
576 case DN_DI:
577 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
578 if (scp->nsp_rxtshift >= decnet_di_count)
579 scp->state = DN_CN;
580 return 0;
581
582 case DN_DR:
583 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
584 if (scp->nsp_rxtshift >= decnet_dr_count)
585 scp->state = DN_DRC;
586 return 0;
587
588 case DN_DN:
589 if (scp->nsp_rxtshift < decnet_dn_count) {
590 /* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
591 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
592 GFP_ATOMIC);
593 return 0;
594 }
595 }
596
597 scp->persist = (HZ * decnet_time_wait);
598
599 if (sk->sk_socket)
600 return 0;
601
602 if (time_after_eq(jiffies, scp->stamp + HZ * decnet_time_wait)) {
603 dn_unhash_sock(sk);
604 sock_put(sk);
605 return 1;
606 }
607
608 return 0;
609 }
610
611 static void dn_destroy_sock(struct sock *sk)
612 {
613 struct dn_scp *scp = DN_SK(sk);
614
615 scp->nsp_rxtshift = 0; /* reset back off */
616
617 if (sk->sk_socket) {
618 if (sk->sk_socket->state != SS_UNCONNECTED)
619 sk->sk_socket->state = SS_DISCONNECTING;
620 }
621
622 sk->sk_state = TCP_CLOSE;
623
624 switch (scp->state) {
625 case DN_DN:
626 dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
627 sk->sk_allocation);
628 scp->persist_fxn = dn_destroy_timer;
629 scp->persist = dn_nsp_persist(sk);
630 break;
631 case DN_CR:
632 scp->state = DN_DR;
633 goto disc_reject;
634 case DN_RUN:
635 scp->state = DN_DI;
636 case DN_DI:
637 case DN_DR:
638 disc_reject:
639 dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
640 case DN_NC:
641 case DN_NR:
642 case DN_RJ:
643 case DN_DIC:
644 case DN_CN:
645 case DN_DRC:
646 case DN_CI:
647 case DN_CD:
648 scp->persist_fxn = dn_destroy_timer;
649 scp->persist = dn_nsp_persist(sk);
650 break;
651 default:
652 printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
653 case DN_O:
654 dn_stop_slow_timer(sk);
655
656 dn_unhash_sock_bh(sk);
657 sock_put(sk);
658
659 break;
660 }
661 }
662
663 char *dn_addr2asc(__u16 addr, char *buf)
664 {
665 unsigned short node, area;
666
667 node = addr & 0x03ff;
668 area = addr >> 10;
669 sprintf(buf, "%hd.%hd", area, node);
670
671 return buf;
672 }
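/*
 * Worked example of the split above: address 0x0402 gives
 * area = 0x0402 >> 10 = 1 and node = 0x0402 & 0x03ff = 2,
 * so dn_addr2asc() prints "1.2".
 */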
673
674
675
676 static int dn_create(struct net *net, struct socket *sock, int protocol,
677 int kern)
678 {
679 struct sock *sk;
680
681 if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
682 return -EINVAL;
683
684 if (!net_eq(net, &init_net))
685 return -EAFNOSUPPORT;
686
687 switch (sock->type) {
688 case SOCK_SEQPACKET:
689 if (protocol != DNPROTO_NSP)
690 return -EPROTONOSUPPORT;
691 break;
692 case SOCK_STREAM:
693 break;
694 default:
695 return -ESOCKTNOSUPPORT;
696 }
697
698
699 if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL, kern)) == NULL)
700 return -ENOBUFS;
701
702 sk->sk_protocol = protocol;
703
704 return 0;
705 }
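/*
 * Hedged usage sketch (user space, not part of this file): the checks in
 * dn_create() correspond to creating a socket with, for example,
 *
 *     int s = socket(AF_DECnet, SOCK_SEQPACKET, DNPROTO_NSP);
 *
 * SOCK_STREAM is also accepted; any other socket type is rejected with
 * -ESOCKTNOSUPPORT, and SOCK_SEQPACKET with a protocol other than
 * DNPROTO_NSP with -EPROTONOSUPPORT.
 */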
706
707
708 static int
709 dn_release(struct socket *sock)
710 {
711 struct sock *sk = sock->sk;
712
713 if (sk) {
714 sock_orphan(sk);
715 sock_hold(sk);
716 lock_sock(sk);
717 dn_destroy_sock(sk);
718 release_sock(sk);
719 sock_put(sk);
720 }
721
722 return 0;
723 }
724
725 static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
726 {
727 struct sock *sk = sock->sk;
728 struct dn_scp *scp = DN_SK(sk);
729 struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
730 struct net_device *dev, *ldev;
731 int rv;
732
733 if (addr_len != sizeof(struct sockaddr_dn))
734 return -EINVAL;
735
736 if (saddr->sdn_family != AF_DECnet)
737 return -EINVAL;
738
739 if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2))
740 return -EINVAL;
741
742 if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL)
743 return -EINVAL;
744
745 if (saddr->sdn_flags & ~SDF_WILD)
746 return -EINVAL;
747
748 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
749 (saddr->sdn_flags & SDF_WILD)))
750 return -EACCES;
751
752 if (!(saddr->sdn_flags & SDF_WILD)) {
753 if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
754 rcu_read_lock();
755 ldev = NULL;
756 for_each_netdev_rcu(&init_net, dev) {
757 if (!dev->dn_ptr)
758 continue;
759 if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
760 ldev = dev;
761 break;
762 }
763 }
764 rcu_read_unlock();
765 if (ldev == NULL)
766 return -EADDRNOTAVAIL;
767 }
768 }
769
770 rv = -EINVAL;
771 lock_sock(sk);
772 if (sock_flag(sk, SOCK_ZAPPED)) {
773 memcpy(&scp->addr, saddr, addr_len);
774 sock_reset_flag(sk, SOCK_ZAPPED);
775
776 rv = dn_hash_sock(sk);
777 if (rv)
778 sock_set_flag(sk, SOCK_ZAPPED);
779 }
780 release_sock(sk);
781
782 return rv;
783 }
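/*
 * Hedged usage sketch (user space, not compiled here; the object name
 * "MYOBJ" is made up): with the validation above in mind, binding a
 * named object looks roughly like
 *
 *     struct sockaddr_dn sa = { .sdn_family = AF_DECnet };
 *     sa.sdn_objnamel = 5;
 *     memcpy(sa.sdn_objname, "MYOBJ", 5);
 *     bind(s, (struct sockaddr *)&sa, sizeof(sa));
 *
 * bearing in mind that the length fields are little-endian 16 bit values.
 * Binding an object number or an SDF_WILD address additionally requires
 * CAP_NET_BIND_SERVICE, as checked above.
 */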
784
785
786 static int dn_auto_bind(struct socket *sock)
787 {
788 struct sock *sk = sock->sk;
789 struct dn_scp *scp = DN_SK(sk);
790 int rv;
791
792 sock_reset_flag(sk, SOCK_ZAPPED);
793
794 scp->addr.sdn_flags = 0;
795 scp->addr.sdn_objnum = 0;
796
797 /*
798 * This stuff is to keep compatibility with Eduardo's
799 * patch. I hope I can dispense with it shortly...
800 */
801 if ((scp->accessdata.acc_accl != 0) &&
802 (scp->accessdata.acc_accl <= 12)) {
803
804 scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl);
805 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel));
806
807 scp->accessdata.acc_accl = 0;
808 memset(scp->accessdata.acc_acc, 0, 40);
809 }
810 /* End of compatibility stuff */
811
812 scp->addr.sdn_add.a_len = cpu_to_le16(2);
813 rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
814 if (rv == 0) {
815 rv = dn_hash_sock(sk);
816 if (rv)
817 sock_set_flag(sk, SOCK_ZAPPED);
818 }
819
820 return rv;
821 }
822
823 static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
824 {
825 struct dn_scp *scp = DN_SK(sk);
826 DEFINE_WAIT(wait);
827 int err;
828
829 if (scp->state != DN_CR)
830 return -EINVAL;
831
832 scp->state = DN_CC;
833 scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
834 dn_send_conn_conf(sk, allocation);
835
836 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
837 for(;;) {
838 release_sock(sk);
839 if (scp->state == DN_CC)
840 *timeo = schedule_timeout(*timeo);
841 lock_sock(sk);
842 err = 0;
843 if (scp->state == DN_RUN)
844 break;
845 err = sock_error(sk);
846 if (err)
847 break;
848 err = sock_intr_errno(*timeo);
849 if (signal_pending(current))
850 break;
851 err = -EAGAIN;
852 if (!*timeo)
853 break;
854 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
855 }
856 finish_wait(sk_sleep(sk), &wait);
857 if (err == 0) {
858 sk->sk_socket->state = SS_CONNECTED;
859 } else if (scp->state != DN_CC) {
860 sk->sk_socket->state = SS_UNCONNECTED;
861 }
862 return err;
863 }
864
865 static int dn_wait_run(struct sock *sk, long *timeo)
866 {
867 struct dn_scp *scp = DN_SK(sk);
868 DEFINE_WAIT(wait);
869 int err = 0;
870
871 if (scp->state == DN_RUN)
872 goto out;
873
874 if (!*timeo)
875 return -EALREADY;
876
877 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
878 for(;;) {
879 release_sock(sk);
880 if (scp->state == DN_CI || scp->state == DN_CC)
881 *timeo = schedule_timeout(*timeo);
882 lock_sock(sk);
883 err = 0;
884 if (scp->state == DN_RUN)
885 break;
886 err = sock_error(sk);
887 if (err)
888 break;
889 err = sock_intr_errno(*timeo);
890 if (signal_pending(current))
891 break;
892 err = -ETIMEDOUT;
893 if (!*timeo)
894 break;
895 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
896 }
897 finish_wait(sk_sleep(sk), &wait);
898 out:
899 if (err == 0) {
900 sk->sk_socket->state = SS_CONNECTED;
901 } else if (scp->state != DN_CI && scp->state != DN_CC) {
902 sk->sk_socket->state = SS_UNCONNECTED;
903 }
904 return err;
905 }
906
907 static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
908 {
909 struct socket *sock = sk->sk_socket;
910 struct dn_scp *scp = DN_SK(sk);
911 int err = -EISCONN;
912 struct flowidn fld;
913 struct dst_entry *dst;
914
915 if (sock->state == SS_CONNECTED)
916 goto out;
917
918 if (sock->state == SS_CONNECTING) {
919 err = 0;
920 if (scp->state == DN_RUN) {
921 sock->state = SS_CONNECTED;
922 goto out;
923 }
924 err = -ECONNREFUSED;
925 if (scp->state != DN_CI && scp->state != DN_CC) {
926 sock->state = SS_UNCONNECTED;
927 goto out;
928 }
929 return dn_wait_run(sk, timeo);
930 }
931
932 err = -EINVAL;
933 if (scp->state != DN_O)
934 goto out;
935
936 if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
937 goto out;
938 if (addr->sdn_family != AF_DECnet)
939 goto out;
940 if (addr->sdn_flags & SDF_WILD)
941 goto out;
942
943 if (sock_flag(sk, SOCK_ZAPPED)) {
944 err = dn_auto_bind(sk->sk_socket);
945 if (err)
946 goto out;
947 }
948
949 memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
950
951 err = -EHOSTUNREACH;
952 memset(&fld, 0, sizeof(fld));
953 fld.flowidn_oif = sk->sk_bound_dev_if;
954 fld.daddr = dn_saddr2dn(&scp->peer);
955 fld.saddr = dn_saddr2dn(&scp->addr);
956 dn_sk_ports_copy(&fld, scp);
957 fld.flowidn_proto = DNPROTO_NSP;
958 if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0)
959 goto out;
960 dst = __sk_dst_get(sk);
961 sk->sk_route_caps = dst->dev->features;
962 sock->state = SS_CONNECTING;
963 scp->state = DN_CI;
964 scp->segsize_loc = dst_metric_advmss(dst);
965
966 dn_nsp_send_conninit(sk, NSP_CI);
967 err = -EINPROGRESS;
968 if (*timeo) {
969 err = dn_wait_run(sk, timeo);
970 }
971 out:
972 return err;
973 }
974
975 static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
976 {
977 struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
978 struct sock *sk = sock->sk;
979 int err;
980 long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
981
982 lock_sock(sk);
983 err = __dn_connect(sk, addr, addrlen, &timeo, 0);
984 release_sock(sk);
985
986 return err;
987 }
988
989 static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
990 {
991 struct dn_scp *scp = DN_SK(sk);
992
993 switch (scp->state) {
994 case DN_RUN:
995 return 0;
996 case DN_CR:
997 return dn_confirm_accept(sk, timeo, sk->sk_allocation);
998 case DN_CI:
999 case DN_CC:
1000 return dn_wait_run(sk, timeo);
1001 case DN_O:
1002 return __dn_connect(sk, addr, addrlen, timeo, flags);
1003 }
1004
1005 return -EINVAL;
1006 }
1007
1008
1009 static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
1010 {
1011 unsigned char *ptr = skb->data;
1012
1013 acc->acc_userl = *ptr++;
1014 memcpy(&acc->acc_user, ptr, acc->acc_userl);
1015 ptr += acc->acc_userl;
1016
1017 acc->acc_passl = *ptr++;
1018 memcpy(&acc->acc_pass, ptr, acc->acc_passl);
1019 ptr += acc->acc_passl;
1020
1021 acc->acc_accl = *ptr++;
1022 memcpy(&acc->acc_acc, ptr, acc->acc_accl);
1023
1024 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
1025
1026 }
1027
1028 static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
1029 {
1030 unsigned char *ptr = skb->data;
1031 u16 len = *ptr++; /* yes, it's 8bit on the wire */
1032
1033 BUG_ON(len > 16); /* we've checked the contents earlier */
1034 opt->opt_optl = cpu_to_le16(len);
1035 opt->opt_status = 0;
1036 memcpy(opt->opt_data, ptr, len);
1037 skb_pull(skb, len + 1);
1038 }
1039
1040 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1041 {
1042 DEFINE_WAIT(wait);
1043 struct sk_buff *skb = NULL;
1044 int err = 0;
1045
1046 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1047 for(;;) {
1048 release_sock(sk);
1049 skb = skb_dequeue(&sk->sk_receive_queue);
1050 if (skb == NULL) {
1051 *timeo = schedule_timeout(*timeo);
1052 skb = skb_dequeue(&sk->sk_receive_queue);
1053 }
1054 lock_sock(sk);
1055 if (skb != NULL)
1056 break;
1057 err = -EINVAL;
1058 if (sk->sk_state != TCP_LISTEN)
1059 break;
1060 err = sock_intr_errno(*timeo);
1061 if (signal_pending(current))
1062 break;
1063 err = -EAGAIN;
1064 if (!*timeo)
1065 break;
1066 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1067 }
1068 finish_wait(sk_sleep(sk), &wait);
1069
1070 return skb == NULL ? ERR_PTR(err) : skb;
1071 }
1072
1073 static int dn_accept(struct socket *sock, struct socket *newsock, int flags,
1074 bool kern)
1075 {
1076 struct sock *sk = sock->sk, *newsk;
1077 struct sk_buff *skb = NULL;
1078 struct dn_skb_cb *cb;
1079 unsigned char menuver;
1080 int err = 0;
1081 unsigned char type;
1082 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1083 struct dst_entry *dst;
1084
1085 lock_sock(sk);
1086
1087 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
1088 release_sock(sk);
1089 return -EINVAL;
1090 }
1091
1092 skb = skb_dequeue(&sk->sk_receive_queue);
1093 if (skb == NULL) {
1094 skb = dn_wait_for_connect(sk, &timeo);
1095 if (IS_ERR(skb)) {
1096 release_sock(sk);
1097 return PTR_ERR(skb);
1098 }
1099 }
1100
1101 cb = DN_SKB_CB(skb);
1102 sk->sk_ack_backlog--;
1103 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
1104 if (newsk == NULL) {
1105 release_sock(sk);
1106 kfree_skb(skb);
1107 return -ENOBUFS;
1108 }
1109 release_sock(sk);
1110
1111 dst = skb_dst(skb);
1112 sk_dst_set(newsk, dst);
1113 skb_dst_set(skb, NULL);
1114
1115 DN_SK(newsk)->state = DN_CR;
1116 DN_SK(newsk)->addrrem = cb->src_port;
1117 DN_SK(newsk)->services_rem = cb->services;
1118 DN_SK(newsk)->info_rem = cb->info;
1119 DN_SK(newsk)->segsize_rem = cb->segsize;
1120 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
1121
1122 if (DN_SK(newsk)->segsize_rem < 230)
1123 DN_SK(newsk)->segsize_rem = 230;
1124
1125 if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
1126 DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
1127
1128 newsk->sk_state = TCP_LISTEN;
1129 memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
1130
1131 /*
1132 * If we are listening on a wild socket, we don't want
1133 * the newly created socket on the wrong hash queue.
1134 */
1135 DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;
1136
1137 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
1138 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
1139 *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
1140 *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;
1141
1142 menuver = *skb->data;
1143 skb_pull(skb, 1);
1144
1145 if (menuver & DN_MENUVER_ACC)
1146 dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
1147
1148 if (menuver & DN_MENUVER_USR)
1149 dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
1150
1151 if (menuver & DN_MENUVER_PRX)
1152 DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
1153
1154 if (menuver & DN_MENUVER_UIC)
1155 DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
1156
1157 kfree_skb(skb);
1158
1159 memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
1160 sizeof(struct optdata_dn));
1161 memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
1162 sizeof(struct optdata_dn));
1163
1164 lock_sock(newsk);
1165 err = dn_hash_sock(newsk);
1166 if (err == 0) {
1167 sock_reset_flag(newsk, SOCK_ZAPPED);
1168 dn_send_conn_ack(newsk);
1169
1170 /*
1171 * Here we use sk->sk_allocation since although the conn conf is
1172 * for the newsk, the context is the old socket.
1173 */
1174 if (DN_SK(newsk)->accept_mode == ACC_IMMED)
1175 err = dn_confirm_accept(newsk, &timeo,
1176 sk->sk_allocation);
1177 }
1178 release_sock(newsk);
1179 return err;
1180 }
1181
1182
1183 static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len,int peer)
1184 {
1185 struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
1186 struct sock *sk = sock->sk;
1187 struct dn_scp *scp = DN_SK(sk);
1188
1189 *uaddr_len = sizeof(struct sockaddr_dn);
1190
1191 lock_sock(sk);
1192
1193 if (peer) {
1194 if ((sock->state != SS_CONNECTED &&
1195 sock->state != SS_CONNECTING) &&
1196 scp->accept_mode == ACC_IMMED) {
1197 release_sock(sk);
1198 return -ENOTCONN;
1199 }
1200
1201 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
1202 } else {
1203 memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
1204 }
1205
1206 release_sock(sk);
1207
1208 return 0;
1209 }
1210
1211
1212 static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
1213 {
1214 struct sock *sk = sock->sk;
1215 struct dn_scp *scp = DN_SK(sk);
1216 int mask = datagram_poll(file, sock, wait);
1217
1218 if (!skb_queue_empty(&scp->other_receive_queue))
1219 mask |= POLLRDBAND;
1220
1221 return mask;
1222 }
1223
1224 static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1225 {
1226 struct sock *sk = sock->sk;
1227 struct dn_scp *scp = DN_SK(sk);
1228 int err = -EOPNOTSUPP;
1229 long amount = 0;
1230 struct sk_buff *skb;
1231 int val;
1232
1233 switch(cmd)
1234 {
1235 case SIOCGIFADDR:
1236 case SIOCSIFADDR:
1237 return dn_dev_ioctl(cmd, (void __user *)arg);
1238
1239 case SIOCATMARK:
1240 lock_sock(sk);
1241 val = !skb_queue_empty(&scp->other_receive_queue);
1242 if (scp->state != DN_RUN)
1243 val = -ENOTCONN;
1244 release_sock(sk);
1245 return val;
1246
1247 case TIOCOUTQ:
1248 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1249 if (amount < 0)
1250 amount = 0;
1251 err = put_user(amount, (int __user *)arg);
1252 break;
1253
1254 case TIOCINQ:
1255 lock_sock(sk);
1256 skb = skb_peek(&scp->other_receive_queue);
1257 if (skb) {
1258 amount = skb->len;
1259 } else {
1260 skb_queue_walk(&sk->sk_receive_queue, skb)
1261 amount += skb->len;
1262 }
1263 release_sock(sk);
1264 err = put_user(amount, (int __user *)arg);
1265 break;
1266
1267 default:
1268 err = -ENOIOCTLCMD;
1269 break;
1270 }
1271
1272 return err;
1273 }
1274
1275 static int dn_listen(struct socket *sock, int backlog)
1276 {
1277 struct sock *sk = sock->sk;
1278 int err = -EINVAL;
1279
1280 lock_sock(sk);
1281
1282 if (sock_flag(sk, SOCK_ZAPPED))
1283 goto out;
1284
1285 if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
1286 goto out;
1287
1288 sk->sk_max_ack_backlog = backlog;
1289 sk->sk_ack_backlog = 0;
1290 sk->sk_state = TCP_LISTEN;
1291 err = 0;
1292 dn_rehash_sock(sk);
1293
1294 out:
1295 release_sock(sk);
1296
1297 return err;
1298 }
1299
1300
1301 static int dn_shutdown(struct socket *sock, int how)
1302 {
1303 struct sock *sk = sock->sk;
1304 struct dn_scp *scp = DN_SK(sk);
1305 int err = -ENOTCONN;
1306
1307 lock_sock(sk);
1308
1309 if (sock->state == SS_UNCONNECTED)
1310 goto out;
1311
1312 err = 0;
1313 if (sock->state == SS_DISCONNECTING)
1314 goto out;
1315
1316 err = -EINVAL;
1317 if (scp->state == DN_O)
1318 goto out;
1319
1320 if (how != SHUT_RDWR)
1321 goto out;
1322
1323 sk->sk_shutdown = SHUTDOWN_MASK;
1324 dn_destroy_sock(sk);
1325 err = 0;
1326
1327 out:
1328 release_sock(sk);
1329
1330 return err;
1331 }
1332
1333 static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1334 {
1335 struct sock *sk = sock->sk;
1336 int err;
1337
1338 lock_sock(sk);
1339 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
1340 release_sock(sk);
1341
1342 return err;
1343 }
1344
1345 static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags)
1346 {
1347 struct sock *sk = sock->sk;
1348 struct dn_scp *scp = DN_SK(sk);
1349 long timeo;
1350 union {
1351 struct optdata_dn opt;
1352 struct accessdata_dn acc;
1353 int mode;
1354 unsigned long win;
1355 int val;
1356 unsigned char services;
1357 unsigned char info;
1358 } u;
1359 int err;
1360
1361 if (optlen && !optval)
1362 return -EINVAL;
1363
1364 if (optlen > sizeof(u))
1365 return -EINVAL;
1366
1367 if (copy_from_user(&u, optval, optlen))
1368 return -EFAULT;
1369
1370 switch (optname) {
1371 case DSO_CONDATA:
1372 if (sock->state == SS_CONNECTED)
1373 return -EISCONN;
1374 if ((scp->state != DN_O) && (scp->state != DN_CR))
1375 return -EINVAL;
1376
1377 if (optlen != sizeof(struct optdata_dn))
1378 return -EINVAL;
1379
1380 if (le16_to_cpu(u.opt.opt_optl) > 16)
1381 return -EINVAL;
1382
1383 memcpy(&scp->conndata_out, &u.opt, optlen);
1384 break;
1385
1386 case DSO_DISDATA:
1387 if (sock->state != SS_CONNECTED &&
1388 scp->accept_mode == ACC_IMMED)
1389 return -ENOTCONN;
1390
1391 if (optlen != sizeof(struct optdata_dn))
1392 return -EINVAL;
1393
1394 if (le16_to_cpu(u.opt.opt_optl) > 16)
1395 return -EINVAL;
1396
1397 memcpy(&scp->discdata_out, &u.opt, optlen);
1398 break;
1399
1400 case DSO_CONACCESS:
1401 if (sock->state == SS_CONNECTED)
1402 return -EISCONN;
1403 if (scp->state != DN_O)
1404 return -EINVAL;
1405
1406 if (optlen != sizeof(struct accessdata_dn))
1407 return -EINVAL;
1408
1409 if ((u.acc.acc_accl > DN_MAXACCL) ||
1410 (u.acc.acc_passl > DN_MAXACCL) ||
1411 (u.acc.acc_userl > DN_MAXACCL))
1412 return -EINVAL;
1413
1414 memcpy(&scp->accessdata, &u.acc, optlen);
1415 break;
1416
1417 case DSO_ACCEPTMODE:
1418 if (sock->state == SS_CONNECTED)
1419 return -EISCONN;
1420 if (scp->state != DN_O)
1421 return -EINVAL;
1422
1423 if (optlen != sizeof(int))
1424 return -EINVAL;
1425
1426 if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
1427 return -EINVAL;
1428
1429 scp->accept_mode = (unsigned char)u.mode;
1430 break;
1431
1432 case DSO_CONACCEPT:
1433 if (scp->state != DN_CR)
1434 return -EINVAL;
1435 timeo = sock_rcvtimeo(sk, 0);
1436 err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
1437 return err;
1438
1439 case DSO_CONREJECT:
1440 if (scp->state != DN_CR)
1441 return -EINVAL;
1442
1443 scp->state = DN_DR;
1444 sk->sk_shutdown = SHUTDOWN_MASK;
1445 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
1446 break;
1447
1448 default:
1449 #ifdef CONFIG_NETFILTER
1450 return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1451 #endif
1452 case DSO_LINKINFO:
1453 case DSO_STREAM:
1454 case DSO_SEQPACKET:
1455 return -ENOPROTOOPT;
1456
1457 case DSO_MAXWINDOW:
1458 if (optlen != sizeof(unsigned long))
1459 return -EINVAL;
1460 if (u.win > NSP_MAX_WINDOW)
1461 u.win = NSP_MAX_WINDOW;
1462 if (u.win == 0)
1463 return -EINVAL;
1464 scp->max_window = u.win;
1465 if (scp->snd_window > u.win)
1466 scp->snd_window = u.win;
1467 break;
1468
1469 case DSO_NODELAY:
1470 if (optlen != sizeof(int))
1471 return -EINVAL;
1472 if (scp->nonagle == 2)
1473 return -EINVAL;
1474 scp->nonagle = (u.val == 0) ? 0 : 1;
1475 /* if (scp->nonagle == 1) { Push pending frames } */
1476 break;
1477
1478 case DSO_CORK:
1479 if (optlen != sizeof(int))
1480 return -EINVAL;
1481 if (scp->nonagle == 1)
1482 return -EINVAL;
1483 scp->nonagle = (u.val == 0) ? 0 : 2;
1484 /* if (scp->nonagle == 0) { Push pending frames } */
1485 break;
1486
1487 case DSO_SERVICES:
1488 if (optlen != sizeof(unsigned char))
1489 return -EINVAL;
1490 if ((u.services & ~NSP_FC_MASK) != 0x01)
1491 return -EINVAL;
1492 if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
1493 return -EINVAL;
1494 scp->services_loc = u.services;
1495 break;
1496
1497 case DSO_INFO:
1498 if (optlen != sizeof(unsigned char))
1499 return -EINVAL;
1500 if (u.info & 0xfc)
1501 return -EINVAL;
1502 scp->info_loc = u.info;
1503 break;
1504 }
1505
1506 return 0;
1507 }
1508
1509 static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1510 {
1511 struct sock *sk = sock->sk;
1512 int err;
1513
1514 lock_sock(sk);
1515 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1516 release_sock(sk);
1517
1518 return err;
1519 }
1520
1521 static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags)
1522 {
1523 struct sock *sk = sock->sk;
1524 struct dn_scp *scp = DN_SK(sk);
1525 struct linkinfo_dn link;
1526 unsigned int r_len;
1527 void *r_data = NULL;
1528 unsigned int val;
1529
1530 if(get_user(r_len , optlen))
1531 return -EFAULT;
1532
1533 switch (optname) {
1534 case DSO_CONDATA:
1535 if (r_len > sizeof(struct optdata_dn))
1536 r_len = sizeof(struct optdata_dn);
1537 r_data = &scp->conndata_in;
1538 break;
1539
1540 case DSO_DISDATA:
1541 if (r_len > sizeof(struct optdata_dn))
1542 r_len = sizeof(struct optdata_dn);
1543 r_data = &scp->discdata_in;
1544 break;
1545
1546 case DSO_CONACCESS:
1547 if (r_len > sizeof(struct accessdata_dn))
1548 r_len = sizeof(struct accessdata_dn);
1549 r_data = &scp->accessdata;
1550 break;
1551
1552 case DSO_ACCEPTMODE:
1553 if (r_len > sizeof(unsigned char))
1554 r_len = sizeof(unsigned char);
1555 r_data = &scp->accept_mode;
1556 break;
1557
1558 case DSO_LINKINFO:
1559 if (r_len > sizeof(struct linkinfo_dn))
1560 r_len = sizeof(struct linkinfo_dn);
1561
1562 memset(&link, 0, sizeof(link));
1563
1564 switch (sock->state) {
1565 case SS_CONNECTING:
1566 link.idn_linkstate = LL_CONNECTING;
1567 break;
1568 case SS_DISCONNECTING:
1569 link.idn_linkstate = LL_DISCONNECTING;
1570 break;
1571 case SS_CONNECTED:
1572 link.idn_linkstate = LL_RUNNING;
1573 break;
1574 default:
1575 link.idn_linkstate = LL_INACTIVE;
1576 }
1577
1578 link.idn_segsize = scp->segsize_rem;
1579 r_data = &link;
1580 break;
1581
1582 default:
1583 #ifdef CONFIG_NETFILTER
1584 {
1585 int ret, len;
1586
1587 if (get_user(len, optlen))
1588 return -EFAULT;
1589
1590 ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
1591 if (ret >= 0)
1592 ret = put_user(len, optlen);
1593 return ret;
1594 }
1595 #endif
1596 case DSO_STREAM:
1597 case DSO_SEQPACKET:
1598 case DSO_CONACCEPT:
1599 case DSO_CONREJECT:
1600 return -ENOPROTOOPT;
1601
1602 case DSO_MAXWINDOW:
1603 if (r_len > sizeof(unsigned long))
1604 r_len = sizeof(unsigned long);
1605 r_data = &scp->max_window;
1606 break;
1607
1608 case DSO_NODELAY:
1609 if (r_len > sizeof(int))
1610 r_len = sizeof(int);
1611 val = (scp->nonagle == 1);
1612 r_data = &val;
1613 break;
1614
1615 case DSO_CORK:
1616 if (r_len > sizeof(int))
1617 r_len = sizeof(int);
1618 val = (scp->nonagle == 2);
1619 r_data = &val;
1620 break;
1621
1622 case DSO_SERVICES:
1623 if (r_len > sizeof(unsigned char))
1624 r_len = sizeof(unsigned char);
1625 r_data = &scp->services_rem;
1626 break;
1627
1628 case DSO_INFO:
1629 if (r_len > sizeof(unsigned char))
1630 r_len = sizeof(unsigned char);
1631 r_data = &scp->info_rem;
1632 break;
1633 }
1634
1635 if (r_data) {
1636 if (copy_to_user(optval, r_data, r_len))
1637 return -EFAULT;
1638 if (put_user(r_len, optlen))
1639 return -EFAULT;
1640 }
1641
1642 return 0;
1643 }
1644
1645
1646 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
1647 {
1648 struct sk_buff *skb;
1649 int len = 0;
1650
1651 if (flags & MSG_OOB)
1652 return !skb_queue_empty(q) ? 1 : 0;
1653
1654 skb_queue_walk(q, skb) {
1655 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1656 len += skb->len;
1657
1658 if (cb->nsp_flags & 0x40) {
1659 /* SOCK_SEQPACKET reads to EOM */
1660 if (sk->sk_type == SOCK_SEQPACKET)
1661 return 1;
1662 /* so does SOCK_STREAM unless WAITALL is specified */
1663 if (!(flags & MSG_WAITALL))
1664 return 1;
1665 }
1666
1667 /* minimum data length for read exceeded */
1668 if (len >= target)
1669 return 1;
1670 }
1671
1672 return 0;
1673 }
1674
1675
1676 static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1677 int flags)
1678 {
1679 struct sock *sk = sock->sk;
1680 struct dn_scp *scp = DN_SK(sk);
1681 struct sk_buff_head *queue = &sk->sk_receive_queue;
1682 size_t target = size > 1 ? 1 : 0;
1683 size_t copied = 0;
1684 int rv = 0;
1685 struct sk_buff *skb, *n;
1686 struct dn_skb_cb *cb = NULL;
1687 unsigned char eor = 0;
1688 long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1689
1690 lock_sock(sk);
1691
1692 if (sock_flag(sk, SOCK_ZAPPED)) {
1693 rv = -EADDRNOTAVAIL;
1694 goto out;
1695 }
1696
1697 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1698 rv = 0;
1699 goto out;
1700 }
1701
1702 rv = dn_check_state(sk, NULL, 0, &timeo, flags);
1703 if (rv)
1704 goto out;
1705
1706 if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
1707 rv = -EOPNOTSUPP;
1708 goto out;
1709 }
1710
1711 if (flags & MSG_OOB)
1712 queue = &scp->other_receive_queue;
1713
1714 if (flags & MSG_WAITALL)
1715 target = size;
1716
1717
1718 /*
1719 * See if there is data ready to read, sleep if there isn't
1720 */
1721 for(;;) {
1722 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1723
1724 if (sk->sk_err)
1725 goto out;
1726
1727 if (!skb_queue_empty(&scp->other_receive_queue)) {
1728 if (!(flags & MSG_OOB)) {
1729 msg->msg_flags |= MSG_OOB;
1730 if (!scp->other_report) {
1731 scp->other_report = 1;
1732 goto out;
1733 }
1734 }
1735 }
1736
1737 if (scp->state != DN_RUN)
1738 goto out;
1739
1740 if (signal_pending(current)) {
1741 rv = sock_intr_errno(timeo);
1742 goto out;
1743 }
1744
1745 if (dn_data_ready(sk, queue, flags, target))
1746 break;
1747
1748 if (flags & MSG_DONTWAIT) {
1749 rv = -EWOULDBLOCK;
1750 goto out;
1751 }
1752
1753 add_wait_queue(sk_sleep(sk), &wait);
1754 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1755 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target), &wait);
1756 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1757 remove_wait_queue(sk_sleep(sk), &wait);
1758 }
1759
1760 skb_queue_walk_safe(queue, skb, n) {
1761 unsigned int chunk = skb->len;
1762 cb = DN_SKB_CB(skb);
1763
1764 if ((chunk + copied) > size)
1765 chunk = size - copied;
1766
1767 if (memcpy_to_msg(msg, skb->data, chunk)) {
1768 rv = -EFAULT;
1769 break;
1770 }
1771 copied += chunk;
1772
1773 if (!(flags & MSG_PEEK))
1774 skb_pull(skb, chunk);
1775
1776 eor = cb->nsp_flags & 0x40;
1777
1778 if (skb->len == 0) {
1779 skb_unlink(skb, queue);
1780 kfree_skb(skb);
1781 /*
1782 * N.B. Don't refer to skb or cb after this point
1783 * in loop.
1784 */
1785 if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
1786 scp->flowloc_sw = DN_SEND;
1787 dn_nsp_send_link(sk, DN_SEND, 0);
1788 }
1789 }
1790
1791 if (eor) {
1792 if (sk->sk_type == SOCK_SEQPACKET)
1793 break;
1794 if (!(flags & MSG_WAITALL))
1795 break;
1796 }
1797
1798 if (flags & MSG_OOB)
1799 break;
1800
1801 if (copied >= target)
1802 break;
1803 }
1804
1805 rv = copied;
1806
1807
1808 if (eor && (sk->sk_type == SOCK_SEQPACKET))
1809 msg->msg_flags |= MSG_EOR;
1810
1811 out:
1812 if (rv == 0)
1813 rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
1814
1815 if ((rv >= 0) && msg->msg_name) {
1816 __sockaddr_check_size(sizeof(struct sockaddr_dn));
1817 memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
1818 msg->msg_namelen = sizeof(struct sockaddr_dn);
1819 }
1820
1821 release_sock(sk);
1822
1823 return rv;
1824 }
1825
1826
1827 static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
1828 {
1829 unsigned char fctype = scp->services_rem & NSP_FC_MASK;
1830 if (skb_queue_len(queue) >= scp->snd_window)
1831 return 1;
1832 if (fctype != NSP_FC_NONE) {
1833 if (flags & MSG_OOB) {
1834 if (scp->flowrem_oth == 0)
1835 return 1;
1836 } else {
1837 if (scp->flowrem_dat == 0)
1838 return 1;
1839 }
1840 }
1841 return 0;
1842 }
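/*
 * Descriptive note: the queue is bounded both by the send window
 * (snd_window) and, when the remote services indicate flow control, by the
 * remote credit counters; dn_sendmsg() below decrements flowrem_dat per
 * segment for NSP_FC_SRC and per complete message for NSP_FC_SCMC, and
 * flowrem_oth for each out-of-band message.
 */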
1843
1844 /*
1845 * The DECnet spec requires that the "routing layer" accepts packets which
1846 * are at least 230 bytes in size. This excludes any headers which the NSP
1847 * layer might add, so we always assume that we'll be using the maximal
1848 * length header on data packets. The variation in length is due to the
1849 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1850 * make much practical difference.
1851 */
1852 unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
1853 {
1854 unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
1855 if (dev) {
1856 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1857 mtu -= LL_RESERVED_SPACE(dev);
1858 if (dn_db->use_long)
1859 mtu -= 21;
1860 else
1861 mtu -= 6;
1862 mtu -= DN_MAX_NSP_DATA_HEADER;
1863 } else {
1864 /*
1865 * 21 = long header, 16 = guess at MAC header length
1866 */
1867 mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
1868 }
1869 if (mtu > mss)
1870 mss = mtu;
1871 return mss;
1872 }
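/*
 * Rough worked example of the calculation above (the MTU value is
 * illustrative): for a device with an MTU of 1500 using the long routing
 * header, the usable segment size is
 * 1500 - LL_RESERVED_SPACE(dev) - 21 - DN_MAX_NSP_DATA_HEADER, and the
 * result never drops below the 230 byte routing-layer minimum less the
 * NSP data header.
 */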
1873
1874 static inline unsigned int dn_current_mss(struct sock *sk, int flags)
1875 {
1876 struct dst_entry *dst = __sk_dst_get(sk);
1877 struct dn_scp *scp = DN_SK(sk);
1878 int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);
1879
1880 /* Other data messages are limited to 16 bytes per packet */
1881 if (flags & MSG_OOB)
1882 return 16;
1883
1884 /* This works out the maximum size of segment we can send out */
1885 if (dst) {
1886 u32 mtu = dst_mtu(dst);
1887 mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
1888 }
1889
1890 return mss_now;
1891 }
1892
1893 /*
1894 * N.B. We get the timeout wrong here, but then we always did get it
1895 * wrong before and this is another step along the road to correcting
1896 * it. It ought to get updated each time we pass through the routine,
1897 * but in practice it probably doesn't matter too much for now.
1898 */
1899 static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
1900 unsigned long datalen, int noblock,
1901 int *errcode)
1902 {
1903 struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
1904 noblock, errcode);
1905 if (skb) {
1906 skb->protocol = htons(ETH_P_DNA_RT);
1907 skb->pkt_type = PACKET_OUTGOING;
1908 }
1909 return skb;
1910 }
1911
1912 static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1913 {
1914 struct sock *sk = sock->sk;
1915 struct dn_scp *scp = DN_SK(sk);
1916 size_t mss;
1917 struct sk_buff_head *queue = &scp->data_xmit_queue;
1918 int flags = msg->msg_flags;
1919 int err = 0;
1920 size_t sent = 0;
1921 int addr_len = msg->msg_namelen;
1922 DECLARE_SOCKADDR(struct sockaddr_dn *, addr, msg->msg_name);
1923 struct sk_buff *skb = NULL;
1924 struct dn_skb_cb *cb;
1925 size_t len;
1926 unsigned char fctype;
1927 long timeo;
1928
1929 if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
1930 return -EOPNOTSUPP;
1931
1932 if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
1933 return -EINVAL;
1934
1935 lock_sock(sk);
1936 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1937 /*
1938 * The only difference between stream sockets and sequenced packet
1939 * sockets is that the stream sockets always behave as if MSG_EOR
1940 * has been set.
1941 */
1942 if (sock->type == SOCK_STREAM) {
1943 if (flags & MSG_EOR) {
1944 err = -EINVAL;
1945 goto out;
1946 }
1947 flags |= MSG_EOR;
1948 }
1949
1950
1951 err = dn_check_state(sk, addr, addr_len, &timeo, flags);
1952 if (err)
1953 goto out_err;
1954
1955 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1956 err = -EPIPE;
1957 if (!(flags & MSG_NOSIGNAL))
1958 send_sig(SIGPIPE, current, 0);
1959 goto out_err;
1960 }
1961
1962 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
1963 dst_negative_advice(sk);
1964
1965 mss = scp->segsize_rem;
1966 fctype = scp->services_rem & NSP_FC_MASK;
1967
1968 mss = dn_current_mss(sk, flags);
1969
1970 if (flags & MSG_OOB) {
1971 queue = &scp->other_xmit_queue;
1972 if (size > mss) {
1973 err = -EMSGSIZE;
1974 goto out;
1975 }
1976 }
1977
1978 scp->persist_fxn = dn_nsp_xmit_timeout;
1979
1980 while(sent < size) {
1981 err = sock_error(sk);
1982 if (err)
1983 goto out;
1984
1985 if (signal_pending(current)) {
1986 err = sock_intr_errno(timeo);
1987 goto out;
1988 }
1989
1990 /*
1991 * Calculate size that we wish to send.
1992 */
1993 len = size - sent;
1994
1995 if (len > mss)
1996 len = mss;
1997
1998 /*
1999 * Wait for queue size to go down below the window
2000 * size.
2001 */
2002 if (dn_queue_too_long(scp, queue, flags)) {
2003 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2004
2005 if (flags & MSG_DONTWAIT) {
2006 err = -EWOULDBLOCK;
2007 goto out;
2008 }
2009
2010 add_wait_queue(sk_sleep(sk), &wait);
2011 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2012 sk_wait_event(sk, &timeo,
2013 !dn_queue_too_long(scp, queue, flags), &wait);
2014 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2015 remove_wait_queue(sk_sleep(sk), &wait);
2016 continue;
2017 }
2018
2019 /*
2020 * Get a suitably sized skb.
2021 * 64 is a bit of a hack really, but it's larger than any
2022 * link-layer headers and has served us well as a good
2023 * guess as to their real length.
2024 */
2025 skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
2026 flags & MSG_DONTWAIT, &err);
2027
2028 if (err)
2029 break;
2030
2031 if (!skb)
2032 continue;
2033
2034 cb = DN_SKB_CB(skb);
2035
2036 skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
2037
2038 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
2039 err = -EFAULT;
2040 goto out;
2041 }
2042
2043 if (flags & MSG_OOB) {
2044 cb->nsp_flags = 0x30;
2045 if (fctype != NSP_FC_NONE)
2046 scp->flowrem_oth--;
2047 } else {
2048 cb->nsp_flags = 0x00;
2049 if (scp->seg_total == 0)
2050 cb->nsp_flags |= 0x20;
2051
2052 scp->seg_total += len;
2053
2054 if (((sent + len) == size) && (flags & MSG_EOR)) {
2055 cb->nsp_flags |= 0x40;
2056 scp->seg_total = 0;
2057 if (fctype == NSP_FC_SCMC)
2058 scp->flowrem_dat--;
2059 }
2060 if (fctype == NSP_FC_SRC)
2061 scp->flowrem_dat--;
2062 }
2063
2064 sent += len;
2065 dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
2066 skb = NULL;
2067
2068 scp->persist = dn_nsp_persist(sk);
2069
2070 }
2071 out:
2072
2073 kfree_skb(skb);
2074
2075 release_sock(sk);
2076
2077 return sent ? sent : err;
2078
2079 out_err:
2080 err = sk_stream_error(sk, flags, err);
2081 release_sock(sk);
2082 return err;
2083 }
2084
2085 static int dn_device_event(struct notifier_block *this, unsigned long event,
2086 void *ptr)
2087 {
2088 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2089
2090 if (!net_eq(dev_net(dev), &init_net))
2091 return NOTIFY_DONE;
2092
2093 switch (event) {
2094 case NETDEV_UP:
2095 dn_dev_up(dev);
2096 break;
2097 case NETDEV_DOWN:
2098 dn_dev_down(dev);
2099 break;
2100 default:
2101 break;
2102 }
2103
2104 return NOTIFY_DONE;
2105 }
2106
2107 static struct notifier_block dn_dev_notifier = {
2108 .notifier_call = dn_device_event,
2109 };
2110
2111 static struct packet_type dn_dix_packet_type __read_mostly = {
2112 .type = cpu_to_be16(ETH_P_DNA_RT),
2113 .func = dn_route_rcv,
2114 };
2115
2116 #ifdef CONFIG_PROC_FS
2117 struct dn_iter_state {
2118 int bucket;
2119 };
2120
2121 static struct sock *dn_socket_get_first(struct seq_file *seq)
2122 {
2123 struct dn_iter_state *state = seq->private;
2124 struct sock *n = NULL;
2125
2126 for(state->bucket = 0;
2127 state->bucket < DN_SK_HASH_SIZE;
2128 ++state->bucket) {
2129 n = sk_head(&dn_sk_hash[state->bucket]);
2130 if (n)
2131 break;
2132 }
2133
2134 return n;
2135 }
2136
2137 static struct sock *dn_socket_get_next(struct seq_file *seq,
2138 struct sock *n)
2139 {
2140 struct dn_iter_state *state = seq->private;
2141
2142 n = sk_next(n);
2143 try_again:
2144 if (n)
2145 goto out;
2146 if (++state->bucket >= DN_SK_HASH_SIZE)
2147 goto out;
2148 n = sk_head(&dn_sk_hash[state->bucket]);
2149 goto try_again;
2150 out:
2151 return n;
2152 }
2153
2154 static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
2155 {
2156 struct sock *sk = dn_socket_get_first(seq);
2157
2158 if (sk) {
2159 while(*pos && (sk = dn_socket_get_next(seq, sk)))
2160 --*pos;
2161 }
2162 return *pos ? NULL : sk;
2163 }
2164
2165 static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
2166 {
2167 void *rc;
2168 read_lock_bh(&dn_hash_lock);
2169 rc = socket_get_idx(seq, &pos);
2170 if (!rc) {
2171 read_unlock_bh(&dn_hash_lock);
2172 }
2173 return rc;
2174 }
2175
2176 static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
2177 {
2178 return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2179 }
2180
2181 static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2182 {
2183 void *rc;
2184
2185 if (v == SEQ_START_TOKEN) {
2186 rc = dn_socket_get_idx(seq, 0);
2187 goto out;
2188 }
2189
2190 rc = dn_socket_get_next(seq, v);
2191 if (rc)
2192 goto out;
2193 read_unlock_bh(&dn_hash_lock);
2194 out:
2195 ++*pos;
2196 return rc;
2197 }
2198
2199 static void dn_socket_seq_stop(struct seq_file *seq, void *v)
2200 {
2201 if (v && v != SEQ_START_TOKEN)
2202 read_unlock_bh(&dn_hash_lock);
2203 }
2204
2205 #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
2206
2207 static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
2208 {
2209 int i;
2210
2211 switch (le16_to_cpu(dn->sdn_objnamel)) {
2212 case 0:
2213 sprintf(buf, "%d", dn->sdn_objnum);
2214 break;
2215 default:
2216 for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
2217 buf[i] = dn->sdn_objname[i];
2218 if (IS_NOT_PRINTABLE(buf[i]))
2219 buf[i] = '.';
2220 }
2221 buf[i] = 0;
2222 }
2223 }
2224
2225 static char *dn_state2asc(unsigned char state)
2226 {
2227 switch (state) {
2228 case DN_O:
2229 return "OPEN";
2230 case DN_CR:
2231 return " CR";
2232 case DN_DR:
2233 return " DR";
2234 case DN_DRC:
2235 return " DRC";
2236 case DN_CC:
2237 return " CC";
2238 case DN_CI:
2239 return " CI";
2240 case DN_NR:
2241 return " NR";
2242 case DN_NC:
2243 return " NC";
2244 case DN_CD:
2245 return " CD";
2246 case DN_RJ:
2247 return " RJ";
2248 case DN_RUN:
2249 return " RUN";
2250 case DN_DI:
2251 return " DI";
2252 case DN_DIC:
2253 return " DIC";
2254 case DN_DN:
2255 return " DN";
2256 case DN_CL:
2257 return " CL";
2258 case DN_CN:
2259 return " CN";
2260 }
2261
2262 return "????";
2263 }
2264
2265 static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
2266 {
2267 struct dn_scp *scp = DN_SK(sk);
2268 char buf1[DN_ASCBUF_LEN];
2269 char buf2[DN_ASCBUF_LEN];
2270 char local_object[DN_MAXOBJL+3];
2271 char remote_object[DN_MAXOBJL+3];
2272
2273 dn_printable_object(&scp->addr, local_object);
2274 dn_printable_object(&scp->peer, remote_object);
2275
2276 seq_printf(seq,
2277 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
2278 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
2279 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1),
2280 scp->addrloc,
2281 scp->numdat,
2282 scp->numoth,
2283 scp->ackxmt_dat,
2284 scp->ackxmt_oth,
2285 scp->flowloc_sw,
2286 local_object,
2287 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2),
2288 scp->addrrem,
2289 scp->numdat_rcv,
2290 scp->numoth_rcv,
2291 scp->ackrcv_dat,
2292 scp->ackrcv_oth,
2293 scp->flowrem_sw,
2294 remote_object,
2295 dn_state2asc(scp->state),
2296 ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
2297 }
2298
2299 static int dn_socket_seq_show(struct seq_file *seq, void *v)
2300 {
2301 if (v == SEQ_START_TOKEN) {
2302 seq_puts(seq, "Local Remote\n");
2303 } else {
2304 dn_socket_format_entry(seq, v);
2305 }
2306 return 0;
2307 }
2308
2309 static const struct seq_operations dn_socket_seq_ops = {
2310 .start = dn_socket_seq_start,
2311 .next = dn_socket_seq_next,
2312 .stop = dn_socket_seq_stop,
2313 .show = dn_socket_seq_show,
2314 };
2315
2316 static int dn_socket_seq_open(struct inode *inode, struct file *file)
2317 {
2318 return seq_open_private(file, &dn_socket_seq_ops,
2319 sizeof(struct dn_iter_state));
2320 }
2321
2322 static const struct file_operations dn_socket_seq_fops = {
2323 .owner = THIS_MODULE,
2324 .open = dn_socket_seq_open,
2325 .read = seq_read,
2326 .llseek = seq_lseek,
2327 .release = seq_release_private,
2328 };
2329 #endif
2330
2331 static const struct net_proto_family dn_family_ops = {
2332 .family = AF_DECnet,
2333 .create = dn_create,
2334 .owner = THIS_MODULE,
2335 };
2336
2337 static const struct proto_ops dn_proto_ops = {
2338 .family = AF_DECnet,
2339 .owner = THIS_MODULE,
2340 .release = dn_release,
2341 .bind = dn_bind,
2342 .connect = dn_connect,
2343 .socketpair = sock_no_socketpair,
2344 .accept = dn_accept,
2345 .getname = dn_getname,
2346 .poll = dn_poll,
2347 .ioctl = dn_ioctl,
2348 .listen = dn_listen,
2349 .shutdown = dn_shutdown,
2350 .setsockopt = dn_setsockopt,
2351 .getsockopt = dn_getsockopt,
2352 .sendmsg = dn_sendmsg,
2353 .recvmsg = dn_recvmsg,
2354 .mmap = sock_no_mmap,
2355 .sendpage = sock_no_sendpage,
2356 };
2357
2358 MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2359 MODULE_AUTHOR("Linux DECnet Project Team");
2360 MODULE_LICENSE("GPL");
2361 MODULE_ALIAS_NETPROTO(PF_DECnet);
2362
2363 static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
2364
2365 static int __init decnet_init(void)
2366 {
2367 int rc;
2368
2369 printk(banner);
2370
2371 rc = proto_register(&dn_proto, 1);
2372 if (rc != 0)
2373 goto out;
2374
2375 dn_neigh_init();
2376 dn_dev_init();
2377 dn_route_init();
2378 dn_fib_init();
2379
2380 sock_register(&dn_family_ops);
2381 dev_add_pack(&dn_dix_packet_type);
2382 register_netdevice_notifier(&dn_dev_notifier);
2383
2384 proc_create("decnet", S_IRUGO, init_net.proc_net, &dn_socket_seq_fops);
2385 dn_register_sysctl();
2386 out:
2387 return rc;
2388
2389 }
2390 module_init(decnet_init);
2391
2392 /*
2393 * Prevent DECnet module unloading until it's fixed properly.
2394 * Requires an audit of the code to check for memory leaks and
2395 * initialisation problems etc.
2396 */
2397 #if 0
2398 static void __exit decnet_exit(void)
2399 {
2400 sock_unregister(AF_DECnet);
2401 rtnl_unregister_all(PF_DECnet);
2402 dev_remove_pack(&dn_dix_packet_type);
2403
2404 dn_unregister_sysctl();
2405
2406 unregister_netdevice_notifier(&dn_dev_notifier);
2407
2408 dn_route_cleanup();
2409 dn_dev_cleanup();
2410 dn_neigh_cleanup();
2411 dn_fib_cleanup();
2412
2413 remove_proc_entry("decnet", init_net.proc_net);
2414
2415 proto_unregister(&dn_proto);
2416
2417 rcu_barrier_bh(); /* Wait for completion of call_rcu_bh()'s */
2418 }
2419 module_exit(decnet_exit);
2420 #endif