1 /* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001-2003 Intel Corp.
6 * Copyright (c) 2001-2002 Nokia, Inc.
7 * Copyright (c) 2001 La Monte H.P. Yarroll
8 *
9 * This file is part of the SCTP kernel implementation
10 *
11 * These functions interface with the sockets layer to implement the
12 * SCTP Extensions for the Sockets API.
13 *
14 * Note that the descriptions from the specification are USER level
15 * functions--this file is the functions which populate the struct proto
16 * for SCTP which is the BOTTOM of the sockets interface.
17 *
18 * This SCTP implementation is free software;
19 * you can redistribute it and/or modify it under the terms of
20 * the GNU General Public License as published by
21 * the Free Software Foundation; either version 2, or (at your option)
22 * any later version.
23 *
24 * This SCTP implementation is distributed in the hope that it
25 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
26 * ************************
27 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
28 * See the GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with GNU CC; see the file COPYING. If not, see
32 * <http://www.gnu.org/licenses/>.
33 *
34 * Please send any bug reports or fixes you make to the
35 * email address(es):
36 * lksctp developers <linux-sctp@vger.kernel.org>
37 *
38 * Written or modified by:
39 * La Monte H.P. Yarroll <piggy@acm.org>
40 * Narasimha Budihal <narsi@refcode.org>
41 * Karl Knutson <karl@athena.chicago.il.us>
42 * Jon Grimm <jgrimm@us.ibm.com>
43 * Xingang Guo <xingang.guo@intel.com>
44 * Daisy Chang <daisyc@us.ibm.com>
45 * Sridhar Samudrala <samudrala@us.ibm.com>
46 * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
47 * Ardelle Fan <ardelle.fan@intel.com>
48 * Ryan Layer <rmlayer@us.ibm.com>
49 * Anup Pemmaiah <pemmaiah@cc.usu.edu>
50 * Kevin Gao <kevin.gao@intel.com>
51 */
52
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54
55 #include <linux/types.h>
56 #include <linux/kernel.h>
57 #include <linux/wait.h>
58 #include <linux/time.h>
59 #include <linux/ip.h>
60 #include <linux/capability.h>
61 #include <linux/fcntl.h>
62 #include <linux/poll.h>
63 #include <linux/init.h>
64 #include <linux/crypto.h>
65 #include <linux/slab.h>
66 #include <linux/file.h>
67 #include <linux/compat.h>
68
69 #include <net/ip.h>
70 #include <net/icmp.h>
71 #include <net/route.h>
72 #include <net/ipv6.h>
73 #include <net/inet_common.h>
74 #include <net/busy_poll.h>
75
76 #include <linux/socket.h> /* for sa_family_t */
77 #include <linux/export.h>
78 #include <net/sock.h>
79 #include <net/sctp/sctp.h>
80 #include <net/sctp/sm.h>
81
82 /* Forward declarations for internal helper functions. */
83 static int sctp_writeable(struct sock *sk);
84 static void sctp_wfree(struct sk_buff *skb);
85 static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
86 size_t msg_len);
87 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
88 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
89 static int sctp_wait_for_accept(struct sock *sk, long timeo);
90 static void sctp_wait_for_close(struct sock *sk, long timeo);
91 static void sctp_destruct_sock(struct sock *sk);
92 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
93 union sctp_addr *addr, int len);
94 static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
95 static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
96 static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
97 static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
98 static int sctp_send_asconf(struct sctp_association *asoc,
99 struct sctp_chunk *chunk);
100 static int sctp_do_bind(struct sock *, union sctp_addr *, int);
101 static int sctp_autobind(struct sock *sk);
102 static void sctp_sock_migrate(struct sock *, struct sock *,
103 struct sctp_association *, sctp_socket_type_t);
104
105 static int sctp_memory_pressure;
106 static atomic_long_t sctp_memory_allocated;
107 struct percpu_counter sctp_sockets_allocated;
108
109 static void sctp_enter_memory_pressure(struct sock *sk)
110 {
111 sctp_memory_pressure = 1;
112 }
113
114
115 /* Get the sndbuf space available at the time on the association. */
116 static inline int sctp_wspace(struct sctp_association *asoc)
117 {
118 int amt;
119
120 if (asoc->ep->sndbuf_policy)
121 amt = asoc->sndbuf_used;
122 else
123 amt = sk_wmem_alloc_get(asoc->base.sk);
124
125 if (amt >= asoc->base.sk->sk_sndbuf) {
126 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
127 amt = 0;
128 else {
129 amt = sk_stream_wspace(asoc->base.sk);
130 if (amt < 0)
131 amt = 0;
132 }
133 } else {
134 amt = asoc->base.sk->sk_sndbuf - amt;
135 }
136 return amt;
137 }
138
139 /* Increment the used sndbuf space count of the corresponding association by
140 * the size of the outgoing data chunk.
141 * Also, set the skb destructor for sndbuf accounting later.
142 *
143 * Since it is always 1-1 between chunk and skb, and also a new skb is always
144 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
145 * destructor in the data chunk skb for the purpose of the sndbuf space
146 * tracking.
147 */
148 static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
149 {
150 struct sctp_association *asoc = chunk->asoc;
151 struct sock *sk = asoc->base.sk;
152
153 /* The sndbuf space is tracked per association. */
154 sctp_association_hold(asoc);
155
156 skb_set_owner_w(chunk->skb, sk);
157
158 chunk->skb->destructor = sctp_wfree;
159 /* Save the chunk pointer in skb for sctp_wfree to use later. */
160 skb_shinfo(chunk->skb)->destructor_arg = chunk;
161
162 asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
163 sizeof(struct sk_buff) +
164 sizeof(struct sctp_chunk);
165
166 atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
167 sk->sk_wmem_queued += chunk->skb->truesize;
168 sk_mem_charge(sk, chunk->skb->truesize);
169 }
170
171 /* Verify that this is a valid address. */
172 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
173 int len)
174 {
175 struct sctp_af *af;
176
177 /* Verify basic sockaddr. */
178 af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
179 if (!af)
180 return -EINVAL;
181
182 /* Is this a valid SCTP address? */
183 if (!af->addr_valid(addr, sctp_sk(sk), NULL))
184 return -EINVAL;
185
186 if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
187 return -EINVAL;
188
189 return 0;
190 }
191
192 /* Look up the association by its id. If this is not a UDP-style
193 * socket, the ID field is always ignored.
194 */
195 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
196 {
197 struct sctp_association *asoc = NULL;
198
199 /* If this is not a UDP-style socket, assoc id should be ignored. */
200 if (!sctp_style(sk, UDP)) {
201 /* Return NULL if the socket state is not ESTABLISHED. It
202 * could be a TCP-style listening socket or a socket which
203 * hasn't yet called connect() to establish an association.
204 */
205 if (!sctp_sstate(sk, ESTABLISHED))
206 return NULL;
207
208 /* Get the first and the only association from the list. */
209 if (!list_empty(&sctp_sk(sk)->ep->asocs))
210 asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
211 struct sctp_association, asocs);
212 return asoc;
213 }
214
215 /* Otherwise this is a UDP-style socket. */
216 if (!id || (id == (sctp_assoc_t)-1))
217 return NULL;
218
219 spin_lock_bh(&sctp_assocs_id_lock);
220 asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
221 spin_unlock_bh(&sctp_assocs_id_lock);
222
223 if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
224 return NULL;
225
226 return asoc;
227 }
228
229 /* Look up the transport from an address and an assoc id. If both address and
230 * id are specified, the associations matching the address and the id should be
231 * the same.
232 */
233 static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
234 struct sockaddr_storage *addr,
235 sctp_assoc_t id)
236 {
237 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
238 struct sctp_transport *transport;
239 union sctp_addr *laddr = (union sctp_addr *)addr;
240
241 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
242 laddr,
243 &transport);
244
245 if (!addr_asoc)
246 return NULL;
247
248 id_asoc = sctp_id2assoc(sk, id);
249 if (id_asoc && (id_asoc != addr_asoc))
250 return NULL;
251
252 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
253 (union sctp_addr *)addr);
254
255 return transport;
256 }
257
258 /* API 3.1.2 bind() - UDP Style Syntax
259 * The syntax of bind() is,
260 *
261 * ret = bind(int sd, struct sockaddr *addr, int addrlen);
262 *
263 * sd - the socket descriptor returned by socket().
264 * addr - the address structure (struct sockaddr_in or struct
265 * sockaddr_in6 [RFC 2553]),
266 * addr_len - the size of the address structure.
267 */
268 static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
269 {
270 int retval = 0;
271
272 lock_sock(sk);
273
274 pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
275 addr, addr_len);
276
277 /* Disallow binding twice. */
278 if (!sctp_sk(sk)->ep->base.bind_addr.port)
279 retval = sctp_do_bind(sk, (union sctp_addr *)addr,
280 addr_len);
281 else
282 retval = -EINVAL;
283
284 release_sock(sk);
285
286 return retval;
287 }
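
/* Illustrative user-space sketch (not part of the kernel source): the
 * UDP-style bind() described in API 3.1.2 above, on a one-to-many SCTP
 * socket.  The descriptor name and port number are hypothetical.
 *
 *      #include <string.h>
 *      #include <arpa/inet.h>
 *      #include <netinet/in.h>
 *      #include <sys/socket.h>
 *
 *      int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *      struct sockaddr_in addr;
 *
 *      memset(&addr, 0, sizeof(addr));
 *      addr.sin_family      = AF_INET;
 *      addr.sin_port        = htons(5000);
 *      addr.sin_addr.s_addr = htonl(INADDR_ANY);
 *
 *      // A second bind() on the same socket fails with EINVAL (see above).
 *      if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *              perror("bind");
 */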
288
289 static long sctp_get_port_local(struct sock *, union sctp_addr *);
290
291 /* Verify this is a valid sockaddr. */
292 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
293 union sctp_addr *addr, int len)
294 {
295 struct sctp_af *af;
296
297 /* Check minimum size. */
298 if (len < sizeof (struct sockaddr))
299 return NULL;
300
301         /* V4-mapped addresses are really of the AF_INET family */
302 if (addr->sa.sa_family == AF_INET6 &&
303 ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
304 if (!opt->pf->af_supported(AF_INET, opt))
305 return NULL;
306 } else {
307 /* Does this PF support this AF? */
308 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
309 return NULL;
310 }
311
312 /* If we get this far, af is valid. */
313 af = sctp_get_af_specific(addr->sa.sa_family);
314
315 if (len < af->sockaddr_len)
316 return NULL;
317
318 return af;
319 }
320
321 /* Bind a local address either to an endpoint or to an association. */
322 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
323 {
324 struct net *net = sock_net(sk);
325 struct sctp_sock *sp = sctp_sk(sk);
326 struct sctp_endpoint *ep = sp->ep;
327 struct sctp_bind_addr *bp = &ep->base.bind_addr;
328 struct sctp_af *af;
329 unsigned short snum;
330 int ret = 0;
331
332 /* Common sockaddr verification. */
333 af = sctp_sockaddr_af(sp, addr, len);
334 if (!af) {
335 pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
336 __func__, sk, addr, len);
337 return -EINVAL;
338 }
339
340 snum = ntohs(addr->v4.sin_port);
341
342 pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
343 __func__, sk, &addr->sa, bp->port, snum, len);
344
345 /* PF specific bind() address verification. */
346 if (!sp->pf->bind_verify(sp, addr))
347 return -EADDRNOTAVAIL;
348
349 /* We must either be unbound, or bind to the same port.
350 * It's OK to allow 0 ports if we are already bound.
351 	 * We'll just inherit an already bound port in this case.
352 */
353 if (bp->port) {
354 if (!snum)
355 snum = bp->port;
356 else if (snum != bp->port) {
357 pr_debug("%s: new port %d doesn't match existing port "
358 "%d\n", __func__, snum, bp->port);
359 return -EINVAL;
360 }
361 }
362
363 if (snum && snum < PROT_SOCK &&
364 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
365 return -EACCES;
366
367 /* See if the address matches any of the addresses we may have
368 * already bound before checking against other endpoints.
369 */
370 if (sctp_bind_addr_match(bp, addr, sp))
371 return -EINVAL;
372
373 /* Make sure we are allowed to bind here.
374 * The function sctp_get_port_local() does duplicate address
375 * detection.
376 */
377 addr->v4.sin_port = htons(snum);
378 if ((ret = sctp_get_port_local(sk, addr))) {
379 return -EADDRINUSE;
380 }
381
382 /* Refresh ephemeral port. */
383 if (!bp->port)
384 bp->port = inet_sk(sk)->inet_num;
385
386 /* Add the address to the bind address list.
387 * Use GFP_ATOMIC since BHs will be disabled.
388 */
389 ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC);
390
391 /* Copy back into socket for getsockname() use. */
392 if (!ret) {
393 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
394 sp->pf->to_sk_saddr(addr, sk);
395 }
396
397 return ret;
398 }
399
400 /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
401 *
402 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
403 * at any one time. If a sender, after sending an ASCONF chunk, decides
404 * it needs to transfer another ASCONF Chunk, it MUST wait until the
405 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
406 * subsequent ASCONF. Note this restriction binds each side, so at any
407 * time two ASCONF may be in-transit on any given association (one sent
408 * from each endpoint).
409 */
410 static int sctp_send_asconf(struct sctp_association *asoc,
411 struct sctp_chunk *chunk)
412 {
413 struct net *net = sock_net(asoc->base.sk);
414 int retval = 0;
415
416 /* If there is an outstanding ASCONF chunk, queue it for later
417 * transmission.
418 */
419 if (asoc->addip_last_asconf) {
420 list_add_tail(&chunk->list, &asoc->addip_chunk_list);
421 goto out;
422 }
423
424 /* Hold the chunk until an ASCONF_ACK is received. */
425 sctp_chunk_hold(chunk);
426 retval = sctp_primitive_ASCONF(net, asoc, chunk);
427 if (retval)
428 sctp_chunk_free(chunk);
429 else
430 asoc->addip_last_asconf = chunk;
431
432 out:
433 return retval;
434 }
435
436 /* Add a list of addresses as bind addresses to local endpoint or
437 * association.
438 *
439 * Basically run through each address specified in the addrs/addrcnt
440 * array/length pair, determine if it is IPv6 or IPv4 and call
441 * sctp_do_bind() on it.
442 *
443 * If any of them fails, then the operation will be reversed and the
444 * ones that were added will be removed.
445 *
446 * Only sctp_setsockopt_bindx() is supposed to call this function.
447 */
448 static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
449 {
450 int cnt;
451 int retval = 0;
452 void *addr_buf;
453 struct sockaddr *sa_addr;
454 struct sctp_af *af;
455
456 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
457 addrs, addrcnt);
458
459 addr_buf = addrs;
460 for (cnt = 0; cnt < addrcnt; cnt++) {
461 		/* The list may contain either IPv4 or IPv6 addresses;
462 		 * determine the address length for walking through the list.
463 */
464 sa_addr = addr_buf;
465 af = sctp_get_af_specific(sa_addr->sa_family);
466 if (!af) {
467 retval = -EINVAL;
468 goto err_bindx_add;
469 }
470
471 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
472 af->sockaddr_len);
473
474 addr_buf += af->sockaddr_len;
475
476 err_bindx_add:
477 if (retval < 0) {
478 /* Failed. Cleanup the ones that have been added */
479 if (cnt > 0)
480 sctp_bindx_rem(sk, addrs, cnt);
481 return retval;
482 }
483 }
484
485 return retval;
486 }
487
488 /* Send an ASCONF chunk with Add IP address parameters to all the peers of the
489 * associations that are part of the endpoint indicating that a list of local
490 * addresses are added to the endpoint.
491 *
492 * If any of the addresses is already in the bind address list of the
493 * association, we do not send the chunk for that association. But it will not
494 * affect other associations.
495 *
496 * Only sctp_setsockopt_bindx() is supposed to call this function.
497 */
498 static int sctp_send_asconf_add_ip(struct sock *sk,
499 struct sockaddr *addrs,
500 int addrcnt)
501 {
502 struct net *net = sock_net(sk);
503 struct sctp_sock *sp;
504 struct sctp_endpoint *ep;
505 struct sctp_association *asoc;
506 struct sctp_bind_addr *bp;
507 struct sctp_chunk *chunk;
508 struct sctp_sockaddr_entry *laddr;
509 union sctp_addr *addr;
510 union sctp_addr saveaddr;
511 void *addr_buf;
512 struct sctp_af *af;
513 struct list_head *p;
514 int i;
515 int retval = 0;
516
517 if (!net->sctp.addip_enable)
518 return retval;
519
520 sp = sctp_sk(sk);
521 ep = sp->ep;
522
523 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
524 __func__, sk, addrs, addrcnt);
525
526 list_for_each_entry(asoc, &ep->asocs, asocs) {
527 if (!asoc->peer.asconf_capable)
528 continue;
529
530 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
531 continue;
532
533 if (!sctp_state(asoc, ESTABLISHED))
534 continue;
535
536 /* Check if any address in the packed array of addresses is
537 * in the bind address list of the association. If so,
538 * do not send the asconf chunk to its peer, but continue with
539 * other associations.
540 */
541 addr_buf = addrs;
542 for (i = 0; i < addrcnt; i++) {
543 addr = addr_buf;
544 af = sctp_get_af_specific(addr->v4.sin_family);
545 if (!af) {
546 retval = -EINVAL;
547 goto out;
548 }
549
550 if (sctp_assoc_lookup_laddr(asoc, addr))
551 break;
552
553 addr_buf += af->sockaddr_len;
554 }
555 if (i < addrcnt)
556 continue;
557
558 /* Use the first valid address in bind addr list of
559 * association as Address Parameter of ASCONF CHUNK.
560 */
561 bp = &asoc->base.bind_addr;
562 p = bp->address_list.next;
563 laddr = list_entry(p, struct sctp_sockaddr_entry, list);
564 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
565 addrcnt, SCTP_PARAM_ADD_IP);
566 if (!chunk) {
567 retval = -ENOMEM;
568 goto out;
569 }
570
571 /* Add the new addresses to the bind address list with
572 * use_as_src set to 0.
573 */
574 addr_buf = addrs;
575 for (i = 0; i < addrcnt; i++) {
576 addr = addr_buf;
577 af = sctp_get_af_specific(addr->v4.sin_family);
578 memcpy(&saveaddr, addr, af->sockaddr_len);
579 retval = sctp_add_bind_addr(bp, &saveaddr,
580 SCTP_ADDR_NEW, GFP_ATOMIC);
581 addr_buf += af->sockaddr_len;
582 }
583 if (asoc->src_out_of_asoc_ok) {
584 struct sctp_transport *trans;
585
586 list_for_each_entry(trans,
587 &asoc->peer.transport_addr_list, transports) {
588 /* Clear the source and route cache */
589 dst_release(trans->dst);
590 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
591 2*asoc->pathmtu, 4380));
592 trans->ssthresh = asoc->peer.i.a_rwnd;
593 trans->rto = asoc->rto_initial;
594 sctp_max_rto(asoc, trans);
595 trans->rtt = trans->srtt = trans->rttvar = 0;
596 sctp_transport_route(trans, NULL,
597 sctp_sk(asoc->base.sk));
598 }
599 }
600 retval = sctp_send_asconf(asoc, chunk);
601 }
602
603 out:
604 return retval;
605 }
606
607 /* Remove a list of addresses from the bind address list. Do not remove the
608 * last address.
609 *
610 * Basically run through each address specified in the addrs/addrcnt
611 * array/length pair, determine if it is IPv6 or IPv4 and call
612 * sctp_del_bind() on it.
613 *
614 * If any of them fails, then the operation will be reversed and the
615 * ones that were removed will be added back.
616 *
617 * At least one address has to be left; if only one address is
618 * available, the operation will return -EBUSY.
619 *
620 * Only sctp_setsockopt_bindx() is supposed to call this function.
621 */
622 static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
623 {
624 struct sctp_sock *sp = sctp_sk(sk);
625 struct sctp_endpoint *ep = sp->ep;
626 int cnt;
627 struct sctp_bind_addr *bp = &ep->base.bind_addr;
628 int retval = 0;
629 void *addr_buf;
630 union sctp_addr *sa_addr;
631 struct sctp_af *af;
632
633 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
634 __func__, sk, addrs, addrcnt);
635
636 addr_buf = addrs;
637 for (cnt = 0; cnt < addrcnt; cnt++) {
638 /* If the bind address list is empty or if there is only one
639 * bind address, there is nothing more to be removed (we need
640 * at least one address here).
641 */
642 if (list_empty(&bp->address_list) ||
643 (sctp_list_single_entry(&bp->address_list))) {
644 retval = -EBUSY;
645 goto err_bindx_rem;
646 }
647
648 sa_addr = addr_buf;
649 af = sctp_get_af_specific(sa_addr->sa.sa_family);
650 if (!af) {
651 retval = -EINVAL;
652 goto err_bindx_rem;
653 }
654
655 if (!af->addr_valid(sa_addr, sp, NULL)) {
656 retval = -EADDRNOTAVAIL;
657 goto err_bindx_rem;
658 }
659
660 if (sa_addr->v4.sin_port &&
661 sa_addr->v4.sin_port != htons(bp->port)) {
662 retval = -EINVAL;
663 goto err_bindx_rem;
664 }
665
666 if (!sa_addr->v4.sin_port)
667 sa_addr->v4.sin_port = htons(bp->port);
668
669 /* FIXME - There is probably a need to check if sk->sk_saddr and
670 * sk->sk_rcv_addr are currently set to one of the addresses to
671 * be removed. This is something which needs to be looked into
672 * when we are fixing the outstanding issues with multi-homing
673 * socket routing and failover schemes. Refer to comments in
674 * sctp_do_bind(). -daisy
675 */
676 retval = sctp_del_bind_addr(bp, sa_addr);
677
678 addr_buf += af->sockaddr_len;
679 err_bindx_rem:
680 if (retval < 0) {
681 			/* Failed. Add the ones that have been removed back */
682 if (cnt > 0)
683 sctp_bindx_add(sk, addrs, cnt);
684 return retval;
685 }
686 }
687
688 return retval;
689 }
690
691 /* Send an ASCONF chunk with Delete IP address parameters to all the peers of
692 * the associations that are part of the endpoint indicating that a list of
693 * local addresses are removed from the endpoint.
694 *
695 * If any of the addresses is already in the bind address list of the
696 * association, we do not send the chunk for that association. But it will not
697 * affect other associations.
698 *
699 * Only sctp_setsockopt_bindx() is supposed to call this function.
700 */
701 static int sctp_send_asconf_del_ip(struct sock *sk,
702 struct sockaddr *addrs,
703 int addrcnt)
704 {
705 struct net *net = sock_net(sk);
706 struct sctp_sock *sp;
707 struct sctp_endpoint *ep;
708 struct sctp_association *asoc;
709 struct sctp_transport *transport;
710 struct sctp_bind_addr *bp;
711 struct sctp_chunk *chunk;
712 union sctp_addr *laddr;
713 void *addr_buf;
714 struct sctp_af *af;
715 struct sctp_sockaddr_entry *saddr;
716 int i;
717 int retval = 0;
718 int stored = 0;
719
720 chunk = NULL;
721 if (!net->sctp.addip_enable)
722 return retval;
723
724 sp = sctp_sk(sk);
725 ep = sp->ep;
726
727 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
728 __func__, sk, addrs, addrcnt);
729
730 list_for_each_entry(asoc, &ep->asocs, asocs) {
731
732 if (!asoc->peer.asconf_capable)
733 continue;
734
735 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
736 continue;
737
738 if (!sctp_state(asoc, ESTABLISHED))
739 continue;
740
741 /* Check if any address in the packed array of addresses is
742 * not present in the bind address list of the association.
743 * If so, do not send the asconf chunk to its peer, but
744 * continue with other associations.
745 */
746 addr_buf = addrs;
747 for (i = 0; i < addrcnt; i++) {
748 laddr = addr_buf;
749 af = sctp_get_af_specific(laddr->v4.sin_family);
750 if (!af) {
751 retval = -EINVAL;
752 goto out;
753 }
754
755 if (!sctp_assoc_lookup_laddr(asoc, laddr))
756 break;
757
758 addr_buf += af->sockaddr_len;
759 }
760 if (i < addrcnt)
761 continue;
762
763 /* Find one address in the association's bind address list
764 * that is not in the packed array of addresses. This is to
765 * make sure that we do not delete all the addresses in the
766 * association.
767 */
768 bp = &asoc->base.bind_addr;
769 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
770 addrcnt, sp);
771 if ((laddr == NULL) && (addrcnt == 1)) {
772 if (asoc->asconf_addr_del_pending)
773 continue;
774 asoc->asconf_addr_del_pending =
775 kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
776 if (asoc->asconf_addr_del_pending == NULL) {
777 retval = -ENOMEM;
778 goto out;
779 }
780 asoc->asconf_addr_del_pending->sa.sa_family =
781 addrs->sa_family;
782 asoc->asconf_addr_del_pending->v4.sin_port =
783 htons(bp->port);
784 if (addrs->sa_family == AF_INET) {
785 struct sockaddr_in *sin;
786
787 sin = (struct sockaddr_in *)addrs;
788 asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
789 } else if (addrs->sa_family == AF_INET6) {
790 struct sockaddr_in6 *sin6;
791
792 sin6 = (struct sockaddr_in6 *)addrs;
793 asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
794 }
795
796 pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
797 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
798 asoc->asconf_addr_del_pending);
799
800 asoc->src_out_of_asoc_ok = 1;
801 stored = 1;
802 goto skip_mkasconf;
803 }
804
805 if (laddr == NULL)
806 return -EINVAL;
807
808 /* We do not need RCU protection throughout this loop
809 * because this is done under a socket lock from the
810 * setsockopt call.
811 */
812 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
813 SCTP_PARAM_DEL_IP);
814 if (!chunk) {
815 retval = -ENOMEM;
816 goto out;
817 }
818
819 skip_mkasconf:
820 /* Reset use_as_src flag for the addresses in the bind address
821 * list that are to be deleted.
822 */
823 addr_buf = addrs;
824 for (i = 0; i < addrcnt; i++) {
825 laddr = addr_buf;
826 af = sctp_get_af_specific(laddr->v4.sin_family);
827 list_for_each_entry(saddr, &bp->address_list, list) {
828 if (sctp_cmp_addr_exact(&saddr->a, laddr))
829 saddr->state = SCTP_ADDR_DEL;
830 }
831 addr_buf += af->sockaddr_len;
832 }
833
834 /* Update the route and saddr entries for all the transports
835 * as some of the addresses in the bind address list are
836 * about to be deleted and cannot be used as source addresses.
837 */
838 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
839 transports) {
840 dst_release(transport->dst);
841 sctp_transport_route(transport, NULL,
842 sctp_sk(asoc->base.sk));
843 }
844
845 if (stored)
846 /* We don't need to transmit ASCONF */
847 continue;
848 retval = sctp_send_asconf(asoc, chunk);
849 }
850 out:
851 return retval;
852 }
853
854 /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
855 int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
856 {
857 struct sock *sk = sctp_opt2sk(sp);
858 union sctp_addr *addr;
859 struct sctp_af *af;
860
861 /* It is safe to write port space in caller. */
862 addr = &addrw->a;
863 addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
864 af = sctp_get_af_specific(addr->sa.sa_family);
865 if (!af)
866 return -EINVAL;
867 if (sctp_verify_addr(sk, addr, af->sockaddr_len))
868 return -EINVAL;
869
870 if (addrw->state == SCTP_ADDR_NEW)
871 return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
872 else
873 return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
874 }
875
876 /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
877 *
878 * API 8.1
879 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
880 * int flags);
881 *
882 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
883 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
884 * or IPv6 addresses.
885 *
886 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
887 * Section 3.1.2 for this usage.
888 *
889 * addrs is a pointer to an array of one or more socket addresses. Each
890 * address is contained in its appropriate structure (i.e. struct
891  * sockaddr_in or struct sockaddr_in6); the family of the address type
892 * must be used to distinguish the address length (note that this
893 * representation is termed a "packed array" of addresses). The caller
894 * specifies the number of addresses in the array with addrcnt.
895 *
896 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
897 * -1, and sets errno to the appropriate error code.
898 *
899 * For SCTP, the port given in each socket address must be the same, or
900 * sctp_bindx() will fail, setting errno to EINVAL.
901 *
902 * The flags parameter is formed from the bitwise OR of zero or more of
903 * the following currently defined flags:
904 *
905 * SCTP_BINDX_ADD_ADDR
906 *
907 * SCTP_BINDX_REM_ADDR
908 *
909 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
910 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
911 * addresses from the association. The two flags are mutually exclusive;
912 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
913 * not remove all addresses from an association; sctp_bindx() will
914 * reject such an attempt with EINVAL.
915 *
916 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
917 * additional addresses with an endpoint after calling bind(). Or use
918 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
919 * socket is associated with so that no new association accepted will be
920 * associated with those addresses. If the endpoint supports dynamic
921  * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR
922  * may cause the endpoint to send the appropriate message to the peer to
923  * change the peer's address lists.
924 *
925 * Adding and removing addresses from a connected association is
926 * optional functionality. Implementations that do not support this
927 * functionality should return EOPNOTSUPP.
928 *
929  * Basically do nothing but copy the addresses from user to kernel
930  * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk.
931 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
932 * from userspace.
933 *
934 * We don't use copy_from_user() for optimization: we first do the
935 * sanity checks (buffer size -fast- and access check-healthy
936 * pointer); if all of those succeed, then we can alloc the memory
937 * (expensive operation) needed to copy the data to kernel. Then we do
938 * the copying without checking the user space area
939 * (__copy_from_user()).
940 *
941 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
942 * it.
943 *
944 * sk The sk of the socket
945 * addrs The pointer to the addresses in user land
946 * addrssize Size of the addrs buffer
947 * op Operation to perform (add or remove, see the flags of
948 * sctp_bindx)
949 *
950 * Returns 0 if ok, <0 errno code on error.
951 */
952 static int sctp_setsockopt_bindx(struct sock *sk,
953 struct sockaddr __user *addrs,
954 int addrs_size, int op)
955 {
956 struct sockaddr *kaddrs;
957 int err;
958 int addrcnt = 0;
959 int walk_size = 0;
960 struct sockaddr *sa_addr;
961 void *addr_buf;
962 struct sctp_af *af;
963
964 pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
965 __func__, sk, addrs, addrs_size, op);
966
967 if (unlikely(addrs_size <= 0))
968 return -EINVAL;
969
970 /* Check the user passed a healthy pointer. */
971 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
972 return -EFAULT;
973
974 /* Alloc space for the address array in kernel memory. */
975 kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
976 if (unlikely(!kaddrs))
977 return -ENOMEM;
978
979 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
980 kfree(kaddrs);
981 return -EFAULT;
982 }
983
984 /* Walk through the addrs buffer and count the number of addresses. */
985 addr_buf = kaddrs;
986 while (walk_size < addrs_size) {
987 if (walk_size + sizeof(sa_family_t) > addrs_size) {
988 kfree(kaddrs);
989 return -EINVAL;
990 }
991
992 sa_addr = addr_buf;
993 af = sctp_get_af_specific(sa_addr->sa_family);
994
995 /* If the address family is not supported or if this address
996 * causes the address buffer to overflow return EINVAL.
997 */
998 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
999 kfree(kaddrs);
1000 return -EINVAL;
1001 }
1002 addrcnt++;
1003 addr_buf += af->sockaddr_len;
1004 walk_size += af->sockaddr_len;
1005 }
1006
1007 /* Do the work. */
1008 switch (op) {
1009 case SCTP_BINDX_ADD_ADDR:
1010 err = sctp_bindx_add(sk, kaddrs, addrcnt);
1011 if (err)
1012 goto out;
1013 err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
1014 break;
1015
1016 case SCTP_BINDX_REM_ADDR:
1017 err = sctp_bindx_rem(sk, kaddrs, addrcnt);
1018 if (err)
1019 goto out;
1020 err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
1021 break;
1022
1023 default:
1024 err = -EINVAL;
1025 break;
1026 }
1027
1028 out:
1029 kfree(kaddrs);
1030
1031 return err;
1032 }
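
/* Illustrative user-space sketch (not part of the kernel source): building
 * the "packed array" of addresses that the bindx path above walks, and
 * adding both with lksctp's sctp_bindx() wrapper.  The descriptor `sd`, the
 * addresses and the port are hypothetical; `sd` is assumed to be an already
 * created SCTP socket.
 *
 *      #include <string.h>
 *      #include <arpa/inet.h>
 *      #include <netinet/in.h>
 *      #include <netinet/sctp.h>
 *
 *      struct sockaddr_in a1, a2;
 *      char packed[sizeof(a1) + sizeof(a2)];
 *
 *      memset(&a1, 0, sizeof(a1));
 *      a1.sin_family = AF_INET;
 *      a1.sin_port   = htons(5000);
 *      inet_pton(AF_INET, "192.0.2.1", &a1.sin_addr);
 *
 *      a2 = a1;                        // same port, second local address
 *      inet_pton(AF_INET, "198.51.100.1", &a2.sin_addr);
 *
 *      memcpy(packed, &a1, sizeof(a1));
 *      memcpy(packed + sizeof(a1), &a2, sizeof(a2));
 *
 *      if (sctp_bindx(sd, (struct sockaddr *)packed, 2,
 *                     SCTP_BINDX_ADD_ADDR) < 0)
 *              perror("sctp_bindx");
 */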
1033
1034 /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
1035 *
1036 * Common routine for handling connect() and sctp_connectx().
1037 * Connect will come in with just a single address.
1038 */
1039 static int __sctp_connect(struct sock *sk,
1040 struct sockaddr *kaddrs,
1041 int addrs_size,
1042 sctp_assoc_t *assoc_id)
1043 {
1044 struct net *net = sock_net(sk);
1045 struct sctp_sock *sp;
1046 struct sctp_endpoint *ep;
1047 struct sctp_association *asoc = NULL;
1048 struct sctp_association *asoc2;
1049 struct sctp_transport *transport;
1050 union sctp_addr to;
1051 sctp_scope_t scope;
1052 long timeo;
1053 int err = 0;
1054 int addrcnt = 0;
1055 int walk_size = 0;
1056 union sctp_addr *sa_addr = NULL;
1057 void *addr_buf;
1058 unsigned short port;
1059 unsigned int f_flags = 0;
1060
1061 sp = sctp_sk(sk);
1062 ep = sp->ep;
1063
1064 /* connect() cannot be done on a socket that is already in ESTABLISHED
1065 * state - UDP-style peeled off socket or a TCP-style socket that
1066 * is already connected.
1067 * It cannot be done even on a TCP-style listening socket.
1068 */
1069 if (sctp_sstate(sk, ESTABLISHED) ||
1070 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
1071 err = -EISCONN;
1072 goto out_free;
1073 }
1074
1075 /* Walk through the addrs buffer and count the number of addresses. */
1076 addr_buf = kaddrs;
1077 while (walk_size < addrs_size) {
1078 struct sctp_af *af;
1079
1080 if (walk_size + sizeof(sa_family_t) > addrs_size) {
1081 err = -EINVAL;
1082 goto out_free;
1083 }
1084
1085 sa_addr = addr_buf;
1086 af = sctp_get_af_specific(sa_addr->sa.sa_family);
1087
1088 /* If the address family is not supported or if this address
1089 * causes the address buffer to overflow return EINVAL.
1090 */
1091 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
1092 err = -EINVAL;
1093 goto out_free;
1094 }
1095
1096 port = ntohs(sa_addr->v4.sin_port);
1097
1098 /* Save current address so we can work with it */
1099 memcpy(&to, sa_addr, af->sockaddr_len);
1100
1101 err = sctp_verify_addr(sk, &to, af->sockaddr_len);
1102 if (err)
1103 goto out_free;
1104
1105 /* Make sure the destination port is correctly set
1106 * in all addresses.
1107 */
1108 if (asoc && asoc->peer.port && asoc->peer.port != port) {
1109 err = -EINVAL;
1110 goto out_free;
1111 }
1112
1113 /* Check if there already is a matching association on the
1114 * endpoint (other than the one created here).
1115 */
1116 asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
1117 if (asoc2 && asoc2 != asoc) {
1118 if (asoc2->state >= SCTP_STATE_ESTABLISHED)
1119 err = -EISCONN;
1120 else
1121 err = -EALREADY;
1122 goto out_free;
1123 }
1124
1125 /* If we could not find a matching association on the endpoint,
1126 * make sure that there is no peeled-off association matching
1127 * the peer address even on another socket.
1128 */
1129 if (sctp_endpoint_is_peeled_off(ep, &to)) {
1130 err = -EADDRNOTAVAIL;
1131 goto out_free;
1132 }
1133
1134 if (!asoc) {
1135 /* If a bind() or sctp_bindx() is not called prior to
1136 * an sctp_connectx() call, the system picks an
1137 * ephemeral port and will choose an address set
1138 * equivalent to binding with a wildcard address.
1139 */
1140 if (!ep->base.bind_addr.port) {
1141 if (sctp_autobind(sk)) {
1142 err = -EAGAIN;
1143 goto out_free;
1144 }
1145 } else {
1146 /*
1147 * If an unprivileged user inherits a 1-many
1148 * style socket with open associations on a
1149 * privileged port, it MAY be permitted to
1150 * accept new associations, but it SHOULD NOT
1151 * be permitted to open new associations.
1152 */
1153 if (ep->base.bind_addr.port < PROT_SOCK &&
1154 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
1155 err = -EACCES;
1156 goto out_free;
1157 }
1158 }
1159
1160 scope = sctp_scope(&to);
1161 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
1162 if (!asoc) {
1163 err = -ENOMEM;
1164 goto out_free;
1165 }
1166
1167 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
1168 GFP_KERNEL);
1169 if (err < 0) {
1170 goto out_free;
1171 }
1172
1173 }
1174
1175 /* Prime the peer's transport structures. */
1176 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
1177 SCTP_UNKNOWN);
1178 if (!transport) {
1179 err = -ENOMEM;
1180 goto out_free;
1181 }
1182
1183 addrcnt++;
1184 addr_buf += af->sockaddr_len;
1185 walk_size += af->sockaddr_len;
1186 }
1187
1188 /* In case the user of sctp_connectx() wants an association
1189 * id back, assign one now.
1190 */
1191 if (assoc_id) {
1192 err = sctp_assoc_set_id(asoc, GFP_KERNEL);
1193 if (err < 0)
1194 goto out_free;
1195 }
1196
1197 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1198 if (err < 0) {
1199 goto out_free;
1200 }
1201
1202 /* Initialize sk's dport and daddr for getpeername() */
1203 inet_sk(sk)->inet_dport = htons(asoc->peer.port);
1204 sp->pf->to_sk_daddr(sa_addr, sk);
1205 sk->sk_err = 0;
1206
1207 /* in-kernel sockets don't generally have a file allocated to them
1208 * if all they do is call sock_create_kern().
1209 */
1210 if (sk->sk_socket->file)
1211 f_flags = sk->sk_socket->file->f_flags;
1212
1213 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1214
1215 err = sctp_wait_for_connect(asoc, &timeo);
1216 if ((err == 0 || err == -EINPROGRESS) && assoc_id)
1217 *assoc_id = asoc->assoc_id;
1218
1219 /* Don't free association on exit. */
1220 asoc = NULL;
1221
1222 out_free:
1223 pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
1224 __func__, asoc, kaddrs, err);
1225
1226 if (asoc) {
1227 /* sctp_primitive_ASSOCIATE may have added this association
1228 		 * to the hash table; try to unhash it, just in case. It's a no-op
1229 		 * if it wasn't hashed, so we're safe.
1230 */
1231 sctp_association_free(asoc);
1232 }
1233 return err;
1234 }
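
/* Illustrative user-space sketch (not part of the kernel source): the
 * single-address connect() case handled by __sctp_connect() above, on a
 * one-to-one (TCP-style) SCTP socket.  Peer address, port and descriptor
 * are hypothetical.
 *
 *      #include <string.h>
 *      #include <arpa/inet.h>
 *      #include <netinet/in.h>
 *      #include <sys/socket.h>
 *
 *      int sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *      struct sockaddr_in peer;
 *
 *      memset(&peer, 0, sizeof(peer));
 *      peer.sin_family = AF_INET;
 *      peer.sin_port   = htons(5000);
 *      inet_pton(AF_INET, "192.0.2.10", &peer.sin_addr);
 *
 *      // Blocks until the association is ESTABLISHED (or fails); on a
 *      // non-blocking socket it returns -1 with errno set to EINPROGRESS
 *      // while the handshake continues.
 *      if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
 *              perror("connect");
 */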
1235
1236 /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
1237 *
1238 * API 8.9
1239 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
1240 * sctp_assoc_t *asoc);
1241 *
1242 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
1243 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
1244 * or IPv6 addresses.
1245 *
1246 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
1247 * Section 3.1.2 for this usage.
1248 *
1249 * addrs is a pointer to an array of one or more socket addresses. Each
1250 * address is contained in its appropriate structure (i.e. struct
1251  * sockaddr_in or struct sockaddr_in6); the family of the address type
1252  * must be used to distinguish the address length (note that this
1253 * representation is termed a "packed array" of addresses). The caller
1254 * specifies the number of addresses in the array with addrcnt.
1255 *
1256 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
1257 * the association id of the new association. On failure, sctp_connectx()
1258 * returns -1, and sets errno to the appropriate error code. The assoc_id
1259 * is not touched by the kernel.
1260 *
1261 * For SCTP, the port given in each socket address must be the same, or
1262 * sctp_connectx() will fail, setting errno to EINVAL.
1263 *
1264 * An application can use sctp_connectx to initiate an association with
1265 * an endpoint that is multi-homed. Much like sctp_bindx() this call
1266 * allows a caller to specify multiple addresses at which a peer can be
1267 * reached. The way the SCTP stack uses the list of addresses to set up
1268 * the association is implementation dependent. This function only
1269 * specifies that the stack will try to make use of all the addresses in
1270 * the list when needed.
1271 *
1272 * Note that the list of addresses passed in is only used for setting up
1273 * the association. It does not necessarily equal the set of addresses
1274 * the peer uses for the resulting association. If the caller wants to
1275 * find out the set of peer addresses, it must use sctp_getpaddrs() to
1276 * retrieve them after the association has been set up.
1277 *
1278  * Basically do nothing but copy the addresses from user to kernel
1279  * land and invoke sctp_connectx(). This is used for tunneling
1280 * the sctp_connectx() request through sctp_setsockopt() from userspace.
1281 *
1282 * We don't use copy_from_user() for optimization: we first do the
1283 * sanity checks (buffer size -fast- and access check-healthy
1284 * pointer); if all of those succeed, then we can alloc the memory
1285 * (expensive operation) needed to copy the data to kernel. Then we do
1286 * the copying without checking the user space area
1287 * (__copy_from_user()).
1288 *
1289 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
1290 * it.
1291 *
1292 * sk The sk of the socket
1293 * addrs The pointer to the addresses in user land
1294 * addrssize Size of the addrs buffer
1295 *
1296 * Returns >=0 if ok, <0 errno code on error.
1297 */
1298 static int __sctp_setsockopt_connectx(struct sock *sk,
1299 struct sockaddr __user *addrs,
1300 int addrs_size,
1301 sctp_assoc_t *assoc_id)
1302 {
1303 struct sockaddr *kaddrs;
1304 gfp_t gfp = GFP_KERNEL;
1305 int err = 0;
1306
1307 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
1308 __func__, sk, addrs, addrs_size);
1309
1310 if (unlikely(addrs_size <= 0))
1311 return -EINVAL;
1312
1313 /* Check the user passed a healthy pointer. */
1314 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
1315 return -EFAULT;
1316
1317 /* Alloc space for the address array in kernel memory. */
1318 if (sk->sk_socket->file)
1319 gfp = GFP_USER | __GFP_NOWARN;
1320 kaddrs = kmalloc(addrs_size, gfp);
1321 if (unlikely(!kaddrs))
1322 return -ENOMEM;
1323
1324 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
1325 err = -EFAULT;
1326 } else {
1327 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
1328 }
1329
1330 kfree(kaddrs);
1331
1332 return err;
1333 }
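
/* Illustrative user-space sketch (not part of the kernel source): connecting
 * to a multi-homed peer with lksctp's sctp_connectx() wrapper, which tunnels
 * the packed address array through the setsockopt/getsockopt paths above.
 * Addresses, port and the descriptor `sd` are hypothetical.
 *
 *      #include <string.h>
 *      #include <arpa/inet.h>
 *      #include <netinet/in.h>
 *      #include <netinet/sctp.h>
 *
 *      struct sockaddr_in peers[2];
 *      sctp_assoc_t asoc_id = 0;
 *
 *      memset(peers, 0, sizeof(peers));
 *      peers[0].sin_family = AF_INET;
 *      peers[0].sin_port   = htons(5000);
 *      inet_pton(AF_INET, "192.0.2.10", &peers[0].sin_addr);
 *      peers[1] = peers[0];
 *      inet_pton(AF_INET, "198.51.100.10", &peers[1].sin_addr);
 *
 *      if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &asoc_id) < 0)
 *              perror("sctp_connectx");
 */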
1334
1335 /*
1336 * This is an older interface. It's kept for backward compatibility
1337 * to the option that doesn't provide association id.
1338 */
1339 static int sctp_setsockopt_connectx_old(struct sock *sk,
1340 struct sockaddr __user *addrs,
1341 int addrs_size)
1342 {
1343 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
1344 }
1345
1346 /*
1347  * New interface for the API. Since the API is done with a socket
1348  * option, to make it simple we feed the association id back as a return
1349 * indication to the call. Error is always negative and association id is
1350 * always positive.
1351 */
1352 static int sctp_setsockopt_connectx(struct sock *sk,
1353 struct sockaddr __user *addrs,
1354 int addrs_size)
1355 {
1356 sctp_assoc_t assoc_id = 0;
1357 int err = 0;
1358
1359 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
1360
1361 if (err)
1362 return err;
1363 else
1364 return assoc_id;
1365 }
1366
1367 /*
1368 * New (hopefully final) interface for the API.
1369  * We use the sctp_getaddrs_old structure so that the user-space library
1370  * can avoid any unnecessary allocations. The only difference
1371  * is that we store the actual length of the address buffer in the
1372  * addr_num structure member. That way we can re-use the existing
1373 * code.
1374 */
1375 #ifdef CONFIG_COMPAT
1376 struct compat_sctp_getaddrs_old {
1377 sctp_assoc_t assoc_id;
1378 s32 addr_num;
1379 compat_uptr_t addrs; /* struct sockaddr * */
1380 };
1381 #endif
1382
1383 static int sctp_getsockopt_connectx3(struct sock *sk, int len,
1384 char __user *optval,
1385 int __user *optlen)
1386 {
1387 struct sctp_getaddrs_old param;
1388 sctp_assoc_t assoc_id = 0;
1389 int err = 0;
1390
1391 #ifdef CONFIG_COMPAT
1392 if (is_compat_task()) {
1393 struct compat_sctp_getaddrs_old param32;
1394
1395 if (len < sizeof(param32))
1396 return -EINVAL;
1397 if (copy_from_user(&param32, optval, sizeof(param32)))
1398 return -EFAULT;
1399
1400 param.assoc_id = param32.assoc_id;
1401 param.addr_num = param32.addr_num;
1402 param.addrs = compat_ptr(param32.addrs);
1403 } else
1404 #endif
1405 {
1406 if (len < sizeof(param))
1407 return -EINVAL;
1408 if (copy_from_user(&param, optval, sizeof(param)))
1409 return -EFAULT;
1410 }
1411
1412 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
1413 param.addrs, param.addr_num,
1414 &assoc_id);
1415 if (err == 0 || err == -EINPROGRESS) {
1416 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
1417 return -EFAULT;
1418 if (put_user(sizeof(assoc_id), optlen))
1419 return -EFAULT;
1420 }
1421
1422 return err;
1423 }
1424
1425 /* API 3.1.4 close() - UDP Style Syntax
1426 * Applications use close() to perform graceful shutdown (as described in
1427 * Section 10.1 of [SCTP]) on ALL the associations currently represented
1428 * by a UDP-style socket.
1429 *
1430 * The syntax is
1431 *
1432 * ret = close(int sd);
1433 *
1434 * sd - the socket descriptor of the associations to be closed.
1435 *
1436 * To gracefully shutdown a specific association represented by the
1437 * UDP-style socket, an application should use the sendmsg() call,
1438 * passing no user data, but including the appropriate flag in the
1439 * ancillary data (see Section xxxx).
1440 *
1441 * If sd in the close() call is a branched-off socket representing only
1442 * one association, the shutdown is performed on that association only.
1443 *
1444 * 4.1.6 close() - TCP Style Syntax
1445 *
1446 * Applications use close() to gracefully close down an association.
1447 *
1448 * The syntax is:
1449 *
1450 * int close(int sd);
1451 *
1452 * sd - the socket descriptor of the association to be closed.
1453 *
1454 * After an application calls close() on a socket descriptor, no further
1455 * socket operations will succeed on that descriptor.
1456 *
1457 * API 7.1.4 SO_LINGER
1458 *
1459 * An application using the TCP-style socket can use this option to
1460 * perform the SCTP ABORT primitive. The linger option structure is:
1461 *
1462 * struct linger {
1463 * int l_onoff; // option on/off
1464 * int l_linger; // linger time
1465 * };
1466 *
1467 * To enable the option, set l_onoff to 1. If the l_linger value is set
1468 * to 0, calling close() is the same as the ABORT primitive. If the
1469 * value is set to a negative value, the setsockopt() call will return
1470 * an error. If the value is set to a positive value linger_time, the
1471 * close() can be blocked for at most linger_time ms. If the graceful
1472 * shutdown phase does not finish during this period, close() will
1473 * return but the graceful shutdown phase continues in the system.
1474 */
1475 static void sctp_close(struct sock *sk, long timeout)
1476 {
1477 struct net *net = sock_net(sk);
1478 struct sctp_endpoint *ep;
1479 struct sctp_association *asoc;
1480 struct list_head *pos, *temp;
1481 unsigned int data_was_unread;
1482
1483 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
1484
1485 lock_sock(sk);
1486 sk->sk_shutdown = SHUTDOWN_MASK;
1487 sk->sk_state = SCTP_SS_CLOSING;
1488
1489 ep = sctp_sk(sk)->ep;
1490
1491 /* Clean up any skbs sitting on the receive queue. */
1492 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
1493 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
1494
1495 /* Walk all associations on an endpoint. */
1496 list_for_each_safe(pos, temp, &ep->asocs) {
1497 asoc = list_entry(pos, struct sctp_association, asocs);
1498
1499 if (sctp_style(sk, TCP)) {
1500 /* A closed association can still be in the list if
1501 * it belongs to a TCP-style listening socket that is
1502 * not yet accepted. If so, free it. If not, send an
1503 * ABORT or SHUTDOWN based on the linger options.
1504 */
1505 if (sctp_state(asoc, CLOSED)) {
1506 sctp_association_free(asoc);
1507 continue;
1508 }
1509 }
1510
1511 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
1512 !skb_queue_empty(&asoc->ulpq.reasm) ||
1513 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
1514 struct sctp_chunk *chunk;
1515
1516 chunk = sctp_make_abort_user(asoc, NULL, 0);
1517 sctp_primitive_ABORT(net, asoc, chunk);
1518 } else
1519 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1520 }
1521
1522 /* On a TCP-style socket, block for at most linger_time if set. */
1523 if (sctp_style(sk, TCP) && timeout)
1524 sctp_wait_for_close(sk, timeout);
1525
1526 /* This will run the backlog queue. */
1527 release_sock(sk);
1528
1529 /* Supposedly, no process has access to the socket, but
1530 * the net layers still may.
1531 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
1532 * held and that should be grabbed before socket lock.
1533 */
1534 spin_lock_bh(&net->sctp.addr_wq_lock);
1535 bh_lock_sock(sk);
1536
1537 	/* Hold the sock, since sk_common_release() will call sock_put()
1538 	 * and we have just a little more cleanup to do.
1539 */
1540 sock_hold(sk);
1541 sk_common_release(sk);
1542
1543 bh_unlock_sock(sk);
1544 spin_unlock_bh(&net->sctp.addr_wq_lock);
1545
1546 sock_put(sk);
1547
1548 SCTP_DBG_OBJCNT_DEC(sock);
1549 }
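
/* Illustrative user-space sketch (not part of the kernel source): the
 * SO_LINGER behaviour described in API 7.1.4 above.  With l_onoff set and
 * l_linger zero, close() on a TCP-style SCTP socket sends an ABORT instead
 * of a graceful SHUTDOWN.  The descriptor `sd` is hypothetical.
 *
 *      #include <sys/socket.h>
 *      #include <unistd.h>
 *
 *      struct linger lin = {
 *              .l_onoff  = 1,  // enable lingering
 *              .l_linger = 0,  // 0 => close() acts like the ABORT primitive
 *      };
 *
 *      if (setsockopt(sd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin)) < 0)
 *              perror("setsockopt(SO_LINGER)");
 *      close(sd);
 */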
1550
1551 /* Handle EPIPE error. */
1552 static int sctp_error(struct sock *sk, int flags, int err)
1553 {
1554 if (err == -EPIPE)
1555 err = sock_error(sk) ? : -EPIPE;
1556 if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
1557 send_sig(SIGPIPE, current, 0);
1558 return err;
1559 }
1560
1561 /* API 3.1.3 sendmsg() - UDP Style Syntax
1562 *
1563 * An application uses sendmsg() and recvmsg() calls to transmit data to
1564 * and receive data from its peer.
1565 *
1566 * ssize_t sendmsg(int socket, const struct msghdr *message,
1567 * int flags);
1568 *
1569 * socket - the socket descriptor of the endpoint.
1570 * message - pointer to the msghdr structure which contains a single
1571 * user message and possibly some ancillary data.
1572 *
1573 * See Section 5 for complete description of the data
1574 * structures.
1575 *
1576 * flags - flags sent or received with the user message, see Section
1577 * 5 for complete description of the flags.
1578 *
1579 * Note: This function could use a rewrite especially when explicit
1580 * connect support comes in.
1581 */
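
/* Illustrative user-space sketch (not part of the kernel source): a
 * sendmsg() call carrying an SCTP_SNDRCV ancillary message, which is what
 * the CMSG parsing below picks up.  Stream number, payload and descriptor
 * are hypothetical; `sd` is assumed to be a connected one-to-one (TCP-style)
 * SCTP socket so that no msg_name is required.
 *
 *      #include <string.h>
 *      #include <sys/socket.h>
 *      #include <netinet/in.h>
 *      #include <netinet/sctp.h>
 *
 *      char payload[] = "hello";
 *      struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
 *      char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))] = { 0 };
 *      struct msghdr msg = { 0 };
 *      struct cmsghdr *cmsg;
 *      struct sctp_sndrcvinfo *sinfo;
 *
 *      msg.msg_iov        = &iov;
 *      msg.msg_iovlen     = 1;
 *      msg.msg_control    = cbuf;
 *      msg.msg_controllen = sizeof(cbuf);
 *
 *      cmsg = CMSG_FIRSTHDR(&msg);
 *      cmsg->cmsg_level = IPPROTO_SCTP;
 *      cmsg->cmsg_type  = SCTP_SNDRCV;
 *      cmsg->cmsg_len   = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
 *
 *      sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
 *      memset(sinfo, 0, sizeof(*sinfo));
 *      sinfo->sinfo_stream = 1;        // send on stream 1
 *
 *      if (sendmsg(sd, &msg, 0) < 0)
 *              perror("sendmsg");
 */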
1582 /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
1583
1584 static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
1585
1586 static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1587 {
1588 struct net *net = sock_net(sk);
1589 struct sctp_sock *sp;
1590 struct sctp_endpoint *ep;
1591 struct sctp_association *new_asoc = NULL, *asoc = NULL;
1592 struct sctp_transport *transport, *chunk_tp;
1593 struct sctp_chunk *chunk;
1594 union sctp_addr to;
1595 struct sockaddr *msg_name = NULL;
1596 struct sctp_sndrcvinfo default_sinfo;
1597 struct sctp_sndrcvinfo *sinfo;
1598 struct sctp_initmsg *sinit;
1599 sctp_assoc_t associd = 0;
1600 sctp_cmsgs_t cmsgs = { NULL };
1601 sctp_scope_t scope;
1602 bool fill_sinfo_ttl = false, wait_connect = false;
1603 struct sctp_datamsg *datamsg;
1604 int msg_flags = msg->msg_flags;
1605 __u16 sinfo_flags = 0;
1606 long timeo;
1607 int err;
1608
1609 err = 0;
1610 sp = sctp_sk(sk);
1611 ep = sp->ep;
1612
1613 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
1614 msg, msg_len, ep);
1615
1616 /* We cannot send a message over a TCP-style listening socket. */
1617 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
1618 err = -EPIPE;
1619 goto out_nounlock;
1620 }
1621
1622 /* Parse out the SCTP CMSGs. */
1623 err = sctp_msghdr_parse(msg, &cmsgs);
1624 if (err) {
1625 pr_debug("%s: msghdr parse err:%x\n", __func__, err);
1626 goto out_nounlock;
1627 }
1628
1629 /* Fetch the destination address for this packet. This
1630 * address only selects the association--it is not necessarily
1631 * the address we will send to.
1632 * For a peeled-off socket, msg_name is ignored.
1633 */
1634 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
1635 int msg_namelen = msg->msg_namelen;
1636
1637 err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
1638 msg_namelen);
1639 if (err)
1640 return err;
1641
1642 if (msg_namelen > sizeof(to))
1643 msg_namelen = sizeof(to);
1644 memcpy(&to, msg->msg_name, msg_namelen);
1645 msg_name = msg->msg_name;
1646 }
1647
1648 sinit = cmsgs.init;
1649 if (cmsgs.sinfo != NULL) {
1650 memset(&default_sinfo, 0, sizeof(default_sinfo));
1651 default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
1652 default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
1653 default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
1654 default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
1655 default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;
1656
1657 sinfo = &default_sinfo;
1658 fill_sinfo_ttl = true;
1659 } else {
1660 sinfo = cmsgs.srinfo;
1661 }
1662 /* Did the user specify SNDINFO/SNDRCVINFO? */
1663 if (sinfo) {
1664 sinfo_flags = sinfo->sinfo_flags;
1665 associd = sinfo->sinfo_assoc_id;
1666 }
1667
1668 pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
1669 msg_len, sinfo_flags);
1670
1671 /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
1672 if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
1673 err = -EINVAL;
1674 goto out_nounlock;
1675 }
1676
1677 /* If SCTP_EOF is set, no data can be sent. Disallow sending zero
1678 * length messages when SCTP_EOF|SCTP_ABORT is not set.
1679 * If SCTP_ABORT is set, the message length could be non zero with
1680 * the msg_iov set to the user abort reason.
1681 */
1682 if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
1683 (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
1684 err = -EINVAL;
1685 goto out_nounlock;
1686 }
1687
1688 /* If SCTP_ADDR_OVER is set, there must be an address
1689 * specified in msg_name.
1690 */
1691 if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
1692 err = -EINVAL;
1693 goto out_nounlock;
1694 }
1695
1696 transport = NULL;
1697
1698 pr_debug("%s: about to look up association\n", __func__);
1699
1700 lock_sock(sk);
1701
1702 /* If a msg_name has been specified, assume this is to be used. */
1703 if (msg_name) {
1704 /* Look for a matching association on the endpoint. */
1705 asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
1706 if (!asoc) {
1707 /* If we could not find a matching association on the
1708 * endpoint, make sure that it is not a TCP-style
1709 * socket that already has an association or there is
1710 * no peeled-off association on another socket.
1711 */
1712 if ((sctp_style(sk, TCP) &&
1713 sctp_sstate(sk, ESTABLISHED)) ||
1714 sctp_endpoint_is_peeled_off(ep, &to)) {
1715 err = -EADDRNOTAVAIL;
1716 goto out_unlock;
1717 }
1718 }
1719 } else {
1720 asoc = sctp_id2assoc(sk, associd);
1721 if (!asoc) {
1722 err = -EPIPE;
1723 goto out_unlock;
1724 }
1725 }
1726
1727 if (asoc) {
1728 pr_debug("%s: just looked up association:%p\n", __func__, asoc);
1729
1730 /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
1731 * socket that has an association in CLOSED state. This can
1732 * happen when an accepted socket has an association that is
1733 * already CLOSED.
1734 */
1735 if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
1736 err = -EPIPE;
1737 goto out_unlock;
1738 }
1739
1740 if (sinfo_flags & SCTP_EOF) {
1741 pr_debug("%s: shutting down association:%p\n",
1742 __func__, asoc);
1743
1744 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1745 err = 0;
1746 goto out_unlock;
1747 }
1748 if (sinfo_flags & SCTP_ABORT) {
1749
1750 chunk = sctp_make_abort_user(asoc, msg, msg_len);
1751 if (!chunk) {
1752 err = -ENOMEM;
1753 goto out_unlock;
1754 }
1755
1756 pr_debug("%s: aborting association:%p\n",
1757 __func__, asoc);
1758
1759 sctp_primitive_ABORT(net, asoc, chunk);
1760 err = 0;
1761 goto out_unlock;
1762 }
1763 }
1764
1765 /* Do we need to create the association? */
1766 if (!asoc) {
1767 pr_debug("%s: there is no association yet\n", __func__);
1768
1769 if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
1770 err = -EINVAL;
1771 goto out_unlock;
1772 }
1773
1774 /* Check for invalid stream against the stream counts,
1775 * either the default or the user specified stream counts.
1776 */
1777 if (sinfo) {
1778 if (!sinit || !sinit->sinit_num_ostreams) {
1779 /* Check against the defaults. */
1780 if (sinfo->sinfo_stream >=
1781 sp->initmsg.sinit_num_ostreams) {
1782 err = -EINVAL;
1783 goto out_unlock;
1784 }
1785 } else {
1786 /* Check against the requested. */
1787 if (sinfo->sinfo_stream >=
1788 sinit->sinit_num_ostreams) {
1789 err = -EINVAL;
1790 goto out_unlock;
1791 }
1792 }
1793 }
1794
1795 /*
1796 * API 3.1.2 bind() - UDP Style Syntax
1797 * If a bind() or sctp_bindx() is not called prior to a
1798 * sendmsg() call that initiates a new association, the
1799 * system picks an ephemeral port and will choose an address
1800 * set equivalent to binding with a wildcard address.
1801 */
1802 if (!ep->base.bind_addr.port) {
1803 if (sctp_autobind(sk)) {
1804 err = -EAGAIN;
1805 goto out_unlock;
1806 }
1807 } else {
1808 /*
1809 * If an unprivileged user inherits a one-to-many
1810 * style socket with open associations on a privileged
1811 * port, it MAY be permitted to accept new associations,
1812 * but it SHOULD NOT be permitted to open new
1813 * associations.
1814 */
1815 if (ep->base.bind_addr.port < PROT_SOCK &&
1816 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
1817 err = -EACCES;
1818 goto out_unlock;
1819 }
1820 }
1821
1822 scope = sctp_scope(&to);
1823 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
1824 if (!new_asoc) {
1825 err = -ENOMEM;
1826 goto out_unlock;
1827 }
1828 asoc = new_asoc;
1829 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
1830 if (err < 0) {
1831 err = -ENOMEM;
1832 goto out_free;
1833 }
1834
1835 /* If the SCTP_INIT ancillary data is specified, set all
1836 * the association init values accordingly.
1837 */
1838 if (sinit) {
1839 if (sinit->sinit_num_ostreams) {
1840 asoc->c.sinit_num_ostreams =
1841 sinit->sinit_num_ostreams;
1842 }
1843 if (sinit->sinit_max_instreams) {
1844 asoc->c.sinit_max_instreams =
1845 sinit->sinit_max_instreams;
1846 }
1847 if (sinit->sinit_max_attempts) {
1848 asoc->max_init_attempts
1849 = sinit->sinit_max_attempts;
1850 }
1851 if (sinit->sinit_max_init_timeo) {
1852 asoc->max_init_timeo =
1853 msecs_to_jiffies(sinit->sinit_max_init_timeo);
1854 }
1855 }
1856
1857 /* Prime the peer's transport structures. */
1858 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
1859 if (!transport) {
1860 err = -ENOMEM;
1861 goto out_free;
1862 }
1863 }
1864
1865 /* ASSERT: we have a valid association at this point. */
1866 pr_debug("%s: we have a valid association\n", __func__);
1867
1868 if (!sinfo) {
1869 /* If the user didn't specify SNDINFO/SNDRCVINFO, make up
1870 * one with some defaults.
1871 */
1872 memset(&default_sinfo, 0, sizeof(default_sinfo));
1873 default_sinfo.sinfo_stream = asoc->default_stream;
1874 default_sinfo.sinfo_flags = asoc->default_flags;
1875 default_sinfo.sinfo_ppid = asoc->default_ppid;
1876 default_sinfo.sinfo_context = asoc->default_context;
1877 default_sinfo.sinfo_timetolive = asoc->default_timetolive;
1878 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
1879
1880 sinfo = &default_sinfo;
1881 } else if (fill_sinfo_ttl) {
1882 /* In case SNDINFO was specified, we still need to fill
1883 * it with a default ttl from the assoc here.
1884 */
1885 sinfo->sinfo_timetolive = asoc->default_timetolive;
1886 }
1887
1888 /* API 7.1.7, the sndbuf size per association bounds the
1889 * maximum size of data that can be sent in a single send call.
1890 */
1891 if (msg_len > sk->sk_sndbuf) {
1892 err = -EMSGSIZE;
1893 goto out_free;
1894 }
1895
1896 if (asoc->pmtu_pending)
1897 sctp_assoc_pending_pmtu(sk, asoc);
1898
1899 /* If fragmentation is disabled and the message length exceeds the
1900 * association fragmentation point, return EMSGSIZE. The I-D
1901 * does not specify what this error is, but this looks like
1902 * a great fit.
1903 */
1904 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
1905 err = -EMSGSIZE;
1906 goto out_free;
1907 }
1908
1909 /* Check for invalid stream. */
1910 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
1911 err = -EINVAL;
1912 goto out_free;
1913 }
1914
1915 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1916 if (!sctp_wspace(asoc)) {
1917 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
1918 if (err)
1919 goto out_free;
1920 }
1921
1922 /* If an address is passed with the sendto/sendmsg call, it is used
1923 * to override the primary destination address in the TCP model, or
1924 * when SCTP_ADDR_OVER flag is set in the UDP model.
1925 */
1926 if ((sctp_style(sk, TCP) && msg_name) ||
1927 (sinfo_flags & SCTP_ADDR_OVER)) {
1928 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
1929 if (!chunk_tp) {
1930 err = -EINVAL;
1931 goto out_free;
1932 }
1933 } else
1934 chunk_tp = NULL;
1935
1936 /* Auto-connect, if we aren't connected already. */
1937 if (sctp_state(asoc, CLOSED)) {
1938 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1939 if (err < 0)
1940 goto out_free;
1941
1942 wait_connect = true;
1943 pr_debug("%s: we associated primitively\n", __func__);
1944 }
1945
1946 /* Break the message into multiple chunks of maximum size. */
1947 datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
1948 if (IS_ERR(datamsg)) {
1949 err = PTR_ERR(datamsg);
1950 goto out_free;
1951 }
1952
1953 /* Now send the (possibly) fragmented message. */
1954 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
1955 /* Do accounting for the write space. */
1956 sctp_set_owner_w(chunk);
1957
1958 chunk->transport = chunk_tp;
1959 }
1960
1961 /* Send it to the lower layers. Note: all chunks
1962 * must either fail or succeed. The lower layer
1963 * works that way today. Keep it that way or this
1964 * breaks.
1965 */
1966 err = sctp_primitive_SEND(net, asoc, datamsg);
1967 sctp_datamsg_put(datamsg);
1968 /* Did the lower layer accept the chunk? */
1969 if (err)
1970 goto out_free;
1971
1972 pr_debug("%s: we sent primitively\n", __func__);
1973
1974 err = msg_len;
1975
1976 if (unlikely(wait_connect)) {
1977 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
1978 sctp_wait_for_connect(asoc, &timeo);
1979 }
1980
1981 /* If we are already past ASSOCIATE, the lower
1982 * layers are responsible for association cleanup.
1983 */
1984 goto out_unlock;
1985
1986 out_free:
1987 if (new_asoc)
1988 sctp_association_free(asoc);
1989 out_unlock:
1990 release_sock(sk);
1991
1992 out_nounlock:
1993 return sctp_error(sk, msg_flags, err);
1994
1995 #if 0
1996 do_sock_err:
1997 if (msg_len)
1998 err = msg_len;
1999 else
2000 err = sock_error(sk);
2001 goto out;
2002
2003 do_interrupted:
2004 if (msg_len)
2005 err = msg_len;
2006 goto out;
2007 #endif /* 0 */
2008 }
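
/* Illustrative user-space sketch (not part of this kernel source): the
 * sending side of the path implemented above, using the lksctp-tools
 * sctp_sendmsg() helper (link with -lsctp), which packs the stream, flags,
 * ppid and ttl into SCTP_SNDRCV ancillary data for this function to parse.
 * The socket descriptor 'sd' and destination 'dst' are assumed to exist.
 *
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *	#include <netinet/sctp.h>
 *
 *	static int send_on_stream(int sd, struct sockaddr_in *dst)
 *	{
 *		static const char buf[] = "hello";
 *
 *		// stream 1, ordered delivery, ppid 42, no ttl, no context
 *		return sctp_sendmsg(sd, buf, sizeof(buf),
 *				    (struct sockaddr *)dst, sizeof(*dst),
 *				    htonl(42), 0, 1, 0, 0);
 *	}
 */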
2009
2010 /* This is an extended version of skb_pull() that removes the data from the
2011 * start of an skb even when data is spread across the list of skbs in the
2012 * frag_list. len specifies the total amount of data that needs to be removed.
2013 * When 'len' bytes can be removed from the skb, it returns 0.
2014 * If 'len' exceeds the total skb length, it returns the no. of bytes that
2015 * could not be removed.
2016 */
2017 static int sctp_skb_pull(struct sk_buff *skb, int len)
2018 {
2019 struct sk_buff *list;
2020 int skb_len = skb_headlen(skb);
2021 int rlen;
2022
2023 if (len <= skb_len) {
2024 __skb_pull(skb, len);
2025 return 0;
2026 }
2027 len -= skb_len;
2028 __skb_pull(skb, skb_len);
2029
2030 skb_walk_frags(skb, list) {
2031 rlen = sctp_skb_pull(list, len);
2032 skb->len -= (len-rlen);
2033 skb->data_len -= (len-rlen);
2034
2035 if (!rlen)
2036 return 0;
2037
2038 len = rlen;
2039 }
2040
2041 return len;
2042 }
2043
2044 /* API 3.1.3 recvmsg() - UDP Style Syntax
2045 *
2046 * ssize_t recvmsg(int socket, struct msghdr *message,
2047 * int flags);
2048 *
2049 * socket - the socket descriptor of the endpoint.
2050 * message - pointer to the msghdr structure which contains a single
2051 * user message and possibly some ancillary data.
2052 *
2053 * See Section 5 for complete description of the data
2054 * structures.
2055 *
2056 * flags - flags sent or received with the user message, see Section
2057 * 5 for complete description of the flags.
2058 */
2059 static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2060 int noblock, int flags, int *addr_len)
2061 {
2062 struct sctp_ulpevent *event = NULL;
2063 struct sctp_sock *sp = sctp_sk(sk);
2064 struct sk_buff *skb;
2065 int copied;
2066 int err = 0;
2067 int skb_len;
2068
2069 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
2070 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
2071 addr_len);
2072
2073 lock_sock(sk);
2074
2075 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) {
2076 err = -ENOTCONN;
2077 goto out;
2078 }
2079
2080 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
2081 if (!skb)
2082 goto out;
2083
2084 /* Get the total length of the skb including any skb's in the
2085 * frag_list.
2086 */
2087 skb_len = skb->len;
2088
2089 copied = skb_len;
2090 if (copied > len)
2091 copied = len;
2092
2093 err = skb_copy_datagram_msg(skb, 0, msg, copied);
2094
2095 event = sctp_skb2event(skb);
2096
2097 if (err)
2098 goto out_free;
2099
2100 sock_recv_ts_and_drops(msg, sk, skb);
2101 if (sctp_ulpevent_is_notification(event)) {
2102 msg->msg_flags |= MSG_NOTIFICATION;
2103 sp->pf->event_msgname(event, msg->msg_name, addr_len);
2104 } else {
2105 sp->pf->skb_msgname(skb, msg->msg_name, addr_len);
2106 }
2107
2108 /* Check if we allow SCTP_NXTINFO. */
2109 if (sp->recvnxtinfo)
2110 sctp_ulpevent_read_nxtinfo(event, msg, sk);
2111 /* Check if we allow SCTP_RCVINFO. */
2112 if (sp->recvrcvinfo)
2113 sctp_ulpevent_read_rcvinfo(event, msg);
2114 /* Check if we allow SCTP_SNDRCVINFO. */
2115 if (sp->subscribe.sctp_data_io_event)
2116 sctp_ulpevent_read_sndrcvinfo(event, msg);
2117
2118 err = copied;
2119
2120 /* If skb's length exceeds the user's buffer, update the skb and
2121 * push it back to the receive_queue so that the next call to
2122 * recvmsg() will return the remaining data. Don't set MSG_EOR.
2123 */
2124 if (skb_len > copied) {
2125 msg->msg_flags &= ~MSG_EOR;
2126 if (flags & MSG_PEEK)
2127 goto out_free;
2128 sctp_skb_pull(skb, copied);
2129 skb_queue_head(&sk->sk_receive_queue, skb);
2130
2131 /* When only partial message is copied to the user, increase
2132 * rwnd by that amount. If all the data in the skb is read,
2133 * rwnd is updated when the event is freed.
2134 */
2135 if (!sctp_ulpevent_is_notification(event))
2136 sctp_assoc_rwnd_increase(event->asoc, copied);
2137 goto out;
2138 } else if ((event->msg_flags & MSG_NOTIFICATION) ||
2139 (event->msg_flags & MSG_EOR))
2140 msg->msg_flags |= MSG_EOR;
2141 else
2142 msg->msg_flags &= ~MSG_EOR;
2143
2144 out_free:
2145 if (flags & MSG_PEEK) {
2146 /* Release the skb reference acquired after peeking the skb in
2147 * sctp_skb_recv_datagram().
2148 */
2149 kfree_skb(skb);
2150 } else {
2151 /* Free the event which includes releasing the reference to
2152 * the owner of the skb, freeing the skb and updating the
2153 * rwnd.
2154 */
2155 sctp_ulpevent_free(event);
2156 }
2157 out:
2158 release_sock(sk);
2159 return err;
2160 }
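
/* Illustrative user-space sketch (not part of this kernel source): the
 * receiving side of the path above, using the lksctp-tools sctp_recvmsg()
 * helper (link with -lsctp). 'sd' is an assumed SCTP socket descriptor, and
 * sinfo is only filled in if sctp_data_io_event has been subscribed via
 * SCTP_EVENTS (see sctp_setsockopt_events() below).
 *
 *	#include <stdio.h>
 *	#include <netinet/sctp.h>
 *
 *	static void recv_one(int sd)
 *	{
 *		struct sctp_sndrcvinfo sinfo = { 0 };
 *		char buf[2048];
 *		int flags = 0;
 *		int n;
 *
 *		n = sctp_recvmsg(sd, buf, sizeof(buf), NULL, NULL,
 *				 &sinfo, &flags);
 *		if (n < 0) {
 *			perror("sctp_recvmsg");
 *		} else if (flags & MSG_NOTIFICATION) {
 *			printf("notification, %d bytes\n", n);
 *		} else {
 *			// MSG_EOR is clear when only part of a message fit
 *			printf("%s message, %d bytes, stream %u\n",
 *			       (flags & MSG_EOR) ? "complete" : "partial",
 *			       n, (unsigned int)sinfo.sinfo_stream);
 *		}
 *	}
 */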
2161
2162 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
2163 *
2164 * This option is an on/off flag. If enabled, no SCTP message
2165 * fragmentation will be performed. Instead, if a message being sent
2166 * exceeds the current PMTU size, the message will NOT be sent and
2167 * instead an error will be indicated to the user.
2168 */
2169 static int sctp_setsockopt_disable_fragments(struct sock *sk,
2170 char __user *optval,
2171 unsigned int optlen)
2172 {
2173 int val;
2174
2175 if (optlen < sizeof(int))
2176 return -EINVAL;
2177
2178 if (get_user(val, (int __user *)optval))
2179 return -EFAULT;
2180
2181 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;
2182
2183 return 0;
2184 }
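
/* Illustrative user-space sketch (not part of this kernel source): setting
 * the flag handled above from an application, assuming an existing SCTP
 * socket descriptor 'sd' and the lksctp-tools <netinet/sctp.h> header.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int disable_fragments(int sd)
 *	{
 *		int on = 1;	// any non-zero value disables fragmentation
 *
 *		if (setsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
 *			       &on, sizeof(on)) < 0) {
 *			perror("setsockopt(SCTP_DISABLE_FRAGMENTS)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */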
2185
2186 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2187 unsigned int optlen)
2188 {
2189 struct sctp_association *asoc;
2190 struct sctp_ulpevent *event;
2191
2192 if (optlen > sizeof(struct sctp_event_subscribe))
2193 return -EINVAL;
2194 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
2195 return -EFAULT;
2196
2197 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2198 * if there is no data to be sent or retransmit, the stack will
2199 * immediately send up this notification.
2200 */
2201 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
2202 &sctp_sk(sk)->subscribe)) {
2203 asoc = sctp_id2assoc(sk, 0);
2204
2205 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2206 event = sctp_ulpevent_make_sender_dry_event(asoc,
2207 GFP_ATOMIC);
2208 if (!event)
2209 return -ENOMEM;
2210
2211 sctp_ulpq_tail_event(&asoc->ulpq, event);
2212 }
2213 }
2214
2215 return 0;
2216 }
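
/* Illustrative user-space sketch (not part of this kernel source):
 * subscribing to the per-message sctp_sndrcvinfo and the SENDER_DRY
 * notification discussed above. 'sd' is an assumed SCTP socket descriptor;
 * the sctp_sender_dry_event field assumes headers recent enough to define
 * it. Fields left zero keep the corresponding events disabled.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int subscribe_events(int sd)
 *	{
 *		struct sctp_event_subscribe ev;
 *
 *		memset(&ev, 0, sizeof(ev));
 *		ev.sctp_data_io_event = 1;	// deliver SCTP_SNDRCV cmsg
 *		ev.sctp_sender_dry_event = 1;	// may fire immediately, see above
 *		if (setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS,
 *			       &ev, sizeof(ev)) < 0) {
 *			perror("setsockopt(SCTP_EVENTS)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */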
2217
2218 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
2219 *
2220 * This socket option is applicable to the UDP-style socket only. When
2221 * set it will cause associations that are idle for more than the
2222 * specified number of seconds to automatically close. An association
2223 * being idle is defined as an association that has NOT sent or received
2224 * user data. The special value of '0' indicates that no automatic
2225 * close of any associations should be performed. The option expects an
2226 * integer defining the number of seconds of idle time before an
2227 * association is closed.
2228 */
2229 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2230 unsigned int optlen)
2231 {
2232 struct sctp_sock *sp = sctp_sk(sk);
2233 struct net *net = sock_net(sk);
2234
2235 /* Applicable to UDP-style socket only */
2236 if (sctp_style(sk, TCP))
2237 return -EOPNOTSUPP;
2238 if (optlen != sizeof(int))
2239 return -EINVAL;
2240 if (copy_from_user(&sp->autoclose, optval, optlen))
2241 return -EFAULT;
2242
2243 if (sp->autoclose > net->sctp.max_autoclose)
2244 sp->autoclose = net->sctp.max_autoclose;
2245
2246 return 0;
2247 }
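
/* Illustrative user-space sketch (not part of this kernel source): enabling
 * auto-close on an assumed one-to-many (SOCK_SEQPACKET) socket descriptor
 * 'sd'. The value is in seconds; 0 disables auto-close, and the kernel
 * clamps the value to net.sctp.max_autoclose as shown above.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int set_autoclose(int sd, int seconds)
 *	{
 *		if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
 *			       &seconds, sizeof(seconds)) < 0) {
 *			perror("setsockopt(SCTP_AUTOCLOSE)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */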
2248
2249 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
2250 *
2251 * Applications can enable or disable heartbeats for any peer address of
2252 * an association, modify an address's heartbeat interval, force a
2253 * heartbeat to be sent immediately, and adjust the address's maximum
2254 * number of retransmissions sent before an address is considered
2255 * unreachable. The following structure is used to access and modify an
2256 * address's parameters:
2257 *
2258 * struct sctp_paddrparams {
2259 * sctp_assoc_t spp_assoc_id;
2260 * struct sockaddr_storage spp_address;
2261 * uint32_t spp_hbinterval;
2262 * uint16_t spp_pathmaxrxt;
2263 * uint32_t spp_pathmtu;
2264 * uint32_t spp_sackdelay;
2265 * uint32_t spp_flags;
2266 * };
2267 *
2268 * spp_assoc_id - (one-to-many style socket) This is filled in by the
2269 * application, and identifies the association for
2270 * this query.
2271 * spp_address - This specifies which address is of interest.
2272 * spp_hbinterval - This contains the value of the heartbeat interval,
2273 * in milliseconds. If a value of zero
2274 * is present in this field then no changes are to
2275 * be made to this parameter.
2276 * spp_pathmaxrxt - This contains the maximum number of
2277 * retransmissions before this address shall be
2278 * considered unreachable. If a value of zero
2279 * is present in this field then no changes are to
2280 * be made to this parameter.
2281 * spp_pathmtu - When Path MTU discovery is disabled the value
2282 * specified here will be the "fixed" path mtu.
2283 * Note that if the spp_address field is empty
2284 * then all associations on this address will
2285 * have this fixed path mtu set upon them.
2286 *
2287 * spp_sackdelay - When delayed sack is enabled, this value specifies
2288 * the number of milliseconds that sacks will be delayed
2289 * for. This value will apply to all addresses of an
2290 * association if the spp_address field is empty. Note
2291 * also, that if delayed sack is enabled and this
2292 * value is set to 0, no change is made to the last
2293 * recorded delayed sack timer value.
2294 *
2295 * spp_flags - These flags are used to control various features
2296 * on an association. The flag field may contain
2297 * zero or more of the following options.
2298 *
2299 * SPP_HB_ENABLE - Enable heartbeats on the
2300 * specified address. Note that if the address
2301 * field is empty all addresses for the association
2302 * have heartbeats enabled upon them.
2303 *
2304 * SPP_HB_DISABLE - Disable heartbeats on the
2305 * specified address. Note that if the address
2306 * field is empty all addresses for the association
2307 * will have their heartbeats disabled. Note also
2308 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
2309 * mutually exclusive, only one of these two should
2310 * be specified. Enabling both fields will have
2311 * undetermined results.
2312 *
2313 * SPP_HB_DEMAND - Request a user initiated heartbeat
2314 * to be made immediately.
2315 *
2316 * SPP_HB_TIME_IS_ZERO - Specifies that the time for
2317 * heartbeat delay is to be set to the value of 0
2318 * milliseconds.
2319 *
2320 * SPP_PMTUD_ENABLE - This field will enable PMTU
2321 * discovery upon the specified address. Note that
2322 * if the address field is empty then all addresses
2323 * on the association are affected.
2324 *
2325 * SPP_PMTUD_DISABLE - This field will disable PMTU
2326 * discovery upon the specified address. Note that
2327 * if the address field is empty then all addresses
2328 * on the association are affected. Note also that
2329 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
2330 * exclusive. Enabling both will have undetermined
2331 * results.
2332 *
2333 * SPP_SACKDELAY_ENABLE - Setting this flag turns
2334 * on delayed sack. The time specified in spp_sackdelay
2335 * is used to specify the sack delay for this address. Note
2336 * that if spp_address is empty then all addresses will
2337 * enable delayed sack and take on the sack delay
2338 * value specified in spp_sackdelay.
2339 * SPP_SACKDELAY_DISABLE - Setting this flag turns
2340 * off delayed sack. If the spp_address field is blank then
2341 * delayed sack is disabled for the entire association. Note
2342 * also that this field is mutually exclusive to
2343 * SPP_SACKDELAY_ENABLE, setting both will have undefined
2344 * results.
2345 */
2346 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2347 struct sctp_transport *trans,
2348 struct sctp_association *asoc,
2349 struct sctp_sock *sp,
2350 int hb_change,
2351 int pmtud_change,
2352 int sackdelay_change)
2353 {
2354 int error;
2355
2356 if (params->spp_flags & SPP_HB_DEMAND && trans) {
2357 struct net *net = sock_net(trans->asoc->base.sk);
2358
2359 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
2360 if (error)
2361 return error;
2362 }
2363
2364 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
2365 * this field is ignored. Note also that a value of zero indicates
2366 * the current setting should be left unchanged.
2367 */
2368 if (params->spp_flags & SPP_HB_ENABLE) {
2369
2370 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
2371 * set. This lets us use 0 value when this flag
2372 * is set.
2373 */
2374 if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
2375 params->spp_hbinterval = 0;
2376
2377 if (params->spp_hbinterval ||
2378 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
2379 if (trans) {
2380 trans->hbinterval =
2381 msecs_to_jiffies(params->spp_hbinterval);
2382 } else if (asoc) {
2383 asoc->hbinterval =
2384 msecs_to_jiffies(params->spp_hbinterval);
2385 } else {
2386 sp->hbinterval = params->spp_hbinterval;
2387 }
2388 }
2389 }
2390
2391 if (hb_change) {
2392 if (trans) {
2393 trans->param_flags =
2394 (trans->param_flags & ~SPP_HB) | hb_change;
2395 } else if (asoc) {
2396 asoc->param_flags =
2397 (asoc->param_flags & ~SPP_HB) | hb_change;
2398 } else {
2399 sp->param_flags =
2400 (sp->param_flags & ~SPP_HB) | hb_change;
2401 }
2402 }
2403
2404 /* When Path MTU discovery is disabled the value specified here will
2405 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
2406 * include the flag SPP_PMTUD_DISABLE for this field to have any
2407 * effect).
2408 */
2409 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
2410 if (trans) {
2411 trans->pathmtu = params->spp_pathmtu;
2412 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
2413 } else if (asoc) {
2414 asoc->pathmtu = params->spp_pathmtu;
2415 sctp_frag_point(asoc, params->spp_pathmtu);
2416 } else {
2417 sp->pathmtu = params->spp_pathmtu;
2418 }
2419 }
2420
2421 if (pmtud_change) {
2422 if (trans) {
2423 int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
2424 (params->spp_flags & SPP_PMTUD_ENABLE);
2425 trans->param_flags =
2426 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2427 if (update) {
2428 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2429 sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
2430 }
2431 } else if (asoc) {
2432 asoc->param_flags =
2433 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
2434 } else {
2435 sp->param_flags =
2436 (sp->param_flags & ~SPP_PMTUD) | pmtud_change;
2437 }
2438 }
2439
2440 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
2441 * value of this field is ignored. Note also that a value of zero
2442 * indicates the current setting should be left unchanged.
2443 */
2444 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
2445 if (trans) {
2446 trans->sackdelay =
2447 msecs_to_jiffies(params->spp_sackdelay);
2448 } else if (asoc) {
2449 asoc->sackdelay =
2450 msecs_to_jiffies(params->spp_sackdelay);
2451 } else {
2452 sp->sackdelay = params->spp_sackdelay;
2453 }
2454 }
2455
2456 if (sackdelay_change) {
2457 if (trans) {
2458 trans->param_flags =
2459 (trans->param_flags & ~SPP_SACKDELAY) |
2460 sackdelay_change;
2461 } else if (asoc) {
2462 asoc->param_flags =
2463 (asoc->param_flags & ~SPP_SACKDELAY) |
2464 sackdelay_change;
2465 } else {
2466 sp->param_flags =
2467 (sp->param_flags & ~SPP_SACKDELAY) |
2468 sackdelay_change;
2469 }
2470 }
2471
2472 /* Note that a value of zero indicates the current setting should be
2473 * left unchanged.
2474 */
2475 if (params->spp_pathmaxrxt) {
2476 if (trans) {
2477 trans->pathmaxrxt = params->spp_pathmaxrxt;
2478 } else if (asoc) {
2479 asoc->pathmaxrxt = params->spp_pathmaxrxt;
2480 } else {
2481 sp->pathmaxrxt = params->spp_pathmaxrxt;
2482 }
2483 }
2484
2485 return 0;
2486 }
2487
2488 static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2489 char __user *optval,
2490 unsigned int optlen)
2491 {
2492 struct sctp_paddrparams params;
2493 struct sctp_transport *trans = NULL;
2494 struct sctp_association *asoc = NULL;
2495 struct sctp_sock *sp = sctp_sk(sk);
2496 int error;
2497 int hb_change, pmtud_change, sackdelay_change;
2498
2499 if (optlen != sizeof(struct sctp_paddrparams))
2500 return -EINVAL;
2501
2502 if (copy_from_user(&params, optval, optlen))
2503 return -EFAULT;
2504
2505 /* Validate flags and value parameters. */
2506 hb_change = params.spp_flags & SPP_HB;
2507 pmtud_change = params.spp_flags & SPP_PMTUD;
2508 sackdelay_change = params.spp_flags & SPP_SACKDELAY;
2509
2510 if (hb_change == SPP_HB ||
2511 pmtud_change == SPP_PMTUD ||
2512 sackdelay_change == SPP_SACKDELAY ||
2513 params.spp_sackdelay > 500 ||
2514 (params.spp_pathmtu &&
2515 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
2516 return -EINVAL;
2517
2518 /* If an address other than INADDR_ANY is specified, and
2519 * no transport is found, then the request is invalid.
2520 */
2521 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
2522 trans = sctp_addr_id2transport(sk, &params.spp_address,
2523 params.spp_assoc_id);
2524 if (!trans)
2525 return -EINVAL;
2526 }
2527
2528 /* Get association, if assoc_id != 0 and the socket is a one
2529 * to many style socket, and an association was not found, then
2530 * the id was invalid.
2531 */
2532 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
2533 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
2534 return -EINVAL;
2535
2536 /* Heartbeat demand can only be sent on a transport or
2537 * association, but not a socket.
2538 */
2539 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
2540 return -EINVAL;
2541
2542 /* Process parameters. */
2543 error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2544 hb_change, pmtud_change,
2545 sackdelay_change);
2546
2547 if (error)
2548 return error;
2549
2550 /* If changes are for association, also apply parameters to each
2551 * transport.
2552 */
2553 if (!trans && asoc) {
2554 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2555 transports) {
2556 sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2557 hb_change, pmtud_change,
2558 sackdelay_change);
2559 }
2560 }
2561
2562 return 0;
2563 }
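
/* Illustrative user-space sketch (not part of this kernel source): enabling
 * heartbeats with a 5 second interval for a whole association, matching the
 * "apply to every transport" branch above. 'sd' and 'assoc_id' are assumed;
 * leaving spp_address zeroed (INADDR_ANY) makes the change association-wide.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int enable_heartbeats(int sd, sctp_assoc_t assoc_id)
 *	{
 *		struct sctp_paddrparams pp;
 *
 *		memset(&pp, 0, sizeof(pp));
 *		pp.spp_assoc_id = assoc_id;	// 0 = endpoint defaults (UDP style)
 *		pp.spp_hbinterval = 5000;	// milliseconds
 *		pp.spp_flags = SPP_HB_ENABLE;
 *		if (setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *			       &pp, sizeof(pp)) < 0) {
 *			perror("setsockopt(SCTP_PEER_ADDR_PARAMS)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */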
2564
2565 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
2566 {
2567 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
2568 }
2569
2570 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
2571 {
2572 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
2573 }
2574
2575 /*
2576 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
2577 *
2578 * This option will affect the way delayed acks are performed. This
2579 * option allows you to get or set the delayed ack time, in
2580 * milliseconds. It also allows changing the delayed ack frequency.
2581 * Changing the frequency to 1 disables the delayed sack algorithm. If
2582 * the assoc_id is 0, then this sets or gets the endpoint's default
2583 * values. If the assoc_id field is non-zero, then the set or get
2584 * affects the specified association for the one to many model (the
2585 * assoc_id field is ignored by the one to one model). Note that if
2586 * sack_delay or sack_freq are 0 when setting this option, then the
2587 * current values will remain unchanged.
2588 *
2589 * struct sctp_sack_info {
2590 * sctp_assoc_t sack_assoc_id;
2591 * uint32_t sack_delay;
2592 * uint32_t sack_freq;
2593 * };
2594 *
2595 * sack_assoc_id - This parameter indicates which association the user
2596 * is performing an action upon. Note that if this field's value is
2597 * zero then the endpoint's default value is changed (affecting future
2598 * associations only).
2599 *
2600 * sack_delay - This parameter contains the number of milliseconds that
2601 * the user is requesting the delayed ACK timer be set to. Note that
2602 * this value is defined in the standard to be between 200 and 500
2603 * milliseconds.
2604 *
2605 * sack_freq - This parameter contains the number of packets that must
2606 * be received before a sack is sent without waiting for the delay
2607 * timer to expire. The default value for this is 2, setting this
2608 * value to 1 will disable the delayed sack algorithm.
2609 */
2610
2611 static int sctp_setsockopt_delayed_ack(struct sock *sk,
2612 char __user *optval, unsigned int optlen)
2613 {
2614 struct sctp_sack_info params;
2615 struct sctp_transport *trans = NULL;
2616 struct sctp_association *asoc = NULL;
2617 struct sctp_sock *sp = sctp_sk(sk);
2618
2619 if (optlen == sizeof(struct sctp_sack_info)) {
2620 if (copy_from_user(&params, optval, optlen))
2621 return -EFAULT;
2622
2623 if (params.sack_delay == 0 && params.sack_freq == 0)
2624 return 0;
2625 } else if (optlen == sizeof(struct sctp_assoc_value)) {
2626 pr_warn_ratelimited(DEPRECATED
2627 "%s (pid %d) "
2628 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
2629 "Use struct sctp_sack_info instead\n",
2630 current->comm, task_pid_nr(current));
2631 if (copy_from_user(&params, optval, optlen))
2632 return -EFAULT;
2633
2634 if (params.sack_delay == 0)
2635 params.sack_freq = 1;
2636 else
2637 params.sack_freq = 0;
2638 } else
2639 return -EINVAL;
2640
2641 /* Validate value parameter. */
2642 if (params.sack_delay > 500)
2643 return -EINVAL;
2644
2645 /* Get association, if sack_assoc_id != 0 and the socket is a one
2646 * to many style socket, and an association was not found, then
2647 * the id was invalid.
2648 */
2649 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
2650 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
2651 return -EINVAL;
2652
2653 if (params.sack_delay) {
2654 if (asoc) {
2655 asoc->sackdelay =
2656 msecs_to_jiffies(params.sack_delay);
2657 asoc->param_flags =
2658 sctp_spp_sackdelay_enable(asoc->param_flags);
2659 } else {
2660 sp->sackdelay = params.sack_delay;
2661 sp->param_flags =
2662 sctp_spp_sackdelay_enable(sp->param_flags);
2663 }
2664 }
2665
2666 if (params.sack_freq == 1) {
2667 if (asoc) {
2668 asoc->param_flags =
2669 sctp_spp_sackdelay_disable(asoc->param_flags);
2670 } else {
2671 sp->param_flags =
2672 sctp_spp_sackdelay_disable(sp->param_flags);
2673 }
2674 } else if (params.sack_freq > 1) {
2675 if (asoc) {
2676 asoc->sackfreq = params.sack_freq;
2677 asoc->param_flags =
2678 sctp_spp_sackdelay_enable(asoc->param_flags);
2679 } else {
2680 sp->sackfreq = params.sack_freq;
2681 sp->param_flags =
2682 sctp_spp_sackdelay_enable(sp->param_flags);
2683 }
2684 }
2685
2686 /* If change is for association, also apply to each transport. */
2687 if (asoc) {
2688 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2689 transports) {
2690 if (params.sack_delay) {
2691 trans->sackdelay =
2692 msecs_to_jiffies(params.sack_delay);
2693 trans->param_flags =
2694 sctp_spp_sackdelay_enable(trans->param_flags);
2695 }
2696 if (params.sack_freq == 1) {
2697 trans->param_flags =
2698 sctp_spp_sackdelay_disable(trans->param_flags);
2699 } else if (params.sack_freq > 1) {
2700 trans->sackfreq = params.sack_freq;
2701 trans->param_flags =
2702 sctp_spp_sackdelay_enable(trans->param_flags);
2703 }
2704 }
2705 }
2706
2707 return 0;
2708 }
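
/* Illustrative user-space sketch (not part of this kernel source): setting
 * a 100 ms delayed-SACK timer and the default SACK frequency using the
 * current sctp_sack_info layout (the sctp_assoc_value form is deprecated,
 * as warned above). 'sd' and 'assoc_id' are assumed.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int set_sack_timing(int sd, sctp_assoc_t assoc_id)
 *	{
 *		struct sctp_sack_info si;
 *
 *		memset(&si, 0, sizeof(si));
 *		si.sack_assoc_id = assoc_id;	// 0 = endpoint default
 *		si.sack_delay = 100;		// milliseconds, must be <= 500
 *		si.sack_freq = 2;		// 1 would disable delayed SACKs
 *		if (setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
 *			       &si, sizeof(si)) < 0) {
 *			perror("setsockopt(SCTP_DELAYED_SACK)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */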
2709
2710 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
2711 *
2712 * Applications can specify protocol parameters for the default association
2713 * initialization. The option name argument to setsockopt() and getsockopt()
2714 * is SCTP_INITMSG.
2715 *
2716 * Setting initialization parameters is effective only on an unconnected
2717 * socket (for UDP-style sockets only future associations are affected
2718 * by the change). With TCP-style sockets, this option is inherited by
2719 * sockets derived from a listener socket.
2720 */
2721 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
2722 {
2723 struct sctp_initmsg sinit;
2724 struct sctp_sock *sp = sctp_sk(sk);
2725
2726 if (optlen != sizeof(struct sctp_initmsg))
2727 return -EINVAL;
2728 if (copy_from_user(&sinit, optval, optlen))
2729 return -EFAULT;
2730
2731 if (sinit.sinit_num_ostreams)
2732 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
2733 if (sinit.sinit_max_instreams)
2734 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
2735 if (sinit.sinit_max_attempts)
2736 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
2737 if (sinit.sinit_max_init_timeo)
2738 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
2739
2740 return 0;
2741 }
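
/* Illustrative user-space sketch (not part of this kernel source): setting
 * default INIT parameters before connecting, so future associations request
 * ten streams in each direction. 'sd' is an assumed, not yet connected,
 * SCTP socket descriptor; zero fields would leave defaults unchanged.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int set_init_params(int sd)
 *	{
 *		struct sctp_initmsg im;
 *
 *		memset(&im, 0, sizeof(im));
 *		im.sinit_num_ostreams = 10;
 *		im.sinit_max_instreams = 10;
 *		im.sinit_max_attempts = 4;
 *		im.sinit_max_init_timeo = 3000;	// milliseconds
 *		if (setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG,
 *			       &im, sizeof(im)) < 0) {
 *			perror("setsockopt(SCTP_INITMSG)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */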
2742
2743 /*
2744 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
2745 *
2746 * Applications that wish to use the sendto() system call may wish to
2747 * specify a default set of parameters that would normally be supplied
2748 * through the inclusion of ancillary data. This socket option allows
2749 * such an application to set the default sctp_sndrcvinfo structure.
2750 * The application that wishes to use this socket option simply passes
2751 * in to this call the sctp_sndrcvinfo structure defined in Section
2752 * 5.2.2). The input parameters accepted by this call include
2753 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
2754 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
2755 * to this call if the caller is using the UDP model.
2756 */
2757 static int sctp_setsockopt_default_send_param(struct sock *sk,
2758 char __user *optval,
2759 unsigned int optlen)
2760 {
2761 struct sctp_sock *sp = sctp_sk(sk);
2762 struct sctp_association *asoc;
2763 struct sctp_sndrcvinfo info;
2764
2765 if (optlen != sizeof(info))
2766 return -EINVAL;
2767 if (copy_from_user(&info, optval, optlen))
2768 return -EFAULT;
2769 if (info.sinfo_flags &
2770 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
2771 SCTP_ABORT | SCTP_EOF))
2772 return -EINVAL;
2773
2774 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
2775 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
2776 return -EINVAL;
2777 if (asoc) {
2778 asoc->default_stream = info.sinfo_stream;
2779 asoc->default_flags = info.sinfo_flags;
2780 asoc->default_ppid = info.sinfo_ppid;
2781 asoc->default_context = info.sinfo_context;
2782 asoc->default_timetolive = info.sinfo_timetolive;
2783 } else {
2784 sp->default_stream = info.sinfo_stream;
2785 sp->default_flags = info.sinfo_flags;
2786 sp->default_ppid = info.sinfo_ppid;
2787 sp->default_context = info.sinfo_context;
2788 sp->default_timetolive = info.sinfo_timetolive;
2789 }
2790
2791 return 0;
2792 }
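
/* Illustrative user-space sketch (not part of this kernel source): making
 * unordered delivery on stream 2 the default for an association, so plain
 * send()/sendto() calls need no ancillary data. 'sd' and 'assoc_id' are
 * assumed; assoc_id is required when using the one-to-many (UDP) model.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <netinet/sctp.h>
 *
 *	static int set_default_send(int sd, sctp_assoc_t assoc_id)
 *	{
 *		struct sctp_sndrcvinfo si;
 *
 *		memset(&si, 0, sizeof(si));
 *		si.sinfo_assoc_id = assoc_id;
 *		si.sinfo_stream = 2;
 *		si.sinfo_flags = SCTP_UNORDERED;
 *		si.sinfo_ppid = htonl(42);
 *		if (setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *			       &si, sizeof(si)) < 0) {
 *			perror("setsockopt(SCTP_DEFAULT_SEND_PARAM)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */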
2793
2794 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters
2795 * (SCTP_DEFAULT_SNDINFO)
2796 */
2797 static int sctp_setsockopt_default_sndinfo(struct sock *sk,
2798 char __user *optval,
2799 unsigned int optlen)
2800 {
2801 struct sctp_sock *sp = sctp_sk(sk);
2802 struct sctp_association *asoc;
2803 struct sctp_sndinfo info;
2804
2805 if (optlen != sizeof(info))
2806 return -EINVAL;
2807 if (copy_from_user(&info, optval, optlen))
2808 return -EFAULT;
2809 if (info.snd_flags &
2810 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
2811 SCTP_ABORT | SCTP_EOF))
2812 return -EINVAL;
2813
2814 asoc = sctp_id2assoc(sk, info.snd_assoc_id);
2815 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
2816 return -EINVAL;
2817 if (asoc) {
2818 asoc->default_stream = info.snd_sid;
2819 asoc->default_flags = info.snd_flags;
2820 asoc->default_ppid = info.snd_ppid;
2821 asoc->default_context = info.snd_context;
2822 } else {
2823 sp->default_stream = info.snd_sid;
2824 sp->default_flags = info.snd_flags;
2825 sp->default_ppid = info.snd_ppid;
2826 sp->default_context = info.snd_context;
2827 }
2828
2829 return 0;
2830 }
2831
2832 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
2833 *
2834 * Requests that the local SCTP stack use the enclosed peer address as
2835 * the association primary. The enclosed address must be one of the
2836 * association peer's addresses.
2837 */
2838 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
2839 unsigned int optlen)
2840 {
2841 struct sctp_prim prim;
2842 struct sctp_transport *trans;
2843
2844 if (optlen != sizeof(struct sctp_prim))
2845 return -EINVAL;
2846
2847 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
2848 return -EFAULT;
2849
2850 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
2851 if (!trans)
2852 return -EINVAL;
2853
2854 sctp_assoc_set_primary(trans->asoc, trans);
2855
2856 return 0;
2857 }
2858
2859 /*
2860 * 7.1.5 SCTP_NODELAY
2861 *
2862 * Turn on/off any Nagle-like algorithm. This means that packets are
2863 * generally sent as soon as possible and no unnecessary delays are
2864 * introduced, at the cost of more packets in the network. Expects an
2865 * integer boolean flag.
2866 */
2867 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
2868 unsigned int optlen)
2869 {
2870 int val;
2871
2872 if (optlen < sizeof(int))
2873 return -EINVAL;
2874 if (get_user(val, (int __user *)optval))
2875 return -EFAULT;
2876
2877 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
2878 return 0;
2879 }
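
/* Illustrative user-space sketch (not part of this kernel source): turning
 * off the Nagle-like delay for latency-sensitive traffic on an assumed SCTP
 * socket descriptor 'sd'.
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int enable_nodelay(int sd)
 *	{
 *		int on = 1;
 *
 *		if (setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY,
 *			       &on, sizeof(on)) < 0) {
 *			perror("setsockopt(SCTP_NODELAY)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */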
2880
2881 /*
2882 *
2883 * 7.1.1 SCTP_RTOINFO
2884 *
2885 * The protocol parameters used to initialize and bound retransmission
2886 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
2887 * and modify these parameters.
2888 * All parameters are time values, in milliseconds. A value of 0, when
2889 * modifying the parameters, indicates that the current value should not
2890 * be changed.
2891 *
2892 */
2893 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
2894 {
2895 struct sctp_rtoinfo rtoinfo;
2896 struct sctp_association *asoc;
2897 unsigned long rto_min, rto_max;
2898 struct sctp_sock *sp = sctp_sk(sk);
2899
2900 if (optlen != sizeof (struct sctp_rtoinfo))
2901 return -EINVAL;
2902
2903 if (copy_from_user(&rtoinfo, optval, optlen))
2904 return -EFAULT;
2905
2906 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
2907
2908 /* Set the values to the specific association */
2909 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
2910 return -EINVAL;
2911
2912 rto_max = rtoinfo.srto_max;
2913 rto_min = rtoinfo.srto_min;
2914
2915 if (rto_max)
2916 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
2917 else
2918 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
2919
2920 if (rto_min)
2921 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
2922 else
2923 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
2924
2925 if (rto_min > rto_max)
2926 return -EINVAL;
2927
2928 if (asoc) {
2929 if (rtoinfo.srto_initial != 0)
2930 asoc->rto_initial =
2931 msecs_to_jiffies(rtoinfo.srto_initial);
2932 asoc->rto_max = rto_max;
2933 asoc->rto_min = rto_min;
2934 } else {
2935 /* If there is no association or the association-id = 0
2936 * set the values to the endpoint.
2937 */
2938 if (rtoinfo.srto_initial != 0)
2939 sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
2940 sp->rtoinfo.srto_max = rto_max;
2941 sp->rtoinfo.srto_min = rto_min;
2942 }
2943
2944 return 0;
2945 }
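
/* Illustrative user-space sketch (not part of this kernel source): bounding
 * the retransmission timeout. All values are in milliseconds and a zero
 * field leaves the corresponding value unchanged, mirroring the logic
 * above. 'sd' and 'assoc_id' are assumed.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int set_rto_bounds(int sd, sctp_assoc_t assoc_id)
 *	{
 *		struct sctp_rtoinfo rto;
 *
 *		memset(&rto, 0, sizeof(rto));
 *		rto.srto_assoc_id = assoc_id;	// 0 = endpoint defaults
 *		rto.srto_initial = 300;
 *		rto.srto_min = 200;
 *		rto.srto_max = 10000;
 *		if (setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO,
 *			       &rto, sizeof(rto)) < 0) {
 *			perror("setsockopt(SCTP_RTOINFO)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */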
2946
2947 /*
2948 *
2949 * 7.1.2 SCTP_ASSOCINFO
2950 *
2951 * This option is used to tune the maximum retransmission attempts
2952 * of the association.
2953 * Returns an error if the new association retransmission value is
2954 * greater than the sum of the retransmission value of the peer.
2955 * See [SCTP] for more information.
2956 *
2957 */
2958 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
2959 {
2960
2961 struct sctp_assocparams assocparams;
2962 struct sctp_association *asoc;
2963
2964 if (optlen != sizeof(struct sctp_assocparams))
2965 return -EINVAL;
2966 if (copy_from_user(&assocparams, optval, optlen))
2967 return -EFAULT;
2968
2969 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
2970
2971 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
2972 return -EINVAL;
2973
2974 /* Set the values to the specific association */
2975 if (asoc) {
2976 if (assocparams.sasoc_asocmaxrxt != 0) {
2977 __u32 path_sum = 0;
2978 int paths = 0;
2979 struct sctp_transport *peer_addr;
2980
2981 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
2982 transports) {
2983 path_sum += peer_addr->pathmaxrxt;
2984 paths++;
2985 }
2986
2987 /* Only validate asocmaxrxt if we have more than
2988 * one path/transport. We do this because path
2989 * retransmissions are only counted when we have more
2990 * then one path.
2991 */
2992 if (paths > 1 &&
2993 assocparams.sasoc_asocmaxrxt > path_sum)
2994 return -EINVAL;
2995
2996 asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
2997 }
2998
2999 if (assocparams.sasoc_cookie_life != 0)
3000 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life);
3001 } else {
3002 /* Set the values to the endpoint */
3003 struct sctp_sock *sp = sctp_sk(sk);
3004
3005 if (assocparams.sasoc_asocmaxrxt != 0)
3006 sp->assocparams.sasoc_asocmaxrxt =
3007 assocparams.sasoc_asocmaxrxt;
3008 if (assocparams.sasoc_cookie_life != 0)
3009 sp->assocparams.sasoc_cookie_life =
3010 assocparams.sasoc_cookie_life;
3011 }
3012 return 0;
3013 }
3014
3015 /*
3016 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
3017 *
3018 * This socket option is a boolean flag which turns on or off mapped V4
3019 * addresses. If this option is turned on and the socket is type
3020 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
3021 * If this option is turned off, then no mapping will be done of V4
3022 * addresses and a user will receive both PF_INET6 and PF_INET type
3023 * addresses on the socket.
3024 */
3025 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
3026 {
3027 int val;
3028 struct sctp_sock *sp = sctp_sk(sk);
3029
3030 if (optlen < sizeof(int))
3031 return -EINVAL;
3032 if (get_user(val, (int __user *)optval))
3033 return -EFAULT;
3034 if (val)
3035 sp->v4mapped = 1;
3036 else
3037 sp->v4mapped = 0;
3038
3039 return 0;
3040 }
3041
3042 /*
3043 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
3044 * This option will get or set the maximum size to put in any outgoing
3045 * SCTP DATA chunk. If a message is larger than this size it will be
3046 * fragmented by SCTP into the specified size. Note that the underlying
3047 * SCTP implementation may fragment into smaller sized chunks when the
3048 * PMTU of the underlying association is smaller than the value set by
3049 * the user. The default value for this option is '0' which indicates
3050 * the user is NOT limiting fragmentation and only the PMTU will affect
3051 * SCTP's choice of DATA chunk size. Note also that values set larger
3052 * than the maximum size of an IP datagram will effectively let SCTP
3053 * control fragmentation (i.e. the same as setting this option to 0).
3054 *
3055 * The following structure is used to access and modify this parameter:
3056 *
3057 * struct sctp_assoc_value {
3058 * sctp_assoc_t assoc_id;
3059 * uint32_t assoc_value;
3060 * };
3061 *
3062 * assoc_id: This parameter is ignored for one-to-one style sockets.
3063 * For one-to-many style sockets this parameter indicates which
3064 * association the user is performing an action upon. Note that if
3065 * this field's value is zero then the endpoint's default value is
3066 * changed (affecting future associations only).
3067 * assoc_value: This parameter specifies the maximum size in bytes.
3068 */
3069 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
3070 {
3071 struct sctp_assoc_value params;
3072 struct sctp_association *asoc;
3073 struct sctp_sock *sp = sctp_sk(sk);
3074 int val;
3075
3076 if (optlen == sizeof(int)) {
3077 pr_warn_ratelimited(DEPRECATED
3078 "%s (pid %d) "
3079 "Use of int in maxseg socket option.\n"
3080 "Use struct sctp_assoc_value instead\n",
3081 current->comm, task_pid_nr(current));
3082 if (copy_from_user(&val, optval, optlen))
3083 return -EFAULT;
3084 params.assoc_id = 0;
3085 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3086 if (copy_from_user(&params, optval, optlen))
3087 return -EFAULT;
3088 val = params.assoc_value;
3089 } else
3090 return -EINVAL;
3091
3092 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
3093 return -EINVAL;
3094
3095 asoc = sctp_id2assoc(sk, params.assoc_id);
3096 if (!asoc && params.assoc_id && sctp_style(sk, UDP))
3097 return -EINVAL;
3098
3099 if (asoc) {
3100 if (val == 0) {
3101 val = asoc->pathmtu;
3102 val -= sp->pf->af->net_header_len;
3103 val -= sizeof(struct sctphdr) +
3104 sizeof(struct sctp_data_chunk);
3105 }
3106 asoc->user_frag = val;
3107 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
3108 } else {
3109 sp->user_frag = val;
3110 }
3111
3112 return 0;
3113 }
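
/* Illustrative user-space sketch (not part of this kernel source): capping
 * outgoing DATA chunks at 1200 bytes using the sctp_assoc_value form (the
 * plain int form is deprecated, as warned above). 'sd' and 'assoc_id' are
 * assumed; a value of 0 would fall back to PMTU-based fragmentation.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *
 *	static int set_maxseg(int sd, sctp_assoc_t assoc_id)
 *	{
 *		struct sctp_assoc_value av;
 *
 *		memset(&av, 0, sizeof(av));
 *		av.assoc_id = assoc_id;		// 0 = endpoint default
 *		av.assoc_value = 1200;		// bytes
 *		if (setsockopt(sd, IPPROTO_SCTP, SCTP_MAXSEG,
 *			       &av, sizeof(av)) < 0) {
 *			perror("setsockopt(SCTP_MAXSEG)");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */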
3114
3115
3116 /*
3117 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
3118 *
3119 * Requests that the peer mark the enclosed address as the association
3120 * primary. The enclosed address must be one of the association's
3121 * locally bound addresses. The following structure is used to make a
3122 * set primary request:
3123 */
3124 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
3125 unsigned int optlen)
3126 {
3127 struct net *net = sock_net(sk);
3128 struct sctp_sock *sp;
3129 struct sctp_association *asoc = NULL;
3130 struct sctp_setpeerprim prim;
3131 struct sctp_chunk *chunk;
3132 struct sctp_af *af;
3133 int err;
3134
3135 sp = sctp_sk(sk);
3136
3137 if (!net->sctp.addip_enable)
3138 return -EPERM;
3139
3140 if (optlen != sizeof(struct sctp_setpeerprim))
3141 return -EINVAL;
3142
3143 if (copy_from_user(&prim, optval, optlen))
3144 return -EFAULT;
3145
3146 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
3147 if (!asoc)
3148 return -EINVAL;
3149
3150 if (!asoc->peer.asconf_capable)
3151 return -EPERM;
3152
3153 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
3154 return -EPERM;
3155
3156 if (!sctp_state(asoc, ESTABLISHED))
3157 return -ENOTCONN;
3158
3159 af = sctp_get_af_specific(prim.sspp_addr.ss_family);
3160 if (!af)
3161 return -EINVAL;
3162
3163 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
3164 return -EADDRNOTAVAIL;
3165
3166 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
3167 return -EADDRNOTAVAIL;
3168
3169 /* Create an ASCONF chunk with SET_PRIMARY parameter */
3170 chunk = sctp_make_asconf_set_prim(asoc,
3171 (union sctp_addr *)&prim.sspp_addr);
3172 if (!chunk)
3173 return -ENOMEM;
3174
3175 err = sctp_send_asconf(asoc, chunk);
3176
3177 pr_debug("%s: we set peer primary addr primitively\n", __func__);
3178
3179 return err;
3180 }
3181
3182 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
3183 unsigned int optlen)
3184 {
3185 struct sctp_setadaptation adaptation;
3186
3187 if (optlen != sizeof(struct sctp_setadaptation))
3188 return -EINVAL;
3189 if (copy_from_user(&adaptation, optval, optlen))
3190 return -EFAULT;
3191
3192 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
3193
3194 return 0;
3195 }
3196
3197 /*
3198 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
3199 *
3200 * The context field in the sctp_sndrcvinfo structure is normally only
3201 * used when a failed message is retrieved holding the value that was
3202 * sent down on the actual send call. This option allows the setting of
3203 * a default context on an association basis that will be received on
3204 * reading messages from the peer. This is especially helpful in the
3205 * one-2-many model for an application to keep some reference to an
3206 * internal state machine that is processing messages on the
3207 * association. Note that the setting of this value only affects
3208 * received messages from the peer and does not affect the value that is
3209 * saved with outbound messages.
3210 */
3211 static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
3212 unsigned int optlen)
3213 {
3214 struct sctp_assoc_value params;
3215 struct sctp_sock *sp;
3216 struct sctp_association *asoc;
3217
3218 if (optlen != sizeof(struct sctp_assoc_value))
3219 return -EINVAL;
3220 if (copy_from_user(&params, optval, optlen))
3221 return -EFAULT;
3222
3223 sp = sctp_sk(sk);
3224
3225 if (params.assoc_id != 0) {
3226 asoc = sctp_id2assoc(sk, params.assoc_id);
3227 if (!asoc)
3228 return -EINVAL;
3229 asoc->default_rcv_context = params.assoc_value;
3230 } else {
3231 sp->default_rcv_context = params.assoc_value;
3232 }
3233
3234 return 0;
3235 }
3236
3237 /*
3238 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
3239 *
3240 * This option will at a minimum specify if the implementation is doing
3241 * fragmented interleave. Fragmented interleave, for a one to many
3242 * socket, is when subsequent calls to receive a message may return
3243 * parts of messages from different associations. Some implementations
3244 * may allow you to turn this value on or off. If so, when turned off,
3245 * no fragment interleave will occur (which will cause a head of line
3246 * blocking amongst multiple associations sharing the same one to many
3247 * socket). When this option is turned on, then each receive call may
3248 * come from a different association (thus the user must receive data
3249 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
3250 * association each receive belongs to).
3251 *
3252 * This option takes a boolean value. A non-zero value indicates that
3253 * fragmented interleave is on. A value of zero indicates that
3254 * fragmented interleave is off.
3255 *
3256 * Note that it is important that an implementation that allows this
3257 * option to be turned on, have it off by default. Otherwise an unaware
3258 * application using the one to many model may become confused and act
3259 * incorrectly.
3260 */
3261 static int sctp_setsockopt_fragment_interleave(struct sock *sk,
3262 char __user *optval,
3263 unsigned int optlen)
3264 {
3265 int val;
3266
3267 if (optlen != sizeof(int))
3268 return -EINVAL;
3269 if (get_user(val, (int __user *)optval))
3270 return -EFAULT;
3271
3272 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
3273
3274 return 0;
3275 }
3276
3277 /*
3278 * 8.1.21. Set or Get the SCTP Partial Delivery Point
3279 * (SCTP_PARTIAL_DELIVERY_POINT)
3280 *
3281 * This option will set or get the SCTP partial delivery point. This
3282 * point is the size of a message where the partial delivery API will be
3283 * invoked to help free up rwnd space for the peer. Setting this to a
3284 * lower value will cause partial deliveries to happen more often. The
3285 * call's argument is an integer that sets or gets the partial delivery
3286 * point. Note also that the call will fail if the user attempts to set
3287 * this value larger than the socket receive buffer size.
3288 *
3289 * Note that any single message having a length smaller than or equal to
3290 * the SCTP partial delivery point will be delivered in one single read
3291 * call as long as the user provided buffer is large enough to hold the
3292 * message.
3293 */
3294 static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
3295 char __user *optval,
3296 unsigned int optlen)
3297 {
3298 u32 val;
3299
3300 if (optlen != sizeof(u32))
3301 return -EINVAL;
3302 if (get_user(val, (int __user *)optval))
3303 return -EFAULT;
3304
3305 /* Note: We double the receive buffer from what the user sets
3306 * it to be, also initial rwnd is based on rcvbuf/2.
3307 */
3308 if (val > (sk->sk_rcvbuf >> 1))
3309 return -EINVAL;
3310
3311 sctp_sk(sk)->pd_point = val;
3312
3313 return 0; /* is this the right error code? */
3314 }
3315
3316 /*
3317 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
3318 *
3319 * This option will allow a user to change the maximum burst of packets
3320 * that can be emitted by this association. Note that the default value
3321 * is 4, and some implementations may restrict this setting so that it
3322 * can only be lowered.
3323 *
3324 * NOTE: This text doesn't seem right. Do this on a socket basis with
3325 * future associations inheriting the socket value.
3326 */
3327 static int sctp_setsockopt_maxburst(struct sock *sk,
3328 char __user *optval,
3329 unsigned int optlen)
3330 {
3331 struct sctp_assoc_value params;
3332 struct sctp_sock *sp;
3333 struct sctp_association *asoc;
3334 int val;
3335 int assoc_id = 0;
3336
3337 if (optlen == sizeof(int)) {
3338 pr_warn_ratelimited(DEPRECATED
3339 "%s (pid %d) "
3340 "Use of int in max_burst socket option deprecated.\n"
3341 "Use struct sctp_assoc_value instead\n",
3342 current->comm, task_pid_nr(current));
3343 if (copy_from_user(&val, optval, optlen))
3344 return -EFAULT;
3345 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3346 if (copy_from_user(&params, optval, optlen))
3347 return -EFAULT;
3348 val = params.assoc_value;
3349 assoc_id = params.assoc_id;
3350 } else
3351 return -EINVAL;
3352
3353 sp = sctp_sk(sk);
3354
3355 if (assoc_id != 0) {
3356 asoc = sctp_id2assoc(sk, assoc_id);
3357 if (!asoc)
3358 return -EINVAL;
3359 asoc->max_burst = val;
3360 } else
3361 sp->max_burst = val;
3362
3363 return 0;
3364 }
3365
3366 /*
3367 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
3368 *
3369 * This set option adds a chunk type that the user is requesting to be
3370 * received only in an authenticated way. Changes to the list of chunks
3371 * will only effect future associations on the socket.
3372 */
3373 static int sctp_setsockopt_auth_chunk(struct sock *sk,
3374 char __user *optval,
3375 unsigned int optlen)
3376 {
3377 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3378 struct sctp_authchunk val;
3379
3380 if (!ep->auth_enable)
3381 return -EACCES;
3382
3383 if (optlen != sizeof(struct sctp_authchunk))
3384 return -EINVAL;
3385 if (copy_from_user(&val, optval, optlen))
3386 return -EFAULT;
3387
3388 switch (val.sauth_chunk) {
3389 case SCTP_CID_INIT:
3390 case SCTP_CID_INIT_ACK:
3391 case SCTP_CID_SHUTDOWN_COMPLETE:
3392 case SCTP_CID_AUTH:
3393 return -EINVAL;
3394 }
3395
3396 /* add this chunk id to the endpoint */
3397 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
3398 }
3399
3400 /*
3401 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
3402 *
3403 * This option gets or sets the list of HMAC algorithms that the local
3404 * endpoint requires the peer to use.
3405 */
3406 static int sctp_setsockopt_hmac_ident(struct sock *sk,
3407 char __user *optval,
3408 unsigned int optlen)
3409 {
3410 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3411 struct sctp_hmacalgo *hmacs;
3412 u32 idents;
3413 int err;
3414
3415 if (!ep->auth_enable)
3416 return -EACCES;
3417
3418 if (optlen < sizeof(struct sctp_hmacalgo))
3419 return -EINVAL;
3420
3421 hmacs = memdup_user(optval, optlen);
3422 if (IS_ERR(hmacs))
3423 return PTR_ERR(hmacs);
3424
3425 idents = hmacs->shmac_num_idents;
3426 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
3427 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
3428 err = -EINVAL;
3429 goto out;
3430 }
3431
3432 err = sctp_auth_ep_set_hmacs(ep, hmacs);
3433 out:
3434 kfree(hmacs);
3435 return err;
3436 }
3437
3438 /*
3439 * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
3440 *
3441 * This option will set a shared secret key which is used to build an
3442 * association shared key.
3443 */
3444 static int sctp_setsockopt_auth_key(struct sock *sk,
3445 char __user *optval,
3446 unsigned int optlen)
3447 {
3448 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3449 struct sctp_authkey *authkey;
3450 struct sctp_association *asoc;
3451 int ret;
3452
3453 if (!ep->auth_enable)
3454 return -EACCES;
3455
3456 if (optlen <= sizeof(struct sctp_authkey))
3457 return -EINVAL;
3458
3459 authkey = memdup_user(optval, optlen);
3460 if (IS_ERR(authkey))
3461 return PTR_ERR(authkey);
3462
3463 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
3464 ret = -EINVAL;
3465 goto out;
3466 }
3467
3468 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
3469 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
3470 ret = -EINVAL;
3471 goto out;
3472 }
3473
3474 ret = sctp_auth_set_key(ep, asoc, authkey);
3475 out:
3476 kzfree(authkey);
3477 return ret;
3478 }
3479
3480 /*
3481 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
3482 *
3483 * This option will get or set the active shared key to be used to build
3484 * the association shared key.
3485 */
3486 static int sctp_setsockopt_active_key(struct sock *sk,
3487 char __user *optval,
3488 unsigned int optlen)
3489 {
3490 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3491 struct sctp_authkeyid val;
3492 struct sctp_association *asoc;
3493
3494 if (!ep->auth_enable)
3495 return -EACCES;
3496
3497 if (optlen != sizeof(struct sctp_authkeyid))
3498 return -EINVAL;
3499 if (copy_from_user(&val, optval, optlen))
3500 return -EFAULT;
3501
3502 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
3503 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3504 return -EINVAL;
3505
3506 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
3507 }
3508
3509 /*
3510 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
3511 *
3512 * This set option will delete a shared secret key from use.
3513 */
3514 static int sctp_setsockopt_del_key(struct sock *sk,
3515 char __user *optval,
3516 unsigned int optlen)
3517 {
3518 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3519 struct sctp_authkeyid val;
3520 struct sctp_association *asoc;
3521
3522 if (!ep->auth_enable)
3523 return -EACCES;
3524
3525 if (optlen != sizeof(struct sctp_authkeyid))
3526 return -EINVAL;
3527 if (copy_from_user(&val, optval, optlen))
3528 return -EFAULT;
3529
3530 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
3531 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3532 return -EINVAL;
3533
3534 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
3535
3536 }
3537
3538 /*
3539 * 8.1.23 SCTP_AUTO_ASCONF
3540 *
3541 * This option will enable or disable the use of the automatic generation of
3542 * ASCONF chunks to add and delete addresses to an existing association. Note
3543 * that this option has two caveats namely: a) it only affects sockets that
3544 * are bound to all addresses available to the SCTP stack, and b) the system
3545 * administrator may have an overriding control that turns the ASCONF feature
3546 * off no matter what setting the socket option may have.
3547 * This option expects an integer boolean flag, where a non-zero value turns on
3548 * the option, and a zero value turns off the option.
3549 * Note: in this implementation, the socket option overrides the default
3550 * parameter set by sysctl, as in the FreeBSD implementation.
3551 */
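/* Example (user space; an illustrative sketch, not part of the kernel code):
 * turn automatic ASCONF on for a socket bound to the wildcard address
 * ('sd' is assumed to be such an SCTP socket).
 *
 *	int on = 1;
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_AUTO_ASCONF, &on, sizeof(on)) < 0)
 *		perror("setsockopt(SCTP_AUTO_ASCONF)");
 *
 * As the handler below enforces, enabling this on a socket that is not
 * bound to all addresses fails with EINVAL.
 */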
3552 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
3553 unsigned int optlen)
3554 {
3555 int val;
3556 struct sctp_sock *sp = sctp_sk(sk);
3557
3558 if (optlen < sizeof(int))
3559 return -EINVAL;
3560 if (get_user(val, (int __user *)optval))
3561 return -EFAULT;
3562 if (!sctp_is_ep_boundall(sk) && val)
3563 return -EINVAL;
3564 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
3565 return 0;
3566
3567 spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
3568 if (val == 0 && sp->do_auto_asconf) {
3569 list_del(&sp->auto_asconf_list);
3570 sp->do_auto_asconf = 0;
3571 } else if (val && !sp->do_auto_asconf) {
3572 list_add_tail(&sp->auto_asconf_list,
3573 &sock_net(sk)->sctp.auto_asconf_splist);
3574 sp->do_auto_asconf = 1;
3575 }
3576 spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
3577 return 0;
3578 }
3579
3580 /*
3581 * SCTP_PEER_ADDR_THLDS
3582 *
3583 * This option allows us to alter the partially failed threshold for one or all
3584 * transports in an association. See Section 6.1 of:
3585 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
3586 */
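/* Example (user space; an illustrative sketch, not part of the kernel code):
 * set the partially-failed and maximum-retransmission thresholds for every
 * path of an association.  'sd' and 'assoc_id' are assumed to be a valid
 * SCTP socket and association id; leaving spt_address zeroed selects all
 * transports of the association.
 *
 *	struct sctp_paddrthlds th;
 *
 *	memset(&th, 0, sizeof(th));
 *	th.spt_assoc_id = assoc_id;
 *	th.spt_pathpfthld = 2;
 *	th.spt_pathmaxrxt = 5;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
 *		       &th, sizeof(th)) < 0)
 *		perror("setsockopt(SCTP_PEER_ADDR_THLDS)");
 *
 * A zero spt_pathmaxrxt leaves the current maximum-retransmission value
 * unchanged, as the handler below shows.
 */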
3587 static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
3588 char __user *optval,
3589 unsigned int optlen)
3590 {
3591 struct sctp_paddrthlds val;
3592 struct sctp_transport *trans;
3593 struct sctp_association *asoc;
3594
3595 if (optlen < sizeof(struct sctp_paddrthlds))
3596 return -EINVAL;
3597 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
3598 sizeof(struct sctp_paddrthlds)))
3599 return -EFAULT;
3600
3601
3602 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
3603 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
3604 if (!asoc)
3605 return -ENOENT;
3606 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
3607 transports) {
3608 if (val.spt_pathmaxrxt)
3609 trans->pathmaxrxt = val.spt_pathmaxrxt;
3610 trans->pf_retrans = val.spt_pathpfthld;
3611 }
3612
3613 if (val.spt_pathmaxrxt)
3614 asoc->pathmaxrxt = val.spt_pathmaxrxt;
3615 asoc->pf_retrans = val.spt_pathpfthld;
3616 } else {
3617 trans = sctp_addr_id2transport(sk, &val.spt_address,
3618 val.spt_assoc_id);
3619 if (!trans)
3620 return -ENOENT;
3621
3622 if (val.spt_pathmaxrxt)
3623 trans->pathmaxrxt = val.spt_pathmaxrxt;
3624 trans->pf_retrans = val.spt_pathpfthld;
3625 }
3626
3627 return 0;
3628 }
3629
3630 static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
3631 char __user *optval,
3632 unsigned int optlen)
3633 {
3634 int val;
3635
3636 if (optlen < sizeof(int))
3637 return -EINVAL;
3638 if (get_user(val, (int __user *) optval))
3639 return -EFAULT;
3640
3641 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;
3642
3643 return 0;
3644 }
3645
3646 static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
3647 char __user *optval,
3648 unsigned int optlen)
3649 {
3650 int val;
3651
3652 if (optlen < sizeof(int))
3653 return -EINVAL;
3654 if (get_user(val, (int __user *) optval))
3655 return -EFAULT;
3656
3657 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;
3658
3659 return 0;
3660 }
3661
3662 /* API 6.2 setsockopt(), getsockopt()
3663 *
3664 * Applications use setsockopt() and getsockopt() to set or retrieve
3665 * socket options. Socket options are used to change the default
3666 * behavior of socket calls. They are described in Section 7.
3667 *
3668 * The syntax is:
3669 *
3670 * ret = getsockopt(int sd, int level, int optname, void __user *optval,
3671 * int __user *optlen);
3672 * ret = setsockopt(int sd, int level, int optname, const void __user *optval,
3673 * int optlen);
3674 *
3675 * sd - the socket descriptor.
3676 * level - set to IPPROTO_SCTP for all SCTP options.
3677 * optname - the option name.
3678 * optval - the buffer to store the value of the option.
3679 * optlen - the size of the buffer.
3680 */
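/* Example (user space; an illustrative sketch, not part of the kernel code):
 * all SCTP-level options are accessed with level IPPROTO_SCTP, e.g. turning
 * off the Nagle-like algorithm on an SCTP socket 'sd':
 *
 *	int on = 1;
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
 *		perror("setsockopt(SCTP_NODELAY)");
 *
 * Options at other levels (e.g. SOL_SOCKET, IPPROTO_IP) are forwarded to the
 * address-family handler, as sctp_setsockopt() below does.
 */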
3681 static int sctp_setsockopt(struct sock *sk, int level, int optname,
3682 char __user *optval, unsigned int optlen)
3683 {
3684 int retval = 0;
3685
3686 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
3687
3688 /* I can hardly begin to describe how wrong this is. This is
3689 * so broken as to be worse than useless. The API draft
3690 * REALLY is NOT helpful here... I am not convinced that the
3691 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
3692 * are at all well-founded.
3693 */
3694 if (level != SOL_SCTP) {
3695 struct sctp_af *af = sctp_sk(sk)->pf->af;
3696 retval = af->setsockopt(sk, level, optname, optval, optlen);
3697 goto out_nounlock;
3698 }
3699
3700 lock_sock(sk);
3701
3702 switch (optname) {
3703 case SCTP_SOCKOPT_BINDX_ADD:
3704 /* 'optlen' is the size of the addresses buffer. */
3705 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
3706 optlen, SCTP_BINDX_ADD_ADDR);
3707 break;
3708
3709 case SCTP_SOCKOPT_BINDX_REM:
3710 /* 'optlen' is the size of the addresses buffer. */
3711 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
3712 optlen, SCTP_BINDX_REM_ADDR);
3713 break;
3714
3715 case SCTP_SOCKOPT_CONNECTX_OLD:
3716 /* 'optlen' is the size of the addresses buffer. */
3717 retval = sctp_setsockopt_connectx_old(sk,
3718 (struct sockaddr __user *)optval,
3719 optlen);
3720 break;
3721
3722 case SCTP_SOCKOPT_CONNECTX:
3723 /* 'optlen' is the size of the addresses buffer. */
3724 retval = sctp_setsockopt_connectx(sk,
3725 (struct sockaddr __user *)optval,
3726 optlen);
3727 break;
3728
3729 case SCTP_DISABLE_FRAGMENTS:
3730 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
3731 break;
3732
3733 case SCTP_EVENTS:
3734 retval = sctp_setsockopt_events(sk, optval, optlen);
3735 break;
3736
3737 case SCTP_AUTOCLOSE:
3738 retval = sctp_setsockopt_autoclose(sk, optval, optlen);
3739 break;
3740
3741 case SCTP_PEER_ADDR_PARAMS:
3742 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
3743 break;
3744
3745 case SCTP_DELAYED_SACK:
3746 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
3747 break;
3748 case SCTP_PARTIAL_DELIVERY_POINT:
3749 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
3750 break;
3751
3752 case SCTP_INITMSG:
3753 retval = sctp_setsockopt_initmsg(sk, optval, optlen);
3754 break;
3755 case SCTP_DEFAULT_SEND_PARAM:
3756 retval = sctp_setsockopt_default_send_param(sk, optval,
3757 optlen);
3758 break;
3759 case SCTP_DEFAULT_SNDINFO:
3760 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
3761 break;
3762 case SCTP_PRIMARY_ADDR:
3763 retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
3764 break;
3765 case SCTP_SET_PEER_PRIMARY_ADDR:
3766 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
3767 break;
3768 case SCTP_NODELAY:
3769 retval = sctp_setsockopt_nodelay(sk, optval, optlen);
3770 break;
3771 case SCTP_RTOINFO:
3772 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
3773 break;
3774 case SCTP_ASSOCINFO:
3775 retval = sctp_setsockopt_associnfo(sk, optval, optlen);
3776 break;
3777 case SCTP_I_WANT_MAPPED_V4_ADDR:
3778 retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
3779 break;
3780 case SCTP_MAXSEG:
3781 retval = sctp_setsockopt_maxseg(sk, optval, optlen);
3782 break;
3783 case SCTP_ADAPTATION_LAYER:
3784 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
3785 break;
3786 case SCTP_CONTEXT:
3787 retval = sctp_setsockopt_context(sk, optval, optlen);
3788 break;
3789 case SCTP_FRAGMENT_INTERLEAVE:
3790 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
3791 break;
3792 case SCTP_MAX_BURST:
3793 retval = sctp_setsockopt_maxburst(sk, optval, optlen);
3794 break;
3795 case SCTP_AUTH_CHUNK:
3796 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
3797 break;
3798 case SCTP_HMAC_IDENT:
3799 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
3800 break;
3801 case SCTP_AUTH_KEY:
3802 retval = sctp_setsockopt_auth_key(sk, optval, optlen);
3803 break;
3804 case SCTP_AUTH_ACTIVE_KEY:
3805 retval = sctp_setsockopt_active_key(sk, optval, optlen);
3806 break;
3807 case SCTP_AUTH_DELETE_KEY:
3808 retval = sctp_setsockopt_del_key(sk, optval, optlen);
3809 break;
3810 case SCTP_AUTO_ASCONF:
3811 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
3812 break;
3813 case SCTP_PEER_ADDR_THLDS:
3814 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
3815 break;
3816 case SCTP_RECVRCVINFO:
3817 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
3818 break;
3819 case SCTP_RECVNXTINFO:
3820 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
3821 break;
3822 default:
3823 retval = -ENOPROTOOPT;
3824 break;
3825 }
3826
3827 release_sock(sk);
3828
3829 out_nounlock:
3830 return retval;
3831 }
3832
3833 /* API 3.1.6 connect() - UDP Style Syntax
3834 *
3835 * An application may use the connect() call in the UDP model to initiate an
3836 * association without sending data.
3837 *
3838 * The syntax is:
3839 *
3840 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
3841 *
3842 * sd: the socket descriptor to have a new association added to.
3843 *
3844 * nam: the address structure (either struct sockaddr_in or struct
3845 * sockaddr_in6 defined in RFC2553 [7]).
3846 *
3847 * len: the size of the address.
3848 */
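/* Example (user space; an illustrative sketch, not part of the kernel code):
 * start an association from a one-to-many socket without sending data.
 * The address and port are placeholders.
 *
 *	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in peer;
 *
 *	memset(&peer, 0, sizeof(peer));
 *	peer.sin_family = AF_INET;
 *	peer.sin_port = htons(5000);
 *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *	if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
 *		perror("connect");
 */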
3849 static int sctp_connect(struct sock *sk, struct sockaddr *addr,
3850 int addr_len)
3851 {
3852 int err = 0;
3853 struct sctp_af *af;
3854
3855 lock_sock(sk);
3856
3857 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
3858 addr, addr_len);
3859
3860 /* Validate addr_len before calling common connect/connectx routine. */
3861 af = sctp_get_af_specific(addr->sa_family);
3862 if (!af || addr_len < af->sockaddr_len) {
3863 err = -EINVAL;
3864 } else {
3865 /* Pass the correct addr len to the common routine (so it knows there
3866 * is only one address being passed).
3867 */
3868 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
3869 }
3870
3871 release_sock(sk);
3872 return err;
3873 }
3874
3875 /* FIXME: Write comments. */
3876 static int sctp_disconnect(struct sock *sk, int flags)
3877 {
3878 return -EOPNOTSUPP; /* STUB */
3879 }
3880
3881 /* 4.1.4 accept() - TCP Style Syntax
3882 *
3883 * Applications use accept() call to remove an established SCTP
3884 * association from the accept queue of the endpoint. A new socket
3885 * descriptor will be returned from accept() to represent the newly
3886 * formed association.
3887 */
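/* Example (user space; an illustrative sketch, not part of the kernel code):
 * a one-to-one style server accepting one socket per association.  The port
 * is a placeholder.
 *
 *	int lsd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	struct sockaddr_in addr;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.sin_family = AF_INET;
 *	addr.sin_port = htons(5000);
 *	addr.sin_addr.s_addr = htonl(INADDR_ANY);
 *	bind(lsd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(lsd, 5);
 *	int csd = accept(lsd, NULL, NULL);
 *
 * Calling accept() on a one-to-many (SOCK_SEQPACKET) socket fails with
 * EOPNOTSUPP, as the handler below shows.
 */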
3888 static struct sock *sctp_accept(struct sock *sk, int flags, int *err)
3889 {
3890 struct sctp_sock *sp;
3891 struct sctp_endpoint *ep;
3892 struct sock *newsk = NULL;
3893 struct sctp_association *asoc;
3894 long timeo;
3895 int error = 0;
3896
3897 lock_sock(sk);
3898
3899 sp = sctp_sk(sk);
3900 ep = sp->ep;
3901
3902 if (!sctp_style(sk, TCP)) {
3903 error = -EOPNOTSUPP;
3904 goto out;
3905 }
3906
3907 if (!sctp_sstate(sk, LISTENING)) {
3908 error = -EINVAL;
3909 goto out;
3910 }
3911
3912 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
3913
3914 error = sctp_wait_for_accept(sk, timeo);
3915 if (error)
3916 goto out;
3917
3918 /* We treat the list of associations on the endpoint as the accept
3919 * queue and pick the first association on the list.
3920 */
3921 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
3922
3923 newsk = sp->pf->create_accept_sk(sk, asoc);
3924 if (!newsk) {
3925 error = -ENOMEM;
3926 goto out;
3927 }
3928
3929 /* Populate the fields of the newsk from the oldsk and migrate the
3930 * asoc to the newsk.
3931 */
3932 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
3933
3934 out:
3935 release_sock(sk);
3936 *err = error;
3937 return newsk;
3938 }
3939
3940 /* The SCTP ioctl handler. */
3941 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
3942 {
3943 int rc = -ENOTCONN;
3944
3945 lock_sock(sk);
3946
3947 /*
3948 * SEQPACKET-style sockets in LISTENING state are valid for
3949 * SCTP, so only discard TCP-style sockets in LISTENING state.
3950 */
3951 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
3952 goto out;
3953
3954 switch (cmd) {
3955 case SIOCINQ: {
3956 struct sk_buff *skb;
3957 unsigned int amount = 0;
3958
3959 skb = skb_peek(&sk->sk_receive_queue);
3960 if (skb != NULL) {
3961 /*
3962 * We will only return the amount of this packet since
3963 * that is all that will be read.
3964 */
3965 amount = skb->len;
3966 }
3967 rc = put_user(amount, (int __user *)arg);
3968 break;
3969 }
3970 default:
3971 rc = -ENOIOCTLCMD;
3972 break;
3973 }
3974 out:
3975 release_sock(sk);
3976 return rc;
3977 }
3978
3979 /* This is the function which gets called during socket creation to
3980 * initialize the SCTP-specific portion of the sock.
3981 * The sock structure should already be zero-filled memory.
3982 */
3983 static int sctp_init_sock(struct sock *sk)
3984 {
3985 struct net *net = sock_net(sk);
3986 struct sctp_sock *sp;
3987
3988 pr_debug("%s: sk:%p\n", __func__, sk);
3989
3990 sp = sctp_sk(sk);
3991
3992 /* Initialize the SCTP per socket area. */
3993 switch (sk->sk_type) {
3994 case SOCK_SEQPACKET:
3995 sp->type = SCTP_SOCKET_UDP;
3996 break;
3997 case SOCK_STREAM:
3998 sp->type = SCTP_SOCKET_TCP;
3999 break;
4000 default:
4001 return -ESOCKTNOSUPPORT;
4002 }
4003
4004 /* Initialize default send parameters. These parameters can be
4005 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
4006 */
4007 sp->default_stream = 0;
4008 sp->default_ppid = 0;
4009 sp->default_flags = 0;
4010 sp->default_context = 0;
4011 sp->default_timetolive = 0;
4012
4013 sp->default_rcv_context = 0;
4014 sp->max_burst = net->sctp.max_burst;
4015
4016 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
4017
4018 /* Initialize default setup parameters. These parameters
4019 * can be modified with the SCTP_INITMSG socket option or
4020 * overridden by the SCTP_INIT CMSG.
4021 */
4022 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
4023 sp->initmsg.sinit_max_instreams = sctp_max_instreams;
4024 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
4025 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
4026
4027 /* Initialize default RTO related parameters. These parameters can
4028 * be modified with the SCTP_RTOINFO socket option.
4029 */
4030 sp->rtoinfo.srto_initial = net->sctp.rto_initial;
4031 sp->rtoinfo.srto_max = net->sctp.rto_max;
4032 sp->rtoinfo.srto_min = net->sctp.rto_min;
4033
4034 /* Initialize default association related parameters. These parameters
4035 * can be modified with the SCTP_ASSOCINFO socket option.
4036 */
4037 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
4038 sp->assocparams.sasoc_number_peer_destinations = 0;
4039 sp->assocparams.sasoc_peer_rwnd = 0;
4040 sp->assocparams.sasoc_local_rwnd = 0;
4041 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
4042
4043 /* Initialize default event subscriptions. By default, all the
4044 * options are off.
4045 */
4046 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
4047
4048 /* Default Peer Address Parameters. These defaults can
4049 * be modified via SCTP_PEER_ADDR_PARAMS
4050 */
4051 sp->hbinterval = net->sctp.hb_interval;
4052 sp->pathmaxrxt = net->sctp.max_retrans_path;
4053 sp->pathmtu = 0; /* allow default discovery */
4054 sp->sackdelay = net->sctp.sack_timeout;
4055 sp->sackfreq = 2;
4056 sp->param_flags = SPP_HB_ENABLE |
4057 SPP_PMTUD_ENABLE |
4058 SPP_SACKDELAY_ENABLE;
4059
4060 /* If enabled no SCTP message fragmentation will be performed.
4061 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
4062 */
4063 sp->disable_fragments = 0;
4064
4065 /* Enable Nagle algorithm by default. */
4066 sp->nodelay = 0;
4067
4068 sp->recvrcvinfo = 0;
4069 sp->recvnxtinfo = 0;
4070
4071 /* Enable by default. */
4072 sp->v4mapped = 1;
4073
4074 /* Auto-close idle associations after the configured
4075 * number of seconds. A value of 0 disables this
4076 * feature. Configure through the SCTP_AUTOCLOSE socket option,
4077 * for UDP-style sockets only.
4078 */
4079 sp->autoclose = 0;
4080
4081 /* User specified fragmentation limit. */
4082 sp->user_frag = 0;
4083
4084 sp->adaptation_ind = 0;
4085
4086 sp->pf = sctp_get_pf_specific(sk->sk_family);
4087
4088 /* Control variables for partial data delivery. */
4089 atomic_set(&sp->pd_mode, 0);
4090 skb_queue_head_init(&sp->pd_lobby);
4091 sp->frag_interleave = 0;
4092
4093 /* Create a per socket endpoint structure. Even if we
4094 * change the data structure relationships, this may still
4095 * be useful for storing pre-connect address information.
4096 */
4097 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
4098 if (!sp->ep)
4099 return -ENOMEM;
4100
4101 sp->hmac = NULL;
4102
4103 sk->sk_destruct = sctp_destruct_sock;
4104
4105 SCTP_DBG_OBJCNT_INC(sock);
4106
4107 local_bh_disable();
4108 percpu_counter_inc(&sctp_sockets_allocated);
4109 sock_prot_inuse_add(net, sk->sk_prot, 1);
4110
4111 /* Nothing can fail after this block, otherwise
4112 * sctp_destroy_sock() will be called without addr_wq_lock held
4113 */
4114 if (net->sctp.default_auto_asconf) {
4115 spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
4116 list_add_tail(&sp->auto_asconf_list,
4117 &net->sctp.auto_asconf_splist);
4118 sp->do_auto_asconf = 1;
4119 spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
4120 } else {
4121 sp->do_auto_asconf = 0;
4122 }
4123
4124 local_bh_enable();
4125
4126 return 0;
4127 }
4128
4129 /* Cleanup any SCTP per socket resources. Must be called with
4130 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
4131 */
4132 static void sctp_destroy_sock(struct sock *sk)
4133 {
4134 struct sctp_sock *sp;
4135
4136 pr_debug("%s: sk:%p\n", __func__, sk);
4137
4138 /* Release our hold on the endpoint. */
4139 sp = sctp_sk(sk);
4140 /* This could happen during socket init, thus we bail out
4141 * early, since the rest of the below is not set up either.
4142 */
4143 if (sp->ep == NULL)
4144 return;
4145
4146 if (sp->do_auto_asconf) {
4147 sp->do_auto_asconf = 0;
4148 list_del(&sp->auto_asconf_list);
4149 }
4150 sctp_endpoint_free(sp->ep);
4151 local_bh_disable();
4152 percpu_counter_dec(&sctp_sockets_allocated);
4153 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
4154 local_bh_enable();
4155 }
4156
4157 /* Triggered when there are no references on the socket anymore */
4158 static void sctp_destruct_sock(struct sock *sk)
4159 {
4160 struct sctp_sock *sp = sctp_sk(sk);
4161
4162 /* Free up the HMAC transform. */
4163 crypto_free_hash(sp->hmac);
4164
4165 inet_sock_destruct(sk);
4166 }
4167
4168 /* API 4.1.7 shutdown() - TCP Style Syntax
4169 * int shutdown(int socket, int how);
4170 *
4171 * sd - the socket descriptor of the association to be closed.
4172 * how - Specifies the type of shutdown. The values are
4173 * as follows:
4174 * SHUT_RD
4175 * Disables further receive operations. No SCTP
4176 * protocol action is taken.
4177 * SHUT_WR
4178 * Disables further send operations, and initiates
4179 * the SCTP shutdown sequence.
4180 * SHUT_RDWR
4181 * Disables further send and receive operations
4182 * and initiates the SCTP shutdown sequence.
4183 */
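/* Example (user space; an illustrative sketch, not part of the kernel code):
 * gracefully close the write side of a one-to-one style socket 'sd', which
 * starts the SCTP SHUTDOWN sequence while still allowing pending data to be
 * read.
 *
 *	if (shutdown(sd, SHUT_WR) < 0)
 *		perror("shutdown");
 *
 * As the handler below shows, shutdown() is a no-op on one-to-many
 * (UDP-style) sockets.
 */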
4184 static void sctp_shutdown(struct sock *sk, int how)
4185 {
4186 struct net *net = sock_net(sk);
4187 struct sctp_endpoint *ep;
4188 struct sctp_association *asoc;
4189
4190 if (!sctp_style(sk, TCP))
4191 return;
4192
4193 if (how & SEND_SHUTDOWN) {
4194 ep = sctp_sk(sk)->ep;
4195 if (!list_empty(&ep->asocs)) {
4196 asoc = list_entry(ep->asocs.next,
4197 struct sctp_association, asocs);
4198 sctp_primitive_SHUTDOWN(net, asoc, NULL);
4199 }
4200 }
4201 }
4202
4203 /* 7.2.1 Association Status (SCTP_STATUS)
4204
4205 * Applications can retrieve current status information about an
4206 * association, including association state, peer receiver window size,
4207 * number of unacked data chunks, and number of data chunks pending
4208 * receipt. This information is read-only.
4209 */
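/* Example (user space; an illustrative sketch, not part of the kernel code):
 * query the state of an association.  'sd' and 'assoc_id' are assumed to be
 * a valid SCTP socket and association id (the id is required on one-to-many
 * sockets).
 *
 *	struct sctp_status st;
 *	socklen_t len = sizeof(st);
 *
 *	memset(&st, 0, sizeof(st));
 *	st.sstat_assoc_id = assoc_id;
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) == 0)
 *		printf("state %d rwnd %u unacked %u\n", st.sstat_state,
 *		       st.sstat_rwnd, (unsigned)st.sstat_unackdata);
 */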
4210 static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
4211 char __user *optval,
4212 int __user *optlen)
4213 {
4214 struct sctp_status status;
4215 struct sctp_association *asoc = NULL;
4216 struct sctp_transport *transport;
4217 sctp_assoc_t associd;
4218 int retval = 0;
4219
4220 if (len < sizeof(status)) {
4221 retval = -EINVAL;
4222 goto out;
4223 }
4224
4225 len = sizeof(status);
4226 if (copy_from_user(&status, optval, len)) {
4227 retval = -EFAULT;
4228 goto out;
4229 }
4230
4231 associd = status.sstat_assoc_id;
4232 asoc = sctp_id2assoc(sk, associd);
4233 if (!asoc) {
4234 retval = -EINVAL;
4235 goto out;
4236 }
4237
4238 transport = asoc->peer.primary_path;
4239
4240 status.sstat_assoc_id = sctp_assoc2id(asoc);
4241 status.sstat_state = sctp_assoc_to_state(asoc);
4242 status.sstat_rwnd = asoc->peer.rwnd;
4243 status.sstat_unackdata = asoc->unack_data;
4244
4245 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4246 status.sstat_instrms = asoc->c.sinit_max_instreams;
4247 status.sstat_outstrms = asoc->c.sinit_num_ostreams;
4248 status.sstat_fragmentation_point = asoc->frag_point;
4249 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4250 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
4251 transport->af_specific->sockaddr_len);
4252 /* Map ipv4 address into v4-mapped-on-v6 address. */
4253 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
4254 (union sctp_addr *)&status.sstat_primary.spinfo_address);
4255 status.sstat_primary.spinfo_state = transport->state;
4256 status.sstat_primary.spinfo_cwnd = transport->cwnd;
4257 status.sstat_primary.spinfo_srtt = transport->srtt;
4258 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
4259 status.sstat_primary.spinfo_mtu = transport->pathmtu;
4260
4261 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
4262 status.sstat_primary.spinfo_state = SCTP_ACTIVE;
4263
4264 if (put_user(len, optlen)) {
4265 retval = -EFAULT;
4266 goto out;
4267 }
4268
4269 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
4270 __func__, len, status.sstat_state, status.sstat_rwnd,
4271 status.sstat_assoc_id);
4272
4273 if (copy_to_user(optval, &status, len)) {
4274 retval = -EFAULT;
4275 goto out;
4276 }
4277
4278 out:
4279 return retval;
4280 }
4281
4282
4283 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
4284 *
4285 * Applications can retrieve information about a specific peer address
4286 * of an association, including its reachability state, congestion
4287 * window, and retransmission timer values. This information is
4288 * read-only.
4289 */
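/* Example (user space; an illustrative sketch, not part of the kernel code):
 * read the transport state of one peer address.  'sd' and 'assoc_id' are
 * assumed valid and 'peer' is a struct sockaddr_in holding one of the
 * peer's addresses.
 *
 *	struct sctp_paddrinfo pi;
 *	socklen_t len = sizeof(pi);
 *
 *	memset(&pi, 0, sizeof(pi));
 *	pi.spinfo_assoc_id = assoc_id;
 *	memcpy(&pi.spinfo_address, &peer, sizeof(peer));
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
 *		       &pi, &len) == 0)
 *		printf("srtt %u, path mtu %u\n", pi.spinfo_srtt,
 *		       pi.spinfo_mtu);
 */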
4290 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
4291 char __user *optval,
4292 int __user *optlen)
4293 {
4294 struct sctp_paddrinfo pinfo;
4295 struct sctp_transport *transport;
4296 int retval = 0;
4297
4298 if (len < sizeof(pinfo)) {
4299 retval = -EINVAL;
4300 goto out;
4301 }
4302
4303 len = sizeof(pinfo);
4304 if (copy_from_user(&pinfo, optval, len)) {
4305 retval = -EFAULT;
4306 goto out;
4307 }
4308
4309 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
4310 pinfo.spinfo_assoc_id);
4311 if (!transport)
4312 return -EINVAL;
4313
4314 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4315 pinfo.spinfo_state = transport->state;
4316 pinfo.spinfo_cwnd = transport->cwnd;
4317 pinfo.spinfo_srtt = transport->srtt;
4318 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
4319 pinfo.spinfo_mtu = transport->pathmtu;
4320
4321 if (pinfo.spinfo_state == SCTP_UNKNOWN)
4322 pinfo.spinfo_state = SCTP_ACTIVE;
4323
4324 if (put_user(len, optlen)) {
4325 retval = -EFAULT;
4326 goto out;
4327 }
4328
4329 if (copy_to_user(optval, &pinfo, len)) {
4330 retval = -EFAULT;
4331 goto out;
4332 }
4333
4334 out:
4335 return retval;
4336 }
4337
4338 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
4339 *
4340 * This option is an on/off flag. If enabled, no SCTP message
4341 * fragmentation will be performed. Instead, if a message being sent
4342 * exceeds the current PMTU size, the message will NOT be sent and
4343 * an error will be indicated to the user.
4344 */
4345 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
4346 char __user *optval, int __user *optlen)
4347 {
4348 int val;
4349
4350 if (len < sizeof(int))
4351 return -EINVAL;
4352
4353 len = sizeof(int);
4354 val = (sctp_sk(sk)->disable_fragments == 1);
4355 if (put_user(len, optlen))
4356 return -EFAULT;
4357 if (copy_to_user(optval, &val, len))
4358 return -EFAULT;
4359 return 0;
4360 }
4361
4362 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
4363 *
4364 * This socket option is used to specify various notifications and
4365 * ancillary data the user wishes to receive.
4366 */
4367 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
4368 int __user *optlen)
4369 {
4370 if (len <= 0)
4371 return -EINVAL;
4372 if (len > sizeof(struct sctp_event_subscribe))
4373 len = sizeof(struct sctp_event_subscribe);
4374 if (put_user(len, optlen))
4375 return -EFAULT;
4376 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
4377 return -EFAULT;
4378 return 0;
4379 }
4380
4381 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
4382 *
4383 * This socket option is applicable to the UDP-style socket only. When
4384 * set it will cause associations that are idle for more than the
4385 * specified number of seconds to automatically close. An association
4386 * being idle is defined as an association that has NOT sent or received
4387 * user data. The special value of '0' indicates that no automatic
4388 * close of any associations should be performed. The option expects an
4389 * integer defining the number of seconds of idle time before an
4390 * association is closed.
4391 */
4392 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
4393 {
4394 /* Applicable to UDP-style socket only */
4395 if (sctp_style(sk, TCP))
4396 return -EOPNOTSUPP;
4397 if (len < sizeof(int))
4398 return -EINVAL;
4399 len = sizeof(int);
4400 if (put_user(len, optlen))
4401 return -EFAULT;
4402 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
4403 return -EFAULT;
4404 return 0;
4405 }
4406
4407 /* Helper routine to branch off an association to a new socket. */
4408 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
4409 {
4410 struct sctp_association *asoc = sctp_id2assoc(sk, id);
4411 struct sctp_sock *sp = sctp_sk(sk);
4412 struct socket *sock;
4413 int err = 0;
4414
4415 if (!asoc)
4416 return -EINVAL;
4417
4418 /* An association cannot be branched off from an already peeled-off
4419 * socket, nor is this supported for tcp style sockets.
4420 */
4421 if (!sctp_style(sk, UDP))
4422 return -EINVAL;
4423
4424 /* Create a new socket. */
4425 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
4426 if (err < 0)
4427 return err;
4428
4429 sctp_copy_sock(sock->sk, sk, asoc);
4430
4431 /* Make peeled-off sockets more like 1-1 accepted sockets.
4432 * Set the daddr and initialize id to something more random
4433 */
4434 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
4435
4436 /* Populate the fields of the newsk from the oldsk and migrate the
4437 * asoc to the newsk.
4438 */
4439 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
4440
4441 *sockp = sock;
4442
4443 return err;
4444 }
4445 EXPORT_SYMBOL(sctp_do_peeloff);
4446
4447 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
4448 {
4449 sctp_peeloff_arg_t peeloff;
4450 struct socket *newsock;
4451 struct file *newfile;
4452 int retval = 0;
4453
4454 if (len < sizeof(sctp_peeloff_arg_t))
4455 return -EINVAL;
4456 len = sizeof(sctp_peeloff_arg_t);
4457 if (copy_from_user(&peeloff, optval, len))
4458 return -EFAULT;
4459
4460 retval = sctp_do_peeloff(sk, peeloff.associd, &newsock);
4461 if (retval < 0)
4462 goto out;
4463
4464 /* Map the socket to an unused fd that can be returned to the user. */
4465 retval = get_unused_fd_flags(0);
4466 if (retval < 0) {
4467 sock_release(newsock);
4468 goto out;
4469 }
4470
4471 newfile = sock_alloc_file(newsock, 0, NULL);
4472 if (IS_ERR(newfile)) {
4473 put_unused_fd(retval);
4474 sock_release(newsock);
4475 return PTR_ERR(newfile);
4476 }
4477
4478 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
4479 retval);
4480
4481 /* Return the fd mapped to the new socket. */
4482 if (put_user(len, optlen)) {
4483 fput(newfile);
4484 put_unused_fd(retval);
4485 return -EFAULT;
4486 }
4487 peeloff.sd = retval;
4488 if (copy_to_user(optval, &peeloff, len)) {
4489 fput(newfile);
4490 put_unused_fd(retval);
4491 return -EFAULT;
4492 }
4493 fd_install(retval, newfile);
4494 out:
4495 return retval;
4496 }
4497
4498 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
4499 *
4500 * Applications can enable or disable heartbeats for any peer address of
4501 * an association, modify an address's heartbeat interval, force a
4502 * heartbeat to be sent immediately, and adjust the address's maximum
4503 * number of retransmissions sent before an address is considered
4504 * unreachable. The following structure is used to access and modify an
4505 * address's parameters:
4506 *
4507 * struct sctp_paddrparams {
4508 * sctp_assoc_t spp_assoc_id;
4509 * struct sockaddr_storage spp_address;
4510 * uint32_t spp_hbinterval;
4511 * uint16_t spp_pathmaxrxt;
4512 * uint32_t spp_pathmtu;
4513 * uint32_t spp_sackdelay;
4514 * uint32_t spp_flags;
4515 * };
4516 *
4517 * spp_assoc_id - (one-to-many style socket) This is filled in by the
4518 * application, and identifies the association for
4519 * this query.
4520 * spp_address - This specifies which address is of interest.
4521 * spp_hbinterval - This contains the value of the heartbeat interval,
4522 * in milliseconds. If a value of zero
4523 * is present in this field then no changes are to
4524 * be made to this parameter.
4525 * spp_pathmaxrxt - This contains the maximum number of
4526 * retransmissions before this address shall be
4527 * considered unreachable. If a value of zero
4528 * is present in this field then no changes are to
4529 * be made to this parameter.
4530 * spp_pathmtu - When Path MTU discovery is disabled the value
4531 * specified here will be the "fixed" path mtu.
4532 * Note that if the spp_address field is empty
4533 * then all associations on this address will
4534 * have this fixed path mtu set upon them.
4535 *
4536 * spp_sackdelay - When delayed sack is enabled, this value specifies
4537 * the number of milliseconds that sacks will be delayed
4538 * for. This value will apply to all addresses of an
4539 * association if the spp_address field is empty. Note
4540 * also, that if delayed sack is enabled and this
4541 * value is set to 0, no change is made to the last
4542 * recorded delayed sack timer value.
4543 *
4544 * spp_flags - These flags are used to control various features
4545 * on an association. The flag field may contain
4546 * zero or more of the following options.
4547 *
4548 * SPP_HB_ENABLE - Enable heartbeats on the
4549 * specified address. Note that if the address
4550 * field is empty all addresses for the association
4551 * have heartbeats enabled upon them.
4552 *
4553 * SPP_HB_DISABLE - Disable heartbeats on the
4554 * specified address. Note that if the address
4555 * field is empty all addresses for the association
4556 * will have their heartbeats disabled. Note also
4557 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
4558 * mutually exclusive, only one of these two should
4559 * be specified. Enabling both fields will have
4560 * undetermined results.
4561 *
4562 * SPP_HB_DEMAND - Request a user initiated heartbeat
4563 * to be made immediately.
4564 *
4565 * SPP_PMTUD_ENABLE - This field will enable PMTU
4566 * discovery upon the specified address. Note that
4567 * if the address field is empty then all addresses
4568 * on the association are affected.
4569 *
4570 * SPP_PMTUD_DISABLE - This field will disable PMTU
4571 * discovery upon the specified address. Note that
4572 * if the address feild is empty then all addresses
4573 * on the association are effected. Not also that
4574 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
4575 * exclusive. Enabling both will have undetermined
4576 * results.
4577 *
4578 * SPP_SACKDELAY_ENABLE - Setting this flag turns
4579 * on delayed sack. The time specified in spp_sackdelay
4580 * is used to specify the sack delay for this address. Note
4581 * that if spp_address is empty then all addresses will
4582 * enable delayed sack and take on the sack delay
4583 * value specified in spp_sackdelay.
4584 * SPP_SACKDELAY_DISABLE - Setting this flag turns
4585 * off delayed sack. If the spp_address field is blank then
4586 * delayed sack is disabled for the entire association. Note
4587 * also that this field is mutually exclusive to
4588 * SPP_SACKDELAY_ENABLE, setting both will have undefined
4589 * results.
4590 */
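/* Example (user space; an illustrative sketch, not part of the kernel code):
 * enable heartbeats with a 5 second interval on every path of an
 * association.  'sd' and 'assoc_id' are assumed valid; leaving spp_address
 * zeroed applies the change to all of the association's addresses.
 *
 *	struct sctp_paddrparams pp;
 *
 *	memset(&pp, 0, sizeof(pp));
 *	pp.spp_assoc_id = assoc_id;
 *	pp.spp_hbinterval = 5000;
 *	pp.spp_flags = SPP_HB_ENABLE;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *		       &pp, sizeof(pp)) < 0)
 *		perror("setsockopt(SCTP_PEER_ADDR_PARAMS)");
 */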
4591 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
4592 char __user *optval, int __user *optlen)
4593 {
4594 struct sctp_paddrparams params;
4595 struct sctp_transport *trans = NULL;
4596 struct sctp_association *asoc = NULL;
4597 struct sctp_sock *sp = sctp_sk(sk);
4598
4599 if (len < sizeof(struct sctp_paddrparams))
4600 return -EINVAL;
4601 len = sizeof(struct sctp_paddrparams);
4602 if (copy_from_user(&params, optval, len))
4603 return -EFAULT;
4604
4605 /* If an address other than INADDR_ANY is specified, and
4606 * no transport is found, then the request is invalid.
4607 */
4608 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
4609 trans = sctp_addr_id2transport(sk, &params.spp_address,
4610 params.spp_assoc_id);
4611 if (!trans) {
4612 pr_debug("%s: failed no transport\n", __func__);
4613 return -EINVAL;
4614 }
4615 }
4616
4617 /* Get association, if assoc_id != 0 and the socket is a one
4618 * to many style socket, and an association was not found, then
4619 * the id was invalid.
4620 */
4621 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
4622 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
4623 pr_debug("%s: failed no association\n", __func__);
4624 return -EINVAL;
4625 }
4626
4627 if (trans) {
4628 /* Fetch transport values. */
4629 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
4630 params.spp_pathmtu = trans->pathmtu;
4631 params.spp_pathmaxrxt = trans->pathmaxrxt;
4632 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);
4633
4634 /*draft-11 doesn't say what to return in spp_flags*/
4635 params.spp_flags = trans->param_flags;
4636 } else if (asoc) {
4637 /* Fetch association values. */
4638 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
4639 params.spp_pathmtu = asoc->pathmtu;
4640 params.spp_pathmaxrxt = asoc->pathmaxrxt;
4641 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);
4642
4643 /*draft-11 doesn't say what to return in spp_flags*/
4644 params.spp_flags = asoc->param_flags;
4645 } else {
4646 /* Fetch socket values. */
4647 params.spp_hbinterval = sp->hbinterval;
4648 params.spp_pathmtu = sp->pathmtu;
4649 params.spp_sackdelay = sp->sackdelay;
4650 params.spp_pathmaxrxt = sp->pathmaxrxt;
4651
4652 /*draft-11 doesn't say what to return in spp_flags*/
4653 params.spp_flags = sp->param_flags;
4654 }
4655
4656 if (copy_to_user(optval, &params, len))
4657 return -EFAULT;
4658
4659 if (put_user(len, optlen))
4660 return -EFAULT;
4661
4662 return 0;
4663 }
4664
4665 /*
4666 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
4667 *
4668 * This option will affect the way delayed acks are performed. This
4669 * option allows you to get or set the delayed ack time, in
4670 * milliseconds. It also allows changing the delayed ack frequency.
4671 * Changing the frequency to 1 disables the delayed sack algorithm. If
4672 * the assoc_id is 0, then this sets or gets the endpoint's default
4673 * values. If the assoc_id field is non-zero, then the set or get
4674 * affects the specified association for the one-to-many model (the
4675 * assoc_id field is ignored by the one-to-one model). Note that if
4676 * sack_delay or sack_freq are 0 when setting this option, then the
4677 * current values will remain unchanged.
4678 *
4679 * struct sctp_sack_info {
4680 * sctp_assoc_t sack_assoc_id;
4681 * uint32_t sack_delay;
4682 * uint32_t sack_freq;
4683 * };
4684 *
4685 * sack_assoc_id - This parameter, indicates which association the user
4686 * is performing an action upon. Note that if this field's value is
4687 * zero then the endpoints default value is changed (effecting future
4688 * associations only).
4689 *
4690 * sack_delay - This parameter contains the number of milliseconds that
4691 * the user is requesting the delayed ACK timer be set to. Note that
4692 * this value is defined in the standard to be between 200 and 500
4693 * milliseconds.
4694 *
4695 * sack_freq - This parameter contains the number of packets that must
4696 * be received before a sack is sent without waiting for the delay
4697 * timer to expire. The default value for this is 2, setting this
4698 * value to 1 will disable the delayed sack algorithm.
4699 */
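/* Example (user space; an illustrative sketch, not part of the kernel code):
 * set a 200 ms delayed-SACK timer and the default frequency of 2 packets
 * on an association.  'sd' and 'assoc_id' are assumed valid; an assoc_id of
 * 0 changes the endpoint default instead.
 *
 *	struct sctp_sack_info si;
 *
 *	memset(&si, 0, sizeof(si));
 *	si.sack_assoc_id = assoc_id;
 *	si.sack_delay = 200;
 *	si.sack_freq = 2;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK,
 *		       &si, sizeof(si)) < 0)
 *		perror("setsockopt(SCTP_DELAYED_SACK)");
 */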
4700 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
4701 char __user *optval,
4702 int __user *optlen)
4703 {
4704 struct sctp_sack_info params;
4705 struct sctp_association *asoc = NULL;
4706 struct sctp_sock *sp = sctp_sk(sk);
4707
4708 if (len >= sizeof(struct sctp_sack_info)) {
4709 len = sizeof(struct sctp_sack_info);
4710
4711 if (copy_from_user(&params, optval, len))
4712 return -EFAULT;
4713 } else if (len == sizeof(struct sctp_assoc_value)) {
4714 pr_warn_ratelimited(DEPRECATED
4715 "%s (pid %d) "
4716 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
4717 "Use struct sctp_sack_info instead\n",
4718 current->comm, task_pid_nr(current));
4719 if (copy_from_user(&params, optval, len))
4720 return -EFAULT;
4721 } else
4722 return -EINVAL;
4723
4724 /* Get association, if sack_assoc_id != 0 and the socket is a one
4725 * to many style socket, and an association was not found, then
4726 * the id was invalid.
4727 */
4728 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
4729 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
4730 return -EINVAL;
4731
4732 if (asoc) {
4733 /* Fetch association values. */
4734 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
4735 params.sack_delay = jiffies_to_msecs(
4736 asoc->sackdelay);
4737 params.sack_freq = asoc->sackfreq;
4738
4739 } else {
4740 params.sack_delay = 0;
4741 params.sack_freq = 1;
4742 }
4743 } else {
4744 /* Fetch socket values. */
4745 if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
4746 params.sack_delay = sp->sackdelay;
4747 params.sack_freq = sp->sackfreq;
4748 } else {
4749 params.sack_delay = 0;
4750 params.sack_freq = 1;
4751 }
4752 }
4753
4754 if (copy_to_user(optval, &params, len))
4755 return -EFAULT;
4756
4757 if (put_user(len, optlen))
4758 return -EFAULT;
4759
4760 return 0;
4761 }
4762
4763 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
4764 *
4765 * Applications can specify protocol parameters for the default association
4766 * initialization. The option name argument to setsockopt() and getsockopt()
4767 * is SCTP_INITMSG.
4768 *
4769 * Setting initialization parameters is effective only on an unconnected
4770 * socket (for UDP-style sockets only future associations are affected
4771 * by the change). With TCP-style sockets, this option is inherited by
4772 * sockets derived from a listener socket.
4773 */
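/* Example (user space; an illustrative sketch, not part of the kernel code):
 * request up to 10 outbound streams and advertise support for 10 inbound
 * streams for future associations on socket 'sd'.
 *
 *	struct sctp_initmsg im;
 *
 *	memset(&im, 0, sizeof(im));
 *	im.sinit_num_ostreams = 10;
 *	im.sinit_max_instreams = 10;
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im)) < 0)
 *		perror("setsockopt(SCTP_INITMSG)");
 */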
4774 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
4775 {
4776 if (len < sizeof(struct sctp_initmsg))
4777 return -EINVAL;
4778 len = sizeof(struct sctp_initmsg);
4779 if (put_user(len, optlen))
4780 return -EFAULT;
4781 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
4782 return -EFAULT;
4783 return 0;
4784 }
4785
4786
4787 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
4788 char __user *optval, int __user *optlen)
4789 {
4790 struct sctp_association *asoc;
4791 int cnt = 0;
4792 struct sctp_getaddrs getaddrs;
4793 struct sctp_transport *from;
4794 void __user *to;
4795 union sctp_addr temp;
4796 struct sctp_sock *sp = sctp_sk(sk);
4797 int addrlen;
4798 size_t space_left;
4799 int bytes_copied;
4800
4801 if (len < sizeof(struct sctp_getaddrs))
4802 return -EINVAL;
4803
4804 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
4805 return -EFAULT;
4806
4807 /* For UDP-style sockets, id specifies the association to query. */
4808 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4809 if (!asoc)
4810 return -EINVAL;
4811
4812 to = optval + offsetof(struct sctp_getaddrs, addrs);
4813 space_left = len - offsetof(struct sctp_getaddrs, addrs);
4814
4815 list_for_each_entry(from, &asoc->peer.transport_addr_list,
4816 transports) {
4817 memcpy(&temp, &from->ipaddr, sizeof(temp));
4818 addrlen = sctp_get_pf_specific(sk->sk_family)
4819 ->addr_to_user(sp, &temp);
4820 if (space_left < addrlen)
4821 return -ENOMEM;
4822 if (copy_to_user(to, &temp, addrlen))
4823 return -EFAULT;
4824 to += addrlen;
4825 cnt++;
4826 space_left -= addrlen;
4827 }
4828
4829 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
4830 return -EFAULT;
4831 bytes_copied = ((char __user *)to) - optval;
4832 if (put_user(bytes_copied, optlen))
4833 return -EFAULT;
4834
4835 return 0;
4836 }
4837
4838 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
4839 size_t space_left, int *bytes_copied)
4840 {
4841 struct sctp_sockaddr_entry *addr;
4842 union sctp_addr temp;
4843 int cnt = 0;
4844 int addrlen;
4845 struct net *net = sock_net(sk);
4846
4847 rcu_read_lock();
4848 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
4849 if (!addr->valid)
4850 continue;
4851
4852 if ((PF_INET == sk->sk_family) &&
4853 (AF_INET6 == addr->a.sa.sa_family))
4854 continue;
4855 if ((PF_INET6 == sk->sk_family) &&
4856 inet_v6_ipv6only(sk) &&
4857 (AF_INET == addr->a.sa.sa_family))
4858 continue;
4859 memcpy(&temp, &addr->a, sizeof(temp));
4860 if (!temp.v4.sin_port)
4861 temp.v4.sin_port = htons(port);
4862
4863 addrlen = sctp_get_pf_specific(sk->sk_family)
4864 ->addr_to_user(sctp_sk(sk), &temp);
4865
4866 if (space_left < addrlen) {
4867 cnt = -ENOMEM;
4868 break;
4869 }
4870 memcpy(to, &temp, addrlen);
4871
4872 to += addrlen;
4873 cnt++;
4874 space_left -= addrlen;
4875 *bytes_copied += addrlen;
4876 }
4877 rcu_read_unlock();
4878
4879 return cnt;
4880 }
4881
4882
4883 static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
4884 char __user *optval, int __user *optlen)
4885 {
4886 struct sctp_bind_addr *bp;
4887 struct sctp_association *asoc;
4888 int cnt = 0;
4889 struct sctp_getaddrs getaddrs;
4890 struct sctp_sockaddr_entry *addr;
4891 void __user *to;
4892 union sctp_addr temp;
4893 struct sctp_sock *sp = sctp_sk(sk);
4894 int addrlen;
4895 int err = 0;
4896 size_t space_left;
4897 int bytes_copied = 0;
4898 void *addrs;
4899 void *buf;
4900
4901 if (len < sizeof(struct sctp_getaddrs))
4902 return -EINVAL;
4903
4904 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
4905 return -EFAULT;
4906
4907 /*
4908 * For UDP-style sockets, id specifies the association to query.
4909 * If the id field is set to the value '0' then the locally bound
4910 * addresses are returned without regard to any particular
4911 * association.
4912 */
4913 if (0 == getaddrs.assoc_id) {
4914 bp = &sctp_sk(sk)->ep->base.bind_addr;
4915 } else {
4916 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
4917 if (!asoc)
4918 return -EINVAL;
4919 bp = &asoc->base.bind_addr;
4920 }
4921
4922 to = optval + offsetof(struct sctp_getaddrs, addrs);
4923 space_left = len - offsetof(struct sctp_getaddrs, addrs);
4924
4925 addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
4926 if (!addrs)
4927 return -ENOMEM;
4928
4929 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
4930 * addresses from the global local address list.
4931 */
4932 if (sctp_list_single_entry(&bp->address_list)) {
4933 addr = list_entry(bp->address_list.next,
4934 struct sctp_sockaddr_entry, list);
4935 if (sctp_is_any(sk, &addr->a)) {
4936 cnt = sctp_copy_laddrs(sk, bp->port, addrs,
4937 space_left, &bytes_copied);
4938 if (cnt < 0) {
4939 err = cnt;
4940 goto out;
4941 }
4942 goto copy_getaddrs;
4943 }
4944 }
4945
4946 buf = addrs;
4947 /* Protection on the bound address list is not needed since
4948 * in the socket option context we hold a socket lock and
4949 * thus the bound address list can't change.
4950 */
4951 list_for_each_entry(addr, &bp->address_list, list) {
4952 memcpy(&temp, &addr->a, sizeof(temp));
4953 addrlen = sctp_get_pf_specific(sk->sk_family)
4954 ->addr_to_user(sp, &temp);
4955 if (space_left < addrlen) {
4956 err = -ENOMEM; /*fixme: right error?*/
4957 goto out;
4958 }
4959 memcpy(buf, &temp, addrlen);
4960 buf += addrlen;
4961 bytes_copied += addrlen;
4962 cnt++;
4963 space_left -= addrlen;
4964 }
4965
4966 copy_getaddrs:
4967 if (copy_to_user(to, addrs, bytes_copied)) {
4968 err = -EFAULT;
4969 goto out;
4970 }
4971 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
4972 err = -EFAULT;
4973 goto out;
4974 }
4975 if (put_user(bytes_copied, optlen))
4976 err = -EFAULT;
4977 out:
4978 kfree(addrs);
4979 return err;
4980 }
4981
4982 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
4983 *
4984 * Requests that the local SCTP stack use the enclosed peer address as
4985 * the association primary. The enclosed address must be one of the
4986 * association peer's addresses.
4987 */
4988 static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
4989 char __user *optval, int __user *optlen)
4990 {
4991 struct sctp_prim prim;
4992 struct sctp_association *asoc;
4993 struct sctp_sock *sp = sctp_sk(sk);
4994
4995 if (len < sizeof(struct sctp_prim))
4996 return -EINVAL;
4997
4998 len = sizeof(struct sctp_prim);
4999
5000 if (copy_from_user(&prim, optval, len))
5001 return -EFAULT;
5002
5003 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
5004 if (!asoc)
5005 return -EINVAL;
5006
5007 if (!asoc->peer.primary_path)
5008 return -ENOTCONN;
5009
5010 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
5011 asoc->peer.primary_path->af_specific->sockaddr_len);
5012
5013 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
5014 (union sctp_addr *)&prim.ssp_addr);
5015
5016 if (put_user(len, optlen))
5017 return -EFAULT;
5018 if (copy_to_user(optval, &prim, len))
5019 return -EFAULT;
5020
5021 return 0;
5022 }
5023
5024 /*
5025 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
5026 *
5027 * Requests that the local endpoint set the specified Adaptation Layer
5028 * Indication parameter for all future INIT and INIT-ACK exchanges.
5029 */
5030 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
5031 char __user *optval, int __user *optlen)
5032 {
5033 struct sctp_setadaptation adaptation;
5034
5035 if (len < sizeof(struct sctp_setadaptation))
5036 return -EINVAL;
5037
5038 len = sizeof(struct sctp_setadaptation);
5039
5040 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
5041
5042 if (put_user(len, optlen))
5043 return -EFAULT;
5044 if (copy_to_user(optval, &adaptation, len))
5045 return -EFAULT;
5046
5047 return 0;
5048 }
5049
5050 /*
5051 *
5052 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
5053 *
5054 * Applications that wish to use the sendto() system call may wish to
5055 * specify a default set of parameters that would normally be supplied
5056 * through the inclusion of ancillary data. This socket option allows
5057 * such an application to set the default sctp_sndrcvinfo structure.
5058
5059
5060 * The application that wishes to use this socket option simply passes
5061 * the sctp_sndrcvinfo structure (defined in Section 5.2.2) to this
5062 * call. The input parameters accepted by this call include
5063 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, and
5064 * sinfo_timetolive. The user must provide the sinfo_assoc_id field
5065 * to this call if the caller is using the UDP model.
5066 *
5067 * For getsockopt, it gets the default sctp_sndrcvinfo structure.
5068 */
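/* Example (user space; an illustrative sketch, not part of the kernel code):
 * make stream 1 and a placeholder payload protocol id the defaults for
 * sends that carry no ancillary data.  'sd' and 'assoc_id' are assumed
 * valid (the id is needed on one-to-many sockets).
 *
 *	struct sctp_sndrcvinfo dflt;
 *
 *	memset(&dflt, 0, sizeof(dflt));
 *	dflt.sinfo_assoc_id = assoc_id;
 *	dflt.sinfo_stream = 1;
 *	dflt.sinfo_ppid = htonl(42);
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *		       &dflt, sizeof(dflt)) < 0)
 *		perror("setsockopt(SCTP_DEFAULT_SEND_PARAM)");
 */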
5069 static int sctp_getsockopt_default_send_param(struct sock *sk,
5070 int len, char __user *optval,
5071 int __user *optlen)
5072 {
5073 struct sctp_sock *sp = sctp_sk(sk);
5074 struct sctp_association *asoc;
5075 struct sctp_sndrcvinfo info;
5076
5077 if (len < sizeof(info))
5078 return -EINVAL;
5079
5080 len = sizeof(info);
5081
5082 if (copy_from_user(&info, optval, len))
5083 return -EFAULT;
5084
5085 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
5086 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
5087 return -EINVAL;
5088 if (asoc) {
5089 info.sinfo_stream = asoc->default_stream;
5090 info.sinfo_flags = asoc->default_flags;
5091 info.sinfo_ppid = asoc->default_ppid;
5092 info.sinfo_context = asoc->default_context;
5093 info.sinfo_timetolive = asoc->default_timetolive;
5094 } else {
5095 info.sinfo_stream = sp->default_stream;
5096 info.sinfo_flags = sp->default_flags;
5097 info.sinfo_ppid = sp->default_ppid;
5098 info.sinfo_context = sp->default_context;
5099 info.sinfo_timetolive = sp->default_timetolive;
5100 }
5101
5102 if (put_user(len, optlen))
5103 return -EFAULT;
5104 if (copy_to_user(optval, &info, len))
5105 return -EFAULT;
5106
5107 return 0;
5108 }
5109
5110 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters
5111 * (SCTP_DEFAULT_SNDINFO)
5112 */
5113 static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
5114 char __user *optval,
5115 int __user *optlen)
5116 {
5117 struct sctp_sock *sp = sctp_sk(sk);
5118 struct sctp_association *asoc;
5119 struct sctp_sndinfo info;
5120
5121 if (len < sizeof(info))
5122 return -EINVAL;
5123
5124 len = sizeof(info);
5125
5126 if (copy_from_user(&info, optval, len))
5127 return -EFAULT;
5128
5129 asoc = sctp_id2assoc(sk, info.snd_assoc_id);
5130 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
5131 return -EINVAL;
5132 if (asoc) {
5133 info.snd_sid = asoc->default_stream;
5134 info.snd_flags = asoc->default_flags;
5135 info.snd_ppid = asoc->default_ppid;
5136 info.snd_context = asoc->default_context;
5137 } else {
5138 info.snd_sid = sp->default_stream;
5139 info.snd_flags = sp->default_flags;
5140 info.snd_ppid = sp->default_ppid;
5141 info.snd_context = sp->default_context;
5142 }
5143
5144 if (put_user(len, optlen))
5145 return -EFAULT;
5146 if (copy_to_user(optval, &info, len))
5147 return -EFAULT;
5148
5149 return 0;
5150 }
5151
5152 /*
5153 *
5154 * 7.1.5 SCTP_NODELAY
5155 *
5156 * Turn on/off any Nagle-like algorithm. This means that packets are
5157 * generally sent as soon as possible and no unnecessary delays are
5158 * introduced, at the cost of more packets in the network. Expects an
5159 * integer boolean flag.
5160 */
5161
5162 static int sctp_getsockopt_nodelay(struct sock *sk, int len,
5163 char __user *optval, int __user *optlen)
5164 {
5165 int val;
5166
5167 if (len < sizeof(int))
5168 return -EINVAL;
5169
5170 len = sizeof(int);
5171 val = (sctp_sk(sk)->nodelay == 1);
5172 if (put_user(len, optlen))
5173 return -EFAULT;
5174 if (copy_to_user(optval, &val, len))
5175 return -EFAULT;
5176 return 0;
5177 }
5178
5179 /*
5180 *
5181 * 7.1.1 SCTP_RTOINFO
5182 *
5183 * The protocol parameters used to initialize and bound retransmission
5184 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
5185 * and modify these parameters.
5186 * All parameters are time values, in milliseconds. A value of 0, when
5187 * modifying the parameters, indicates that the current value should not
5188 * be changed.
5189 *
5190 */
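/* Example (user space; an illustrative sketch, not part of the kernel code):
 * read the current RTO parameters and raise the minimum to 500 ms.  'sd'
 * and 'assoc_id' are assumed valid; zero fields are left unchanged by the
 * set operation.
 *
 *	struct sctp_rtoinfo rto;
 *	socklen_t len = sizeof(rto);
 *
 *	memset(&rto, 0, sizeof(rto));
 *	rto.srto_assoc_id = assoc_id;
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, &len) == 0) {
 *		rto.srto_min = 500;
 *		setsockopt(sd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, sizeof(rto));
 *	}
 */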
5191 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
5192 char __user *optval,
5193 int __user *optlen) {
5194 struct sctp_rtoinfo rtoinfo;
5195 struct sctp_association *asoc;
5196
5197 if (len < sizeof (struct sctp_rtoinfo))
5198 return -EINVAL;
5199
5200 len = sizeof(struct sctp_rtoinfo);
5201
5202 if (copy_from_user(&rtoinfo, optval, len))
5203 return -EFAULT;
5204
5205 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
5206
5207 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
5208 return -EINVAL;
5209
5210 /* Values corresponding to the specific association. */
5211 if (asoc) {
5212 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
5213 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
5214 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
5215 } else {
5216 /* Values corresponding to the endpoint. */
5217 struct sctp_sock *sp = sctp_sk(sk);
5218
5219 rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
5220 rtoinfo.srto_max = sp->rtoinfo.srto_max;
5221 rtoinfo.srto_min = sp->rtoinfo.srto_min;
5222 }
5223
5224 if (put_user(len, optlen))
5225 return -EFAULT;
5226
5227 if (copy_to_user(optval, &rtoinfo, len))
5228 return -EFAULT;
5229
5230 return 0;
5231 }
5232
5233 /*
5234 *
5235 * 7.1.2 SCTP_ASSOCINFO
5236 *
5237 * This option is used to tune the maximum retransmission attempts
5238 * of the association.
5239 * Returns an error if the new association retransmission value is
5240 * greater than the sum of the peer's path retransmission values.
5241 * See [SCTP] for more information.
5242 *
5243 */
5244 static int sctp_getsockopt_associnfo(struct sock *sk, int len,
5245 char __user *optval,
5246 int __user *optlen)
5247 {
5248
5249 struct sctp_assocparams assocparams;
5250 struct sctp_association *asoc;
5251 struct list_head *pos;
5252 int cnt = 0;
5253
5254 if (len < sizeof (struct sctp_assocparams))
5255 return -EINVAL;
5256
5257 len = sizeof(struct sctp_assocparams);
5258
5259 if (copy_from_user(&assocparams, optval, len))
5260 return -EFAULT;
5261
5262 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
5263
5264 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
5265 return -EINVAL;
5266
5267 /* Values corresponding to the specific association */
5268 if (asoc) {
5269 assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
5270 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
5271 assocparams.sasoc_local_rwnd = asoc->a_rwnd;
5272 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
5273
5274 list_for_each(pos, &asoc->peer.transport_addr_list) {
5275 cnt++;
5276 }
5277
5278 assocparams.sasoc_number_peer_destinations = cnt;
5279 } else {
5280 /* Values corresponding to the endpoint */
5281 struct sctp_sock *sp = sctp_sk(sk);
5282
5283 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
5284 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
5285 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
5286 assocparams.sasoc_cookie_life =
5287 sp->assocparams.sasoc_cookie_life;
5288 assocparams.sasoc_number_peer_destinations =
5289 sp->assocparams.
5290 sasoc_number_peer_destinations;
5291 }
5292
5293 if (put_user(len, optlen))
5294 return -EFAULT;
5295
5296 if (copy_to_user(optval, &assocparams, len))
5297 return -EFAULT;
5298
5299 return 0;
5300 }
5301
5302 /*
5303 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
5304 *
5305 * This socket option is a boolean flag which turns on or off mapped V4
5306 * addresses. If this option is turned on and the socket is type
5307 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
5308 * If this option is turned off, then no mapping will be done of V4
5309 * addresses and a user will receive both PF_INET6 and PF_INET type
5310 * addresses on the socket.
5311 */
5312 static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
5313 char __user *optval, int __user *optlen)
5314 {
5315 int val;
5316 struct sctp_sock *sp = sctp_sk(sk);
5317
5318 if (len < sizeof(int))
5319 return -EINVAL;
5320
5321 len = sizeof(int);
5322 val = sp->v4mapped;
5323 if (put_user(len, optlen))
5324 return -EFAULT;
5325 if (copy_to_user(optval, &val, len))
5326 return -EFAULT;
5327
5328 return 0;
5329 }
5330
5331 /*
5332 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
5333 * (chapter and verse is quoted at sctp_setsockopt_context())
5334 */
5335 static int sctp_getsockopt_context(struct sock *sk, int len,
5336 char __user *optval, int __user *optlen)
5337 {
5338 struct sctp_assoc_value params;
5339 struct sctp_sock *sp;
5340 struct sctp_association *asoc;
5341
5342 if (len < sizeof(struct sctp_assoc_value))
5343 return -EINVAL;
5344
5345 len = sizeof(struct sctp_assoc_value);
5346
5347 if (copy_from_user(&params, optval, len))
5348 return -EFAULT;
5349
5350 sp = sctp_sk(sk);
5351
5352 if (params.assoc_id != 0) {
5353 asoc = sctp_id2assoc(sk, params.assoc_id);
5354 if (!asoc)
5355 return -EINVAL;
5356 params.assoc_value = asoc->default_rcv_context;
5357 } else {
5358 params.assoc_value = sp->default_rcv_context;
5359 }
5360
5361 if (put_user(len, optlen))
5362 return -EFAULT;
5363 if (copy_to_user(optval, &params, len))
5364 return -EFAULT;
5365
5366 return 0;
5367 }
5368
5369 /*
5370 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
5371 * This option will get or set the maximum size to put in any outgoing
5372 * SCTP DATA chunk. If a message is larger than this size it will be
5373 * fragmented by SCTP into the specified size. Note that the underlying
5374 * SCTP implementation may fragment into smaller sized chunks when the
5375 * PMTU of the underlying association is smaller than the value set by
5376 * the user. The default value for this option is '0' which indicates
5377  * the user is NOT limiting fragmentation and only the PMTU will affect
5378 * SCTP's choice of DATA chunk size. Note also that values set larger
5379 * than the maximum size of an IP datagram will effectively let SCTP
5380 * control fragmentation (i.e. the same as setting this option to 0).
5381 *
5382 * The following structure is used to access and modify this parameter:
5383 *
5384 * struct sctp_assoc_value {
5385 * sctp_assoc_t assoc_id;
5386 * uint32_t assoc_value;
5387 * };
5388 *
5389 * assoc_id: This parameter is ignored for one-to-one style sockets.
5390 * For one-to-many style sockets this parameter indicates which
5391 * association the user is performing an action upon. Note that if
5392  *    this field's value is zero then the endpoint's default value is
5393  *    changed (affecting future associations only).
5394 * assoc_value: This parameter specifies the maximum size in bytes.
5395 */
5396 static int sctp_getsockopt_maxseg(struct sock *sk, int len,
5397 char __user *optval, int __user *optlen)
5398 {
5399 struct sctp_assoc_value params;
5400 struct sctp_association *asoc;
5401
5402 if (len == sizeof(int)) {
5403 pr_warn_ratelimited(DEPRECATED
5404 "%s (pid %d) "
5405 "Use of int in maxseg socket option.\n"
5406 "Use struct sctp_assoc_value instead\n",
5407 current->comm, task_pid_nr(current));
5408 params.assoc_id = 0;
5409 } else if (len >= sizeof(struct sctp_assoc_value)) {
5410 len = sizeof(struct sctp_assoc_value);
5411 if (copy_from_user(&params, optval, sizeof(params)))
5412 return -EFAULT;
5413 } else
5414 return -EINVAL;
5415
5416 asoc = sctp_id2assoc(sk, params.assoc_id);
5417 if (!asoc && params.assoc_id && sctp_style(sk, UDP))
5418 return -EINVAL;
5419
5420 if (asoc)
5421 params.assoc_value = asoc->frag_point;
5422 else
5423 params.assoc_value = sctp_sk(sk)->user_frag;
5424
5425 if (put_user(len, optlen))
5426 return -EFAULT;
5427 if (len == sizeof(int)) {
5428 if (copy_to_user(optval, &params.assoc_value, len))
5429 return -EFAULT;
5430 } else {
5431 if (copy_to_user(optval, &params, len))
5432 return -EFAULT;
5433 }
5434
5435 return 0;
5436 }
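
/* Usage sketch (illustrative only): querying SCTP_MAXSEG with the
 * preferred struct sctp_assoc_value form rather than the deprecated
 * plain int. A minimal sketch assuming the usual userspace SCTP
 * headers; 'fd' is a hypothetical socket, and an assoc_id of 0 queries
 * the endpoint default on a one-to-many socket.
 *
 *	struct sctp_assoc_value av = { .assoc_id = 0 };
 *	socklen_t optlen = sizeof(av);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, &optlen) == 0)
 *		printf("maximum fragment size: %u bytes\n", av.assoc_value);
 */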
5437
5438 /*
5439 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
5440 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
5441 */
5442 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
5443 char __user *optval, int __user *optlen)
5444 {
5445 int val;
5446
5447 if (len < sizeof(int))
5448 return -EINVAL;
5449
5450 len = sizeof(int);
5451
5452 val = sctp_sk(sk)->frag_interleave;
5453 if (put_user(len, optlen))
5454 return -EFAULT;
5455 if (copy_to_user(optval, &val, len))
5456 return -EFAULT;
5457
5458 return 0;
5459 }
5460
5461 /*
5462 * 7.1.25. Set or Get the sctp partial delivery point
5463 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
5464 */
5465 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
5466 char __user *optval,
5467 int __user *optlen)
5468 {
5469 u32 val;
5470
5471 if (len < sizeof(u32))
5472 return -EINVAL;
5473
5474 len = sizeof(u32);
5475
5476 val = sctp_sk(sk)->pd_point;
5477 if (put_user(len, optlen))
5478 return -EFAULT;
5479 if (copy_to_user(optval, &val, len))
5480 return -EFAULT;
5481
5482 return 0;
5483 }
5484
5485 /*
5486 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
5487 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
5488 */
5489 static int sctp_getsockopt_maxburst(struct sock *sk, int len,
5490 char __user *optval,
5491 int __user *optlen)
5492 {
5493 struct sctp_assoc_value params;
5494 struct sctp_sock *sp;
5495 struct sctp_association *asoc;
5496
5497 if (len == sizeof(int)) {
5498 pr_warn_ratelimited(DEPRECATED
5499 "%s (pid %d) "
5500 "Use of int in max_burst socket option.\n"
5501 "Use struct sctp_assoc_value instead\n",
5502 current->comm, task_pid_nr(current));
5503 params.assoc_id = 0;
5504 } else if (len >= sizeof(struct sctp_assoc_value)) {
5505 len = sizeof(struct sctp_assoc_value);
5506 if (copy_from_user(&params, optval, len))
5507 return -EFAULT;
5508 } else
5509 return -EINVAL;
5510
5511 sp = sctp_sk(sk);
5512
5513 if (params.assoc_id != 0) {
5514 asoc = sctp_id2assoc(sk, params.assoc_id);
5515 if (!asoc)
5516 return -EINVAL;
5517 params.assoc_value = asoc->max_burst;
5518 } else
5519 params.assoc_value = sp->max_burst;
5520
5521 if (len == sizeof(int)) {
5522 if (copy_to_user(optval, &params.assoc_value, len))
5523 return -EFAULT;
5524 } else {
5525 if (copy_to_user(optval, &params, len))
5526 return -EFAULT;
5527 }
5528
5529 return 0;
5530
5531 }
5532
5533 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
5534 char __user *optval, int __user *optlen)
5535 {
5536 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5537 struct sctp_hmacalgo __user *p = (void __user *)optval;
5538 struct sctp_hmac_algo_param *hmacs;
5539 __u16 data_len = 0;
5540 u32 num_idents;
5541 int i;
5542
5543 if (!ep->auth_enable)
5544 return -EACCES;
5545
5546 hmacs = ep->auth_hmacs_list;
5547 data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
5548
5549 if (len < sizeof(struct sctp_hmacalgo) + data_len)
5550 return -EINVAL;
5551
5552 len = sizeof(struct sctp_hmacalgo) + data_len;
5553 num_idents = data_len / sizeof(u16);
5554
5555 if (put_user(len, optlen))
5556 return -EFAULT;
5557 if (put_user(num_idents, &p->shmac_num_idents))
5558 return -EFAULT;
5559 for (i = 0; i < num_idents; i++) {
5560 __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
5561
5562 if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
5563 return -EFAULT;
5564 }
5565 return 0;
5566 }
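
/* Usage sketch (illustrative only): listing the HMAC identifiers the
 * endpoint offers for SCTP-AUTH. The caller must provide room for the
 * header plus the identifier array, so a small fixed buffer is used
 * here; the call fails with EACCES when AUTH is not enabled. 'fd' is
 * a hypothetical socket descriptor.
 *
 *	char buf[sizeof(struct sctp_hmacalgo) + 8 * sizeof(uint16_t)];
 *	struct sctp_hmacalgo *algo = (struct sctp_hmacalgo *)buf;
 *	socklen_t optlen = sizeof(buf);
 *	uint32_t i;
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_HMAC_IDENT, algo, &optlen) == 0)
 *		for (i = 0; i < algo->shmac_num_idents; i++)
 *			printf("hmac id %u\n", algo->shmac_idents[i]);
 */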
5567
5568 static int sctp_getsockopt_active_key(struct sock *sk, int len,
5569 char __user *optval, int __user *optlen)
5570 {
5571 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5572 struct sctp_authkeyid val;
5573 struct sctp_association *asoc;
5574
5575 if (!ep->auth_enable)
5576 return -EACCES;
5577
5578 if (len < sizeof(struct sctp_authkeyid))
5579 return -EINVAL;
5580 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
5581 return -EFAULT;
5582
5583 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
5584 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
5585 return -EINVAL;
5586
5587 if (asoc)
5588 val.scact_keynumber = asoc->active_key_id;
5589 else
5590 val.scact_keynumber = ep->active_key_id;
5591
5592 len = sizeof(struct sctp_authkeyid);
5593 if (put_user(len, optlen))
5594 return -EFAULT;
5595 if (copy_to_user(optval, &val, len))
5596 return -EFAULT;
5597
5598 return 0;
5599 }
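
/* Usage sketch (illustrative only): fetching the currently active
 * SCTP-AUTH key number. The call fails with EACCES when AUTH is not
 * enabled; 'fd' and 'assoc_id' are placeholders.
 *
 *	struct sctp_authkeyid akid = { .scact_assoc_id = assoc_id };
 *	socklen_t optlen = sizeof(akid);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
 *		       &akid, &optlen) == 0)
 *		printf("active key id: %u\n", akid.scact_keynumber);
 */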
5600
5601 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
5602 char __user *optval, int __user *optlen)
5603 {
5604 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5605 struct sctp_authchunks __user *p = (void __user *)optval;
5606 struct sctp_authchunks val;
5607 struct sctp_association *asoc;
5608 struct sctp_chunks_param *ch;
5609 u32 num_chunks = 0;
5610 char __user *to;
5611
5612 if (!ep->auth_enable)
5613 return -EACCES;
5614
5615 if (len < sizeof(struct sctp_authchunks))
5616 return -EINVAL;
5617
5618 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
5619 return -EFAULT;
5620
5621 to = p->gauth_chunks;
5622 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
5623 if (!asoc)
5624 return -EINVAL;
5625
5626 ch = asoc->peer.peer_chunks;
5627 if (!ch)
5628 goto num;
5629
5630 /* See if the user provided enough room for all the data */
5631 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
5632 if (len < num_chunks)
5633 return -EINVAL;
5634
5635 if (copy_to_user(to, ch->chunks, num_chunks))
5636 return -EFAULT;
5637 num:
5638 len = sizeof(struct sctp_authchunks) + num_chunks;
5639 if (put_user(len, optlen))
5640 return -EFAULT;
5641 if (put_user(num_chunks, &p->gauth_number_of_chunks))
5642 return -EFAULT;
5643 return 0;
5644 }
5645
5646 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
5647 char __user *optval, int __user *optlen)
5648 {
5649 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
5650 struct sctp_authchunks __user *p = (void __user *)optval;
5651 struct sctp_authchunks val;
5652 struct sctp_association *asoc;
5653 struct sctp_chunks_param *ch;
5654 u32 num_chunks = 0;
5655 char __user *to;
5656
5657 if (!ep->auth_enable)
5658 return -EACCES;
5659
5660 if (len < sizeof(struct sctp_authchunks))
5661 return -EINVAL;
5662
5663 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
5664 return -EFAULT;
5665
5666 to = p->gauth_chunks;
5667 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
5668 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
5669 return -EINVAL;
5670
5671 if (asoc)
5672 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
5673 else
5674 ch = ep->auth_chunk_list;
5675
5676 if (!ch)
5677 goto num;
5678
5679 num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
5680 if (len < sizeof(struct sctp_authchunks) + num_chunks)
5681 return -EINVAL;
5682
5683 if (copy_to_user(to, ch->chunks, num_chunks))
5684 return -EFAULT;
5685 num:
5686 len = sizeof(struct sctp_authchunks) + num_chunks;
5687 if (put_user(len, optlen))
5688 return -EFAULT;
5689 if (put_user(num_chunks, &p->gauth_number_of_chunks))
5690 return -EFAULT;
5691
5692 return 0;
5693 }
5694
5695 /*
5696 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
5697 * This option gets the current number of associations that are attached
5698  * to a one-to-many style socket.  The option value is a uint32_t.
5699 */
5700 static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
5701 char __user *optval, int __user *optlen)
5702 {
5703 struct sctp_sock *sp = sctp_sk(sk);
5704 struct sctp_association *asoc;
5705 u32 val = 0;
5706
5707 if (sctp_style(sk, TCP))
5708 return -EOPNOTSUPP;
5709
5710 if (len < sizeof(u32))
5711 return -EINVAL;
5712
5713 len = sizeof(u32);
5714
5715 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5716 val++;
5717 }
5718
5719 if (put_user(len, optlen))
5720 return -EFAULT;
5721 if (copy_to_user(optval, &val, len))
5722 return -EFAULT;
5723
5724 return 0;
5725 }
5726
5727 /*
5728 * 8.1.23 SCTP_AUTO_ASCONF
5729 * See the corresponding setsockopt entry as description
5730 */
5731 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
5732 char __user *optval, int __user *optlen)
5733 {
5734 int val = 0;
5735
5736 if (len < sizeof(int))
5737 return -EINVAL;
5738
5739 len = sizeof(int);
5740 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
5741 val = 1;
5742 if (put_user(len, optlen))
5743 return -EFAULT;
5744 if (copy_to_user(optval, &val, len))
5745 return -EFAULT;
5746 return 0;
5747 }
5748
5749 /*
5750 * 8.2.6. Get the Current Identifiers of Associations
5751 * (SCTP_GET_ASSOC_ID_LIST)
5752 *
5753 * This option gets the current list of SCTP association identifiers of
5754 * the SCTP associations handled by a one-to-many style socket.
5755 */
5756 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
5757 char __user *optval, int __user *optlen)
5758 {
5759 struct sctp_sock *sp = sctp_sk(sk);
5760 struct sctp_association *asoc;
5761 struct sctp_assoc_ids *ids;
5762 u32 num = 0;
5763
5764 if (sctp_style(sk, TCP))
5765 return -EOPNOTSUPP;
5766
5767 if (len < sizeof(struct sctp_assoc_ids))
5768 return -EINVAL;
5769
5770 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5771 num++;
5772 }
5773
5774 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
5775 return -EINVAL;
5776
5777 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
5778
5779 ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
5780 if (unlikely(!ids))
5781 return -ENOMEM;
5782
5783 ids->gaids_number_of_ids = num;
5784 num = 0;
5785 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5786 ids->gaids_assoc_id[num++] = asoc->assoc_id;
5787 }
5788
5789 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
5790 kfree(ids);
5791 return -EFAULT;
5792 }
5793
5794 kfree(ids);
5795 return 0;
5796 }
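
/* Usage sketch (illustrative only): enumerating the associations on a
 * one-to-many socket by first asking for the count and then sizing the
 * buffer for the id list, since a too-small buffer is rejected with
 * EINVAL (both options return EOPNOTSUPP on TCP-style sockets). 'fd'
 * is a placeholder; the count can change between the two calls, so a
 * real caller may need to retry.
 *
 *	uint32_t num = 0;
 *	socklen_t optlen = sizeof(num);
 *	struct sctp_assoc_ids *ids;
 *	uint32_t i;
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER,
 *		       &num, &optlen) < 0)
 *		return;
 *
 *	optlen = sizeof(*ids) + num * sizeof(sctp_assoc_t);
 *	ids = malloc(optlen);
 *	if (ids && getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
 *			      ids, &optlen) == 0)
 *		for (i = 0; i < ids->gaids_number_of_ids; i++)
 *			printf("assoc id %d\n", ids->gaids_assoc_id[i]);
 *	free(ids);
 */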
5797
5798 /*
5799 * SCTP_PEER_ADDR_THLDS
5800 *
5801 * This option allows us to fetch the partially failed threshold for one or all
5802 * transports in an association. See Section 6.1 of:
5803 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
5804 */
5805 static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
5806 char __user *optval,
5807 int len,
5808 int __user *optlen)
5809 {
5810 struct sctp_paddrthlds val;
5811 struct sctp_transport *trans;
5812 struct sctp_association *asoc;
5813
5814 if (len < sizeof(struct sctp_paddrthlds))
5815 return -EINVAL;
5816 len = sizeof(struct sctp_paddrthlds);
5817 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
5818 return -EFAULT;
5819
5820 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
5821 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
5822 if (!asoc)
5823 return -ENOENT;
5824
5825 val.spt_pathpfthld = asoc->pf_retrans;
5826 val.spt_pathmaxrxt = asoc->pathmaxrxt;
5827 } else {
5828 trans = sctp_addr_id2transport(sk, &val.spt_address,
5829 val.spt_assoc_id);
5830 if (!trans)
5831 return -ENOENT;
5832
5833 val.spt_pathmaxrxt = trans->pathmaxrxt;
5834 val.spt_pathpfthld = trans->pf_retrans;
5835 }
5836
5837 if (put_user(len, optlen) || copy_to_user(optval, &val, len))
5838 return -EFAULT;
5839
5840 return 0;
5841 }
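
/* Usage sketch (illustrative only): reading the potentially-failed
 * threshold and path max retransmissions for one peer transport. If
 * spt_address holds the wildcard address, the association-wide values
 * are returned instead. 'fd', 'assoc_id' and 'peer' (a filled-in
 * struct sockaddr_storage) are placeholders.
 *
 *	struct sctp_paddrthlds th = { .spt_assoc_id = assoc_id };
 *	socklen_t optlen = sizeof(th);
 *
 *	memcpy(&th.spt_address, &peer, sizeof(peer));
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
 *		       &th, &optlen) == 0)
 *		printf("pf threshold %u, path max rxt %u\n",
 *		       th.spt_pathpfthld, th.spt_pathmaxrxt);
 */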
5842
5843 /*
5844 * SCTP_GET_ASSOC_STATS
5845 *
5846 * This option retrieves local per endpoint statistics. It is modeled
5847 * after OpenSolaris' implementation
5848 */
5849 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
5850 char __user *optval,
5851 int __user *optlen)
5852 {
5853 struct sctp_assoc_stats sas;
5854 struct sctp_association *asoc = NULL;
5855
5856 /* User must provide at least the assoc id */
5857 if (len < sizeof(sctp_assoc_t))
5858 return -EINVAL;
5859
5860 /* Allow the struct to grow and fill in as much as possible */
5861 len = min_t(size_t, len, sizeof(sas));
5862
5863 if (copy_from_user(&sas, optval, len))
5864 return -EFAULT;
5865
5866 asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
5867 if (!asoc)
5868 return -EINVAL;
5869
5870 sas.sas_rtxchunks = asoc->stats.rtxchunks;
5871 sas.sas_gapcnt = asoc->stats.gapcnt;
5872 sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
5873 sas.sas_osacks = asoc->stats.osacks;
5874 sas.sas_isacks = asoc->stats.isacks;
5875 sas.sas_octrlchunks = asoc->stats.octrlchunks;
5876 sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
5877 sas.sas_oodchunks = asoc->stats.oodchunks;
5878 sas.sas_iodchunks = asoc->stats.iodchunks;
5879 sas.sas_ouodchunks = asoc->stats.ouodchunks;
5880 sas.sas_iuodchunks = asoc->stats.iuodchunks;
5881 sas.sas_idupchunks = asoc->stats.idupchunks;
5882 sas.sas_opackets = asoc->stats.opackets;
5883 sas.sas_ipackets = asoc->stats.ipackets;
5884
5885 	/* Highest max RTO observed; this will be 0 if not a single
5886 	 * RTO update took place, in which case obs_rto_ipaddr will be
5887 	 * bogus.
5888 */
5889 sas.sas_maxrto = asoc->stats.max_obs_rto;
5890 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
5891 sizeof(struct sockaddr_storage));
5892
5893 /* Mark beginning of a new observation period */
5894 asoc->stats.max_obs_rto = asoc->rto_min;
5895
5896 if (put_user(len, optlen))
5897 return -EFAULT;
5898
5899 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id);
5900
5901 if (copy_to_user(optval, &sas, len))
5902 return -EFAULT;
5903
5904 return 0;
5905 }
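
/* Usage sketch (illustrative only): sampling per-association
 * statistics. Note that each read also resets the max-observed-RTO
 * tracking, so every call starts a new observation period. 'fd' and
 * 'assoc_id' are placeholders.
 *
 *	struct sctp_assoc_stats st = { .sas_assoc_id = assoc_id };
 *	socklen_t optlen = sizeof(st);
 *
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_STATS,
 *		       &st, &optlen) == 0)
 *		printf("rtx chunks %llu, max observed rto %llu\n",
 *		       (unsigned long long)st.sas_rtxchunks,
 *		       (unsigned long long)st.sas_maxrto);
 */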
5906
5907 static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
5908 char __user *optval,
5909 int __user *optlen)
5910 {
5911 int val = 0;
5912
5913 if (len < sizeof(int))
5914 return -EINVAL;
5915
5916 len = sizeof(int);
5917 if (sctp_sk(sk)->recvrcvinfo)
5918 val = 1;
5919 if (put_user(len, optlen))
5920 return -EFAULT;
5921 if (copy_to_user(optval, &val, len))
5922 return -EFAULT;
5923
5924 return 0;
5925 }
5926
5927 static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len,
5928 char __user *optval,
5929 int __user *optlen)
5930 {
5931 int val = 0;
5932
5933 if (len < sizeof(int))
5934 return -EINVAL;
5935
5936 len = sizeof(int);
5937 if (sctp_sk(sk)->recvnxtinfo)
5938 val = 1;
5939 if (put_user(len, optlen))
5940 return -EFAULT;
5941 if (copy_to_user(optval, &val, len))
5942 return -EFAULT;
5943
5944 return 0;
5945 }
5946
5947 static int sctp_getsockopt(struct sock *sk, int level, int optname,
5948 char __user *optval, int __user *optlen)
5949 {
5950 int retval = 0;
5951 int len;
5952
5953 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
5954
5955 /* I can hardly begin to describe how wrong this is. This is
5956 * so broken as to be worse than useless. The API draft
5957 * REALLY is NOT helpful here... I am not convinced that the
5958 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
5959 * are at all well-founded.
5960 */
5961 if (level != SOL_SCTP) {
5962 struct sctp_af *af = sctp_sk(sk)->pf->af;
5963
5964 retval = af->getsockopt(sk, level, optname, optval, optlen);
5965 return retval;
5966 }
5967
5968 if (get_user(len, optlen))
5969 return -EFAULT;
5970
5971 lock_sock(sk);
5972
5973 switch (optname) {
5974 case SCTP_STATUS:
5975 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
5976 break;
5977 case SCTP_DISABLE_FRAGMENTS:
5978 retval = sctp_getsockopt_disable_fragments(sk, len, optval,
5979 optlen);
5980 break;
5981 case SCTP_EVENTS:
5982 retval = sctp_getsockopt_events(sk, len, optval, optlen);
5983 break;
5984 case SCTP_AUTOCLOSE:
5985 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
5986 break;
5987 case SCTP_SOCKOPT_PEELOFF:
5988 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
5989 break;
5990 case SCTP_PEER_ADDR_PARAMS:
5991 retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
5992 optlen);
5993 break;
5994 case SCTP_DELAYED_SACK:
5995 retval = sctp_getsockopt_delayed_ack(sk, len, optval,
5996 optlen);
5997 break;
5998 case SCTP_INITMSG:
5999 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
6000 break;
6001 case SCTP_GET_PEER_ADDRS:
6002 retval = sctp_getsockopt_peer_addrs(sk, len, optval,
6003 optlen);
6004 break;
6005 case SCTP_GET_LOCAL_ADDRS:
6006 retval = sctp_getsockopt_local_addrs(sk, len, optval,
6007 optlen);
6008 break;
6009 case SCTP_SOCKOPT_CONNECTX3:
6010 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
6011 break;
6012 case SCTP_DEFAULT_SEND_PARAM:
6013 retval = sctp_getsockopt_default_send_param(sk, len,
6014 optval, optlen);
6015 break;
6016 case SCTP_DEFAULT_SNDINFO:
6017 retval = sctp_getsockopt_default_sndinfo(sk, len,
6018 optval, optlen);
6019 break;
6020 case SCTP_PRIMARY_ADDR:
6021 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
6022 break;
6023 case SCTP_NODELAY:
6024 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
6025 break;
6026 case SCTP_RTOINFO:
6027 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
6028 break;
6029 case SCTP_ASSOCINFO:
6030 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
6031 break;
6032 case SCTP_I_WANT_MAPPED_V4_ADDR:
6033 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
6034 break;
6035 case SCTP_MAXSEG:
6036 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
6037 break;
6038 case SCTP_GET_PEER_ADDR_INFO:
6039 retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
6040 optlen);
6041 break;
6042 case SCTP_ADAPTATION_LAYER:
6043 retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
6044 optlen);
6045 break;
6046 case SCTP_CONTEXT:
6047 retval = sctp_getsockopt_context(sk, len, optval, optlen);
6048 break;
6049 case SCTP_FRAGMENT_INTERLEAVE:
6050 retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
6051 optlen);
6052 break;
6053 case SCTP_PARTIAL_DELIVERY_POINT:
6054 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
6055 optlen);
6056 break;
6057 case SCTP_MAX_BURST:
6058 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
6059 break;
6060 case SCTP_AUTH_KEY:
6061 case SCTP_AUTH_CHUNK:
6062 case SCTP_AUTH_DELETE_KEY:
6063 retval = -EOPNOTSUPP;
6064 break;
6065 case SCTP_HMAC_IDENT:
6066 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
6067 break;
6068 case SCTP_AUTH_ACTIVE_KEY:
6069 retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
6070 break;
6071 case SCTP_PEER_AUTH_CHUNKS:
6072 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
6073 optlen);
6074 break;
6075 case SCTP_LOCAL_AUTH_CHUNKS:
6076 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
6077 optlen);
6078 break;
6079 case SCTP_GET_ASSOC_NUMBER:
6080 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
6081 break;
6082 case SCTP_GET_ASSOC_ID_LIST:
6083 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
6084 break;
6085 case SCTP_AUTO_ASCONF:
6086 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
6087 break;
6088 case SCTP_PEER_ADDR_THLDS:
6089 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
6090 break;
6091 case SCTP_GET_ASSOC_STATS:
6092 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
6093 break;
6094 case SCTP_RECVRCVINFO:
6095 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
6096 break;
6097 case SCTP_RECVNXTINFO:
6098 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
6099 break;
6100 default:
6101 retval = -ENOPROTOOPT;
6102 break;
6103 }
6104
6105 release_sock(sk);
6106 return retval;
6107 }
6108
6109 static void sctp_hash(struct sock *sk)
6110 {
6111 /* STUB */
6112 }
6113
6114 static void sctp_unhash(struct sock *sk)
6115 {
6116 /* STUB */
6117 }
6118
6119 /* Check if port is acceptable. Possibly find first available port.
6120 *
6121  * The port hash table is contained in the 'global' SCTP protocol
6122  * storage returned by struct sctp_protocol *sctp_get_protocol(). The
6123  * hash table is an array of 4096 lists (sctp_bind_hashbucket). Each
6124  * list collects the ports whose numbers hash to that list's index,
6125  * exactly as you would expect from a hash function: all the ports in
6126  * a given list hash out to the same list number. Each list therefore
6127  * holds a set of ports, each with a link to the socket (struct sock)
6128  * that uses it, the port number and a fastreuse flag
6129  * (FIXME: NPI ipg).
6130 */
6131 static struct sctp_bind_bucket *sctp_bucket_create(
6132 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
6133
6134 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
6135 {
6136 struct sctp_bind_hashbucket *head; /* hash list */
6137 struct sctp_bind_bucket *pp;
6138 unsigned short snum;
6139 int ret;
6140
6141 snum = ntohs(addr->v4.sin_port);
6142
6143 pr_debug("%s: begins, snum:%d\n", __func__, snum);
6144
6145 local_bh_disable();
6146
6147 if (snum == 0) {
6148 /* Search for an available port. */
6149 int low, high, remaining, index;
6150 unsigned int rover;
6151 struct net *net = sock_net(sk);
6152
6153 inet_get_local_port_range(net, &low, &high);
6154 remaining = (high - low) + 1;
6155 rover = prandom_u32() % remaining + low;
6156
6157 do {
6158 rover++;
6159 if ((rover < low) || (rover > high))
6160 rover = low;
6161 if (inet_is_local_reserved_port(net, rover))
6162 continue;
6163 index = sctp_phashfn(sock_net(sk), rover);
6164 head = &sctp_port_hashtable[index];
6165 spin_lock(&head->lock);
6166 sctp_for_each_hentry(pp, &head->chain)
6167 if ((pp->port == rover) &&
6168 net_eq(sock_net(sk), pp->net))
6169 goto next;
6170 break;
6171 next:
6172 spin_unlock(&head->lock);
6173 } while (--remaining > 0);
6174
6175 /* Exhausted local port range during search? */
6176 ret = 1;
6177 if (remaining <= 0)
6178 goto fail;
6179
6180 /* OK, here is the one we will use. HEAD (the port
6181 		 * hash table list entry) is non-NULL and we hold its
6182 		 * lock.
6183 */
6184 snum = rover;
6185 } else {
6186 		/* We are given a specific port number; we verify
6187 		 * that it is not being used. If it is used, we will
6188 		 * exhaust the search in the hash list corresponding
6189 * to the port number (snum) - we detect that with the
6190 * port iterator, pp being NULL.
6191 */
6192 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
6193 spin_lock(&head->lock);
6194 sctp_for_each_hentry(pp, &head->chain) {
6195 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
6196 goto pp_found;
6197 }
6198 }
6199 pp = NULL;
6200 goto pp_not_found;
6201 pp_found:
6202 if (!hlist_empty(&pp->owner)) {
6203 /* We had a port hash table hit - there is an
6204 * available port (pp != NULL) and it is being
6205 		 * used by another socket (pp->owner not empty); that other
6206 * socket is going to be sk2.
6207 */
6208 int reuse = sk->sk_reuse;
6209 struct sock *sk2;
6210
6211 pr_debug("%s: found a possible match\n", __func__);
6212
6213 if (pp->fastreuse && sk->sk_reuse &&
6214 sk->sk_state != SCTP_SS_LISTENING)
6215 goto success;
6216
6217 /* Run through the list of sockets bound to the port
6218 * (pp->port) [via the pointers bind_next and
6219 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
6220 * we get the endpoint they describe and run through
6221 * the endpoint's list of IP (v4 or v6) addresses,
6222 * comparing each of the addresses with the address of
6223 * the socket sk. If we find a match, then that means
6224 		 * that this port/socket (sk) combination is already
6225 * in an endpoint.
6226 */
6227 sk_for_each_bound(sk2, &pp->owner) {
6228 struct sctp_endpoint *ep2;
6229 ep2 = sctp_sk(sk2)->ep;
6230
6231 if (sk == sk2 ||
6232 (reuse && sk2->sk_reuse &&
6233 sk2->sk_state != SCTP_SS_LISTENING))
6234 continue;
6235
6236 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
6237 sctp_sk(sk2), sctp_sk(sk))) {
6238 ret = (long)sk2;
6239 goto fail_unlock;
6240 }
6241 }
6242
6243 pr_debug("%s: found a match\n", __func__);
6244 }
6245 pp_not_found:
6246 /* If there was a hash table miss, create a new port. */
6247 ret = 1;
6248 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
6249 goto fail_unlock;
6250
6251 /* In either case (hit or miss), make sure fastreuse is 1 only
6252 * if sk->sk_reuse is too (that is, if the caller requested
6253 * SO_REUSEADDR on this socket -sk-).
6254 */
6255 if (hlist_empty(&pp->owner)) {
6256 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
6257 pp->fastreuse = 1;
6258 else
6259 pp->fastreuse = 0;
6260 } else if (pp->fastreuse &&
6261 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
6262 pp->fastreuse = 0;
6263
6264 	/* We are set, so fill up all the data in the hash table
6265 	 * entry and tie the socket's bind information to the rest of
6266 	 * the sockets on this port. FIXME: Blurry, NPI (ipg).
6267 */
6268 success:
6269 if (!sctp_sk(sk)->bind_hash) {
6270 inet_sk(sk)->inet_num = snum;
6271 sk_add_bind_node(sk, &pp->owner);
6272 sctp_sk(sk)->bind_hash = pp;
6273 }
6274 ret = 0;
6275
6276 fail_unlock:
6277 spin_unlock(&head->lock);
6278
6279 fail:
6280 local_bh_enable();
6281 return ret;
6282 }
6283
6284 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
6285 * port is requested.
6286 */
6287 static int sctp_get_port(struct sock *sk, unsigned short snum)
6288 {
6289 union sctp_addr addr;
6290 struct sctp_af *af = sctp_sk(sk)->pf->af;
6291
6292 /* Set up a dummy address struct from the sk. */
6293 af->from_sk(&addr, sk);
6294 addr.v4.sin_port = htons(snum);
6295
6296 /* Note: sk->sk_num gets filled in if ephemeral port request. */
6297 return !!sctp_get_port_local(sk, &addr);
6298 }
6299
6300 /*
6301 * Move a socket to LISTENING state.
6302 */
6303 static int sctp_listen_start(struct sock *sk, int backlog)
6304 {
6305 struct sctp_sock *sp = sctp_sk(sk);
6306 struct sctp_endpoint *ep = sp->ep;
6307 struct crypto_hash *tfm = NULL;
6308 char alg[32];
6309
6310 /* Allocate HMAC for generating cookie. */
6311 if (!sp->hmac && sp->sctp_hmac_alg) {
6312 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
6313 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
6314 if (IS_ERR(tfm)) {
6315 net_info_ratelimited("failed to load transform for %s: %ld\n",
6316 sp->sctp_hmac_alg, PTR_ERR(tfm));
6317 return -ENOSYS;
6318 }
6319 sctp_sk(sk)->hmac = tfm;
6320 }
6321
6322 /*
6323 * If a bind() or sctp_bindx() is not called prior to a listen()
6324 * call that allows new associations to be accepted, the system
6325 * picks an ephemeral port and will choose an address set equivalent
6326 * to binding with a wildcard address.
6327 *
6328 * This is not currently spelled out in the SCTP sockets
6329 * extensions draft, but follows the practice as seen in TCP
6330 * sockets.
6331 *
6332 */
6333 sk->sk_state = SCTP_SS_LISTENING;
6334 if (!ep->base.bind_addr.port) {
6335 if (sctp_autobind(sk))
6336 return -EAGAIN;
6337 } else {
6338 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
6339 sk->sk_state = SCTP_SS_CLOSED;
6340 return -EADDRINUSE;
6341 }
6342 }
6343
6344 sk->sk_max_ack_backlog = backlog;
6345 sctp_hash_endpoint(ep);
6346 return 0;
6347 }
6348
6349 /*
6350 * 4.1.3 / 5.1.3 listen()
6351 *
6352 * By default, new associations are not accepted for UDP style sockets.
6353 * An application uses listen() to mark a socket as being able to
6354 * accept new associations.
6355 *
6356 * On TCP style sockets, applications use listen() to ready the SCTP
6357 * endpoint for accepting inbound associations.
6358 *
6359 * On both types of endpoints a backlog of '0' disables listening.
6360 *
6361 * Move a socket to LISTENING state.
6362 */
6363 int sctp_inet_listen(struct socket *sock, int backlog)
6364 {
6365 struct sock *sk = sock->sk;
6366 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6367 int err = -EINVAL;
6368
6369 if (unlikely(backlog < 0))
6370 return err;
6371
6372 lock_sock(sk);
6373
6374 /* Peeled-off sockets are not allowed to listen(). */
6375 if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
6376 goto out;
6377
6378 if (sock->state != SS_UNCONNECTED)
6379 goto out;
6380
6381 /* If backlog is zero, disable listening. */
6382 if (!backlog) {
6383 if (sctp_sstate(sk, CLOSED))
6384 goto out;
6385
6386 err = 0;
6387 sctp_unhash_endpoint(ep);
6388 sk->sk_state = SCTP_SS_CLOSED;
6389 if (sk->sk_reuse)
6390 sctp_sk(sk)->bind_hash->fastreuse = 1;
6391 goto out;
6392 }
6393
6394 /* If we are already listening, just update the backlog */
6395 if (sctp_sstate(sk, LISTENING))
6396 sk->sk_max_ack_backlog = backlog;
6397 else {
6398 err = sctp_listen_start(sk, backlog);
6399 if (err)
6400 goto out;
6401 }
6402
6403 err = 0;
6404 out:
6405 release_sock(sk);
6406 return err;
6407 }
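
/* Usage sketch (illustrative only): the typical one-to-many setup that
 * ends up in the listen() path above. Placeholder port number and no
 * error handling; a backlog of 0 would disable accepting associations.
 *
 *	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(5000),
 *		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
 *	};
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 5);
 */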
6408
6409 /*
6410  * This function is modeled on the current datagram_poll() and
6411  * tcp_poll(). Note that, based on these implementations, we don't
6412 * lock the socket in this function, even though it seems that,
6413 * ideally, locking or some other mechanisms can be used to ensure
6414 * the integrity of the counters (sndbuf and wmem_alloc) used
6415 * in this place. We assume that we don't need locks either until proven
6416 * otherwise.
6417 *
6418 * Another thing to note is that we include the Async I/O support
6419 * here, again, by modeling the current TCP/UDP code. We don't have
6420 * a good way to test with it yet.
6421 */
6422 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
6423 {
6424 struct sock *sk = sock->sk;
6425 struct sctp_sock *sp = sctp_sk(sk);
6426 unsigned int mask;
6427
6428 poll_wait(file, sk_sleep(sk), wait);
6429
6430 /* A TCP-style listening socket becomes readable when the accept queue
6431 * is not empty.
6432 */
6433 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
6434 return (!list_empty(&sp->ep->asocs)) ?
6435 (POLLIN | POLLRDNORM) : 0;
6436
6437 mask = 0;
6438
6439 	/* Are there any exceptional events?  */
6440 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
6441 mask |= POLLERR |
6442 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
6443 if (sk->sk_shutdown & RCV_SHUTDOWN)
6444 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
6445 if (sk->sk_shutdown == SHUTDOWN_MASK)
6446 mask |= POLLHUP;
6447
6448 /* Is it readable? Reconsider this code with TCP-style support. */
6449 if (!skb_queue_empty(&sk->sk_receive_queue))
6450 mask |= POLLIN | POLLRDNORM;
6451
6452 /* The association is either gone or not ready. */
6453 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
6454 return mask;
6455
6456 /* Is it writable? */
6457 if (sctp_writeable(sk)) {
6458 mask |= POLLOUT | POLLWRNORM;
6459 } else {
6460 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
6461 /*
6462 * Since the socket is not locked, the buffer
6463 * might be made available after the writeable check and
6464 * before the bit is set. This could cause a lost I/O
6465 * signal. tcp_poll() has a race breaker for this race
6466 * condition. Based on their implementation, we put
6467 * in the following code to cover it as well.
6468 */
6469 if (sctp_writeable(sk))
6470 mask |= POLLOUT | POLLWRNORM;
6471 }
6472 return mask;
6473 }
6474
6475 /********************************************************************
6476 * 2nd Level Abstractions
6477 ********************************************************************/
6478
6479 static struct sctp_bind_bucket *sctp_bucket_create(
6480 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
6481 {
6482 struct sctp_bind_bucket *pp;
6483
6484 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
6485 if (pp) {
6486 SCTP_DBG_OBJCNT_INC(bind_bucket);
6487 pp->port = snum;
6488 pp->fastreuse = 0;
6489 INIT_HLIST_HEAD(&pp->owner);
6490 pp->net = net;
6491 hlist_add_head(&pp->node, &head->chain);
6492 }
6493 return pp;
6494 }
6495
6496 /* Caller must hold hashbucket lock for this tb with local BH disabled */
6497 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
6498 {
6499 if (pp && hlist_empty(&pp->owner)) {
6500 __hlist_del(&pp->node);
6501 kmem_cache_free(sctp_bucket_cachep, pp);
6502 SCTP_DBG_OBJCNT_DEC(bind_bucket);
6503 }
6504 }
6505
6506 /* Release this socket's reference to a local port. */
6507 static inline void __sctp_put_port(struct sock *sk)
6508 {
6509 struct sctp_bind_hashbucket *head =
6510 &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
6511 inet_sk(sk)->inet_num)];
6512 struct sctp_bind_bucket *pp;
6513
6514 spin_lock(&head->lock);
6515 pp = sctp_sk(sk)->bind_hash;
6516 __sk_del_bind_node(sk);
6517 sctp_sk(sk)->bind_hash = NULL;
6518 inet_sk(sk)->inet_num = 0;
6519 sctp_bucket_destroy(pp);
6520 spin_unlock(&head->lock);
6521 }
6522
6523 void sctp_put_port(struct sock *sk)
6524 {
6525 local_bh_disable();
6526 __sctp_put_port(sk);
6527 local_bh_enable();
6528 }
6529
6530 /*
6531  * The system picks an ephemeral port and chooses an address set equivalent
6532 * to binding with a wildcard address.
6533 * One of those addresses will be the primary address for the association.
6534 * This automatically enables the multihoming capability of SCTP.
6535 */
6536 static int sctp_autobind(struct sock *sk)
6537 {
6538 union sctp_addr autoaddr;
6539 struct sctp_af *af;
6540 __be16 port;
6541
6542 /* Initialize a local sockaddr structure to INADDR_ANY. */
6543 af = sctp_sk(sk)->pf->af;
6544
6545 port = htons(inet_sk(sk)->inet_num);
6546 af->inaddr_any(&autoaddr, port);
6547
6548 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
6549 }
6550
6551 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
6552 *
6553 * From RFC 2292
6554 * 4.2 The cmsghdr Structure *
6555 *
6556 * When ancillary data is sent or received, any number of ancillary data
6557 * objects can be specified by the msg_control and msg_controllen members of
6558 * the msghdr structure, because each object is preceded by
6559 * a cmsghdr structure defining the object's length (the cmsg_len member).
6560 * Historically Berkeley-derived implementations have passed only one object
6561 * at a time, but this API allows multiple objects to be
6562 * passed in a single call to sendmsg() or recvmsg(). The following example
6563 * shows two ancillary data objects in a control buffer.
6564 *
6565 * |<--------------------------- msg_controllen -------------------------->|
6566 * | |
6567 *
6568 * |<----- ancillary data object ----->|<----- ancillary data object ----->|
6569 *
6570 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
6571 * | | |
6572 *
6573 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| |
6574 *
6575 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| |
6576 * | | | | |
6577 *
6578 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
6579 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX|
6580 *
6581 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX|
6582 *
6583 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
6584 * ^
6585 * |
6586 *
6587 * msg_control
6588 * points here
6589 */
6590 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
6591 {
6592 struct cmsghdr *cmsg;
6593 struct msghdr *my_msg = (struct msghdr *)msg;
6594
6595 for_each_cmsghdr(cmsg, my_msg) {
6596 if (!CMSG_OK(my_msg, cmsg))
6597 return -EINVAL;
6598
6599 /* Should we parse this header or ignore? */
6600 if (cmsg->cmsg_level != IPPROTO_SCTP)
6601 continue;
6602
6603 /* Strictly check lengths following example in SCM code. */
6604 switch (cmsg->cmsg_type) {
6605 case SCTP_INIT:
6606 /* SCTP Socket API Extension
6607 * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
6608 *
6609 * This cmsghdr structure provides information for
6610 * initializing new SCTP associations with sendmsg().
6611 * The SCTP_INITMSG socket option uses this same data
6612 * structure. This structure is not used for
6613 * recvmsg().
6614 *
6615 * cmsg_level cmsg_type cmsg_data[]
6616 * ------------ ------------ ----------------------
6617 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
6618 */
6619 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
6620 return -EINVAL;
6621
6622 cmsgs->init = CMSG_DATA(cmsg);
6623 break;
6624
6625 case SCTP_SNDRCV:
6626 /* SCTP Socket API Extension
6627 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
6628 *
6629 * This cmsghdr structure specifies SCTP options for
6630 * sendmsg() and describes SCTP header information
6631 * about a received message through recvmsg().
6632 *
6633 * cmsg_level cmsg_type cmsg_data[]
6634 * ------------ ------------ ----------------------
6635 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
6636 */
6637 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
6638 return -EINVAL;
6639
6640 cmsgs->srinfo = CMSG_DATA(cmsg);
6641
6642 if (cmsgs->srinfo->sinfo_flags &
6643 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
6644 SCTP_SACK_IMMEDIATELY |
6645 SCTP_ABORT | SCTP_EOF))
6646 return -EINVAL;
6647 break;
6648
6649 case SCTP_SNDINFO:
6650 /* SCTP Socket API Extension
6651 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
6652 *
6653 * This cmsghdr structure specifies SCTP options for
6654 			 * sendmsg(). This structure and SCTP_RCVINFO replace
6655 			 * SCTP_SNDRCV, which has been deprecated.
6656 *
6657 * cmsg_level cmsg_type cmsg_data[]
6658 * ------------ ------------ ---------------------
6659 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo
6660 */
6661 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
6662 return -EINVAL;
6663
6664 cmsgs->sinfo = CMSG_DATA(cmsg);
6665
6666 if (cmsgs->sinfo->snd_flags &
6667 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
6668 SCTP_SACK_IMMEDIATELY |
6669 SCTP_ABORT | SCTP_EOF))
6670 return -EINVAL;
6671 break;
6672 default:
6673 return -EINVAL;
6674 }
6675 }
6676
6677 return 0;
6678 }
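
/* Usage sketch (illustrative only): building the kind of ancillary data
 * this parser accepts, here an SCTP_SNDINFO object that selects stream
 * 1 and requests unordered delivery. 'fd' and 'dest' (a filled-in peer
 * sockaddr) are placeholders; error handling is omitted.
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))] = { 0 };
 *	char payload[] = "hello";
 *	struct iovec iov = { .iov_base = payload, .iov_len = 5 };
 *	struct msghdr msg = {
 *		.msg_name       = &dest,
 *		.msg_namelen    = sizeof(dest),
 *		.msg_iov        = &iov,
 *		.msg_iovlen     = 1,
 *		.msg_control    = cbuf,
 *		.msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	struct sctp_sndinfo *si;
 *
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type  = SCTP_SNDINFO;
 *	cmsg->cmsg_len   = CMSG_LEN(sizeof(*si));
 *	si = (struct sctp_sndinfo *)CMSG_DATA(cmsg);
 *	si->snd_sid   = 1;
 *	si->snd_flags = SCTP_UNORDERED;
 *
 *	sendmsg(fd, &msg, 0);
 */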
6679
6680 /*
6681  * Wait for a packet.
6682 * Note: This function is the same function as in core/datagram.c
6683 * with a few modifications to make lksctp work.
6684 */
6685 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
6686 {
6687 int error;
6688 DEFINE_WAIT(wait);
6689
6690 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
6691
6692 /* Socket errors? */
6693 error = sock_error(sk);
6694 if (error)
6695 goto out;
6696
6697 if (!skb_queue_empty(&sk->sk_receive_queue))
6698 goto ready;
6699
6700 /* Socket shut down? */
6701 if (sk->sk_shutdown & RCV_SHUTDOWN)
6702 goto out;
6703
6704 /* Sequenced packets can come disconnected. If so we report the
6705 * problem.
6706 */
6707 error = -ENOTCONN;
6708
6709 /* Is there a good reason to think that we may receive some data? */
6710 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
6711 goto out;
6712
6713 /* Handle signals. */
6714 if (signal_pending(current))
6715 goto interrupted;
6716
6717 	/* Let another process have a go, since we are going to sleep
6718 	 * anyway. Note: this may cause odd behaviors if the message
6719 * does not fit in the user's buffer, but this seems to be the
6720 * only way to honor MSG_DONTWAIT realistically.
6721 */
6722 release_sock(sk);
6723 *timeo_p = schedule_timeout(*timeo_p);
6724 lock_sock(sk);
6725
6726 ready:
6727 finish_wait(sk_sleep(sk), &wait);
6728 return 0;
6729
6730 interrupted:
6731 error = sock_intr_errno(*timeo_p);
6732
6733 out:
6734 finish_wait(sk_sleep(sk), &wait);
6735 *err = error;
6736 return error;
6737 }
6738
6739 /* Receive a datagram.
6740 * Note: This is pretty much the same routine as in core/datagram.c
6741 * with a few changes to make lksctp work.
6742 */
6743 struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
6744 int noblock, int *err)
6745 {
6746 int error;
6747 struct sk_buff *skb;
6748 long timeo;
6749
6750 timeo = sock_rcvtimeo(sk, noblock);
6751
6752 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
6753 MAX_SCHEDULE_TIMEOUT);
6754
6755 do {
6756 		/* Again, only user-level code calls this function,
6757 		 * so nothing at interrupt level
6758 		 * will suddenly eat the receive_queue.
6759 *
6760 * Look at current nfs client by the way...
6761 * However, this function was correct in any case. 8)
6762 */
6763 if (flags & MSG_PEEK) {
6764 spin_lock_bh(&sk->sk_receive_queue.lock);
6765 skb = skb_peek(&sk->sk_receive_queue);
6766 if (skb)
6767 atomic_inc(&skb->users);
6768 spin_unlock_bh(&sk->sk_receive_queue.lock);
6769 } else {
6770 skb = skb_dequeue(&sk->sk_receive_queue);
6771 }
6772
6773 if (skb)
6774 return skb;
6775
6776 /* Caller is allowed not to check sk->sk_err before calling. */
6777 error = sock_error(sk);
6778 if (error)
6779 goto no_packet;
6780
6781 if (sk->sk_shutdown & RCV_SHUTDOWN)
6782 break;
6783
6784 if (sk_can_busy_loop(sk) &&
6785 sk_busy_loop(sk, noblock))
6786 continue;
6787
6788 /* User doesn't want to wait. */
6789 error = -EAGAIN;
6790 if (!timeo)
6791 goto no_packet;
6792 } while (sctp_wait_for_packet(sk, err, &timeo) == 0);
6793
6794 return NULL;
6795
6796 no_packet:
6797 *err = error;
6798 return NULL;
6799 }
6800
6801 /* If sndbuf has changed, wake up per association sndbuf waiters. */
6802 static void __sctp_write_space(struct sctp_association *asoc)
6803 {
6804 struct sock *sk = asoc->base.sk;
6805
6806 if (sctp_wspace(asoc) <= 0)
6807 return;
6808
6809 if (waitqueue_active(&asoc->wait))
6810 wake_up_interruptible(&asoc->wait);
6811
6812 if (sctp_writeable(sk)) {
6813 struct socket_wq *wq;
6814
6815 rcu_read_lock();
6816 wq = rcu_dereference(sk->sk_wq);
6817 if (wq) {
6818 if (waitqueue_active(&wq->wait))
6819 wake_up_interruptible(&wq->wait);
6820
6821 /* Note that we try to include the Async I/O support
6822 			 * here by modeling it on the current TCP/UDP code.
6823 * We have not tested with it yet.
6824 */
6825 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
6826 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
6827 }
6828 rcu_read_unlock();
6829 }
6830 }
6831
6832 static void sctp_wake_up_waiters(struct sock *sk,
6833 struct sctp_association *asoc)
6834 {
6835 struct sctp_association *tmp = asoc;
6836
6837 /* We do accounting for the sndbuf space per association,
6838 * so we only need to wake our own association.
6839 */
6840 if (asoc->ep->sndbuf_policy)
6841 return __sctp_write_space(asoc);
6842
6843 	/* If the association goes down and is just flushing its
6844 	 * outq, then just notify others normally.
6845 */
6846 if (asoc->base.dead)
6847 return sctp_write_space(sk);
6848
6849 /* Accounting for the sndbuf space is per socket, so we
6850 * need to wake up others, try to be fair and in case of
6851 * other associations, let them have a go first instead
6852 * of just doing a sctp_write_space() call.
6853 *
6854 * Note that we reach sctp_wake_up_waiters() only when
6855 * associations free up queued chunks, thus we are under
6856 * lock and the list of associations on a socket is
6857 * guaranteed not to change.
6858 */
6859 for (tmp = list_next_entry(tmp, asocs); 1;
6860 tmp = list_next_entry(tmp, asocs)) {
6861 /* Manually skip the head element. */
6862 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
6863 continue;
6864 /* Wake up association. */
6865 __sctp_write_space(tmp);
6866 /* We've reached the end. */
6867 if (tmp == asoc)
6868 break;
6869 }
6870 }
6871
6872 /* Do accounting for the sndbuf space.
6873 * Decrement the used sndbuf space of the corresponding association by the
6874 * data size which was just transmitted(freed).
6875 */
6876 static void sctp_wfree(struct sk_buff *skb)
6877 {
6878 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
6879 struct sctp_association *asoc = chunk->asoc;
6880 struct sock *sk = asoc->base.sk;
6881
6882 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
6883 sizeof(struct sk_buff) +
6884 sizeof(struct sctp_chunk);
6885
6886 atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
6887
6888 /*
6889 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
6890 */
6891 sk->sk_wmem_queued -= skb->truesize;
6892 sk_mem_uncharge(sk, skb->truesize);
6893
6894 sock_wfree(skb);
6895 sctp_wake_up_waiters(sk, asoc);
6896
6897 sctp_association_put(asoc);
6898 }
6899
6900 /* Do accounting for the receive space on the socket.
6901 * Accounting for the association is done in ulpevent.c
6902 * We set this as a destructor for the cloned data skbs so that
6903 * accounting is done at the correct time.
6904 */
6905 void sctp_sock_rfree(struct sk_buff *skb)
6906 {
6907 struct sock *sk = skb->sk;
6908 struct sctp_ulpevent *event = sctp_skb2event(skb);
6909
6910 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
6911
6912 /*
6913 * Mimic the behavior of sock_rfree
6914 */
6915 sk_mem_uncharge(sk, event->rmem_len);
6916 }
6917
6918
6919 /* Helper function to wait for space in the sndbuf. */
6920 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
6921 size_t msg_len)
6922 {
6923 struct sock *sk = asoc->base.sk;
6924 int err = 0;
6925 long current_timeo = *timeo_p;
6926 DEFINE_WAIT(wait);
6927
6928 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
6929 *timeo_p, msg_len);
6930
6931 /* Increment the association's refcnt. */
6932 sctp_association_hold(asoc);
6933
6934 /* Wait on the association specific sndbuf space. */
6935 for (;;) {
6936 prepare_to_wait_exclusive(&asoc->wait, &wait,
6937 TASK_INTERRUPTIBLE);
6938 if (!*timeo_p)
6939 goto do_nonblock;
6940 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
6941 asoc->base.dead)
6942 goto do_error;
6943 if (signal_pending(current))
6944 goto do_interrupted;
6945 if (msg_len <= sctp_wspace(asoc))
6946 break;
6947
6948 		/* Let another process have a go, since we are going
6949 		 * to sleep anyway.
6950 */
6951 release_sock(sk);
6952 current_timeo = schedule_timeout(current_timeo);
6953 BUG_ON(sk != asoc->base.sk);
6954 lock_sock(sk);
6955
6956 *timeo_p = current_timeo;
6957 }
6958
6959 out:
6960 finish_wait(&asoc->wait, &wait);
6961
6962 /* Release the association's refcnt. */
6963 sctp_association_put(asoc);
6964
6965 return err;
6966
6967 do_error:
6968 err = -EPIPE;
6969 goto out;
6970
6971 do_interrupted:
6972 err = sock_intr_errno(*timeo_p);
6973 goto out;
6974
6975 do_nonblock:
6976 err = -EAGAIN;
6977 goto out;
6978 }
6979
6980 void sctp_data_ready(struct sock *sk)
6981 {
6982 struct socket_wq *wq;
6983
6984 rcu_read_lock();
6985 wq = rcu_dereference(sk->sk_wq);
6986 if (skwq_has_sleeper(wq))
6987 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
6988 POLLRDNORM | POLLRDBAND);
6989 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
6990 rcu_read_unlock();
6991 }
6992
6993 /* If socket sndbuf has changed, wake up all per association waiters. */
6994 void sctp_write_space(struct sock *sk)
6995 {
6996 struct sctp_association *asoc;
6997
6998 /* Wake up the tasks in each wait queue. */
6999 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
7000 __sctp_write_space(asoc);
7001 }
7002 }
7003
7004 /* Is there any sndbuf space available on the socket?
7005 *
7006 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
7007 * associations on the same socket. For a UDP-style socket with
7008 * multiple associations, it is possible for it to be "unwriteable"
7009 * prematurely. I assume that this is acceptable because
7010 * a premature "unwriteable" is better than an accidental "writeable" which
7011 * would cause an unwanted block under certain circumstances. For the 1-1
7012 * UDP-style sockets or TCP-style sockets, this code should work.
7013 * - Daisy
7014 */
7015 static int sctp_writeable(struct sock *sk)
7016 {
7017 int amt = 0;
7018
7019 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
7020 if (amt < 0)
7021 amt = 0;
7022 return amt;
7023 }
7024
7025 /* Wait for an association to go into ESTABLISHED state. If timeout is 0,
7026 * returns immediately with EINPROGRESS.
7027 */
7028 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
7029 {
7030 struct sock *sk = asoc->base.sk;
7031 int err = 0;
7032 long current_timeo = *timeo_p;
7033 DEFINE_WAIT(wait);
7034
7035 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);
7036
7037 /* Increment the association's refcnt. */
7038 sctp_association_hold(asoc);
7039
7040 for (;;) {
7041 prepare_to_wait_exclusive(&asoc->wait, &wait,
7042 TASK_INTERRUPTIBLE);
7043 if (!*timeo_p)
7044 goto do_nonblock;
7045 if (sk->sk_shutdown & RCV_SHUTDOWN)
7046 break;
7047 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
7048 asoc->base.dead)
7049 goto do_error;
7050 if (signal_pending(current))
7051 goto do_interrupted;
7052
7053 if (sctp_state(asoc, ESTABLISHED))
7054 break;
7055
7056 		/* Let another process have a go, since we are going
7057 		 * to sleep anyway.
7058 */
7059 release_sock(sk);
7060 current_timeo = schedule_timeout(current_timeo);
7061 lock_sock(sk);
7062
7063 *timeo_p = current_timeo;
7064 }
7065
7066 out:
7067 finish_wait(&asoc->wait, &wait);
7068
7069 /* Release the association's refcnt. */
7070 sctp_association_put(asoc);
7071
7072 return err;
7073
7074 do_error:
7075 if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
7076 err = -ETIMEDOUT;
7077 else
7078 err = -ECONNREFUSED;
7079 goto out;
7080
7081 do_interrupted:
7082 err = sock_intr_errno(*timeo_p);
7083 goto out;
7084
7085 do_nonblock:
7086 err = -EINPROGRESS;
7087 goto out;
7088 }
7089
7090 static int sctp_wait_for_accept(struct sock *sk, long timeo)
7091 {
7092 struct sctp_endpoint *ep;
7093 int err = 0;
7094 DEFINE_WAIT(wait);
7095
7096 ep = sctp_sk(sk)->ep;
7097
7098
7099 for (;;) {
7100 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
7101 TASK_INTERRUPTIBLE);
7102
7103 if (list_empty(&ep->asocs)) {
7104 release_sock(sk);
7105 timeo = schedule_timeout(timeo);
7106 lock_sock(sk);
7107 }
7108
7109 err = -EINVAL;
7110 if (!sctp_sstate(sk, LISTENING))
7111 break;
7112
7113 err = 0;
7114 if (!list_empty(&ep->asocs))
7115 break;
7116
7117 err = sock_intr_errno(timeo);
7118 if (signal_pending(current))
7119 break;
7120
7121 err = -EAGAIN;
7122 if (!timeo)
7123 break;
7124 }
7125
7126 finish_wait(sk_sleep(sk), &wait);
7127
7128 return err;
7129 }
7130
7131 static void sctp_wait_for_close(struct sock *sk, long timeout)
7132 {
7133 DEFINE_WAIT(wait);
7134
7135 do {
7136 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
7137 if (list_empty(&sctp_sk(sk)->ep->asocs))
7138 break;
7139 release_sock(sk);
7140 timeout = schedule_timeout(timeout);
7141 lock_sock(sk);
7142 } while (!signal_pending(current) && timeout);
7143
7144 finish_wait(sk_sleep(sk), &wait);
7145 }
7146
7147 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
7148 {
7149 struct sk_buff *frag;
7150
7151 if (!skb->data_len)
7152 goto done;
7153
7154 /* Don't forget the fragments. */
7155 skb_walk_frags(skb, frag)
7156 sctp_skb_set_owner_r_frag(frag, sk);
7157
7158 done:
7159 sctp_skb_set_owner_r(skb, sk);
7160 }
7161
7162 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
7163 struct sctp_association *asoc)
7164 {
7165 struct inet_sock *inet = inet_sk(sk);
7166 struct inet_sock *newinet;
7167
7168 newsk->sk_type = sk->sk_type;
7169 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
7170 newsk->sk_flags = sk->sk_flags;
7171 newsk->sk_tsflags = sk->sk_tsflags;
7172 newsk->sk_no_check_tx = sk->sk_no_check_tx;
7173 newsk->sk_no_check_rx = sk->sk_no_check_rx;
7174 newsk->sk_reuse = sk->sk_reuse;
7175
7176 newsk->sk_shutdown = sk->sk_shutdown;
7177 newsk->sk_destruct = sctp_destruct_sock;
7178 newsk->sk_family = sk->sk_family;
7179 newsk->sk_protocol = IPPROTO_SCTP;
7180 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
7181 newsk->sk_sndbuf = sk->sk_sndbuf;
7182 newsk->sk_rcvbuf = sk->sk_rcvbuf;
7183 newsk->sk_lingertime = sk->sk_lingertime;
7184 newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
7185 newsk->sk_sndtimeo = sk->sk_sndtimeo;
7186
7187 newinet = inet_sk(newsk);
7188
7189 /* Initialize sk's sport, dport, rcv_saddr and daddr for
7190 * getsockname() and getpeername()
7191 */
7192 newinet->inet_sport = inet->inet_sport;
7193 newinet->inet_saddr = inet->inet_saddr;
7194 newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
7195 newinet->inet_dport = htons(asoc->peer.port);
7196 newinet->pmtudisc = inet->pmtudisc;
7197 newinet->inet_id = asoc->next_tsn ^ jiffies;
7198
7199 newinet->uc_ttl = inet->uc_ttl;
7200 newinet->mc_loop = 1;
7201 newinet->mc_ttl = 1;
7202 newinet->mc_index = 0;
7203 newinet->mc_list = NULL;
7204
7205 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
7206 net_enable_timestamp();
7207
7208 security_sk_clone(sk, newsk);
7209 }
7210
7211 static inline void sctp_copy_descendant(struct sock *sk_to,
7212 const struct sock *sk_from)
7213 {
7214 int ancestor_size = sizeof(struct inet_sock) +
7215 sizeof(struct sctp_sock) -
7216 offsetof(struct sctp_sock, auto_asconf_list);
7217
7218 if (sk_from->sk_family == PF_INET6)
7219 ancestor_size += sizeof(struct ipv6_pinfo);
7220
7221 __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
7222 }
7223
7224 /* Populate the fields of the newsk from the oldsk and migrate the assoc
7225 * and its messages to the newsk.
7226 */
7227 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
7228 struct sctp_association *assoc,
7229 sctp_socket_type_t type)
7230 {
7231 struct sctp_sock *oldsp = sctp_sk(oldsk);
7232 struct sctp_sock *newsp = sctp_sk(newsk);
7233 struct sctp_bind_bucket *pp; /* hash list port iterator */
7234 struct sctp_endpoint *newep = newsp->ep;
7235 struct sk_buff *skb, *tmp;
7236 struct sctp_ulpevent *event;
7237 struct sctp_bind_hashbucket *head;
7238
7239 /* Migrate socket buffer sizes and all the socket level options to the
7240 * new socket.
7241 */
7242 newsk->sk_sndbuf = oldsk->sk_sndbuf;
7243 newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
7244 /* Brute force copy old sctp opt. */
7245 sctp_copy_descendant(newsk, oldsk);
7246
7247 /* Restore the ep value that was overwritten with the above structure
7248 * copy.
7249 */
7250 newsp->ep = newep;
7251 newsp->hmac = NULL;
7252
7253 /* Hook this new socket in to the bind_hash list. */
7254 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
7255 inet_sk(oldsk)->inet_num)];
7256 local_bh_disable();
7257 spin_lock(&head->lock);
7258 pp = sctp_sk(oldsk)->bind_hash;
7259 sk_add_bind_node(newsk, &pp->owner);
7260 sctp_sk(newsk)->bind_hash = pp;
7261 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
7262 spin_unlock(&head->lock);
7263 local_bh_enable();
7264
7265 /* Copy the bind_addr list from the original endpoint to the new
7266 * endpoint so that we can handle restarts properly
7267 */
7268 sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
7269 &oldsp->ep->base.bind_addr, GFP_KERNEL);
7270
7271 /* Move any messages in the old socket's receive queue that are for the
7272 * peeled off association to the new socket's receive queue.
7273 */
7274 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
7275 event = sctp_skb2event(skb);
7276 if (event->asoc == assoc) {
7277 __skb_unlink(skb, &oldsk->sk_receive_queue);
7278 __skb_queue_tail(&newsk->sk_receive_queue, skb);
7279 sctp_skb_set_owner_r_frag(skb, newsk);
7280 }
7281 }
7282
7283 /* Clean up any messages pending delivery due to partial
7284 * delivery. Three cases:
7285 * 1) No partial delivery; no work.
7286 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
7287 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
7288 */
7289 skb_queue_head_init(&newsp->pd_lobby);
7290 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
7291
7292 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
7293 struct sk_buff_head *queue;
7294
7295 /* Decide which queue to move pd_lobby skbs to. */
7296 if (assoc->ulpq.pd_mode)
7297 queue = &newsp->pd_lobby;
7298 else
7299 queue = &newsk->sk_receive_queue;
7300
7301 /* Walk through the pd_lobby, looking for skbs that
7302 * need to be moved to the new socket.
7303 */
7304 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
7305 event = sctp_skb2event(skb);
7306 if (event->asoc == assoc) {
7307 __skb_unlink(skb, &oldsp->pd_lobby);
7308 __skb_queue_tail(queue, skb);
7309 sctp_skb_set_owner_r_frag(skb, newsk);
7310 }
7311 }
7312
7313 /* Clean up any skbs waiting for the partial
7314 * delivery to finish.
7315 */
7316 if (assoc->ulpq.pd_mode)
7317 sctp_clear_pd(oldsk, NULL);
7318
7319 }
7320
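/* Also hand ownership of any skbs still sitting in the association's
 * reassembly and ordering queues over to the new socket, so their
 * receive-buffer accounting is charged there.
 */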
7321 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
7322 sctp_skb_set_owner_r_frag(skb, newsk);
7323
7324 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
7325 sctp_skb_set_owner_r_frag(skb, newsk);
7326
7327 /* Set the type of socket to indicate that it is peeled off from the
7328 * original UDP-style socket or created with the accept() call on a
7329 * TCP-style socket.
7330 */
7331 newsp->type = type;
7332
7333 /* Mark the new socket "in-use" by the user so that any packets
7334 * that may arrive on the association after we've moved it are
7335 * queued to the backlog. This prevents a potential race between
7336 * backlog processing on the old socket and new-packet processing
7337 * on the new socket.
7338 *
7339 * The caller has just allocated newsk so we can guarantee that other
7340 * paths won't try to lock it and then oldsk.
7341 */
7342 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
7343 sctp_assoc_migrate(assoc, newsk);
7344
7345 /* If the association on the newsk is already closed before accept()
7346 * is called, set the RCV_SHUTDOWN flag.
7347 */
7348 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP))
7349 newsk->sk_shutdown |= RCV_SHUTDOWN;
7350
7351 newsk->sk_state = SCTP_SS_ESTABLISHED;
7352 release_sock(newsk);
7353 }
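
/* Illustrative userspace sketch (not part of this file): sctp_sock_migrate()
 * runs when an association is peeled off a one-to-many socket or accepted on
 * a one-to-one socket.  A peel-off looks roughly like the following, where
 * "sd", "assoc_id" and "newfd" are made-up names for the example:
 *
 *	sctp_peeloff_arg_t peel = { .associd = assoc_id };
 *	socklen_t len = sizeof(peel);
 *
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_SOCKOPT_PEELOFF,
 *		       &peel, &len) == 0)
 *		newfd = peel.sd;
 *
 * On success the kernel installs a new one-to-one style socket for the
 * association and reports its descriptor in peel.sd.
 */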
7354
7355
7356 /* This proto struct describes the ULP interface for SCTP. */
7357 struct proto sctp_prot = {
7358 .name = "SCTP",
7359 .owner = THIS_MODULE,
7360 .close = sctp_close,
7361 .connect = sctp_connect,
7362 .disconnect = sctp_disconnect,
7363 .accept = sctp_accept,
7364 .ioctl = sctp_ioctl,
7365 .init = sctp_init_sock,
7366 .destroy = sctp_destroy_sock,
7367 .shutdown = sctp_shutdown,
7368 .setsockopt = sctp_setsockopt,
7369 .getsockopt = sctp_getsockopt,
7370 .sendmsg = sctp_sendmsg,
7371 .recvmsg = sctp_recvmsg,
7372 .bind = sctp_bind,
7373 .backlog_rcv = sctp_backlog_rcv,
7374 .hash = sctp_hash,
7375 .unhash = sctp_unhash,
7376 .get_port = sctp_get_port,
7377 .obj_size = sizeof(struct sctp_sock),
7378 .sysctl_mem = sysctl_sctp_mem,
7379 .sysctl_rmem = sysctl_sctp_rmem,
7380 .sysctl_wmem = sysctl_sctp_wmem,
7381 .memory_pressure = &sctp_memory_pressure,
7382 .enter_memory_pressure = sctp_enter_memory_pressure,
7383 .memory_allocated = &sctp_memory_allocated,
7384 .sockets_allocated = &sctp_sockets_allocated,
7385 };
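
/* Sketch of how this proto is wired into the socket layer.  The actual
 * registration lives in net/sctp/protocol.c; the protosw variable name below
 * is only illustrative:
 *
 *	static struct inet_protosw sctp_seqpacket_protosw = {
 *		.type     = SOCK_SEQPACKET,
 *		.protocol = IPPROTO_SCTP,
 *		.prot     = &sctp_prot,
 *		.ops      = &inet_seqpacket_ops,
 *	};
 *
 *	proto_register(&sctp_prot, 1);
 *	inet_register_protosw(&sctp_seqpacket_protosw);
 */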
7386
7387 #if IS_ENABLED(CONFIG_IPV6)
7388
7389 #include <net/transp_v6.h>
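/* For AF_INET6 sockets the generic SCTP teardown is not enough: IPv6-level
 * state (socket options, packet options, flow labels) also hangs off the
 * socket, so it is released via inet6_destroy_sock() after sctp_destroy_sock().
 */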
7390 static void sctp_v6_destroy_sock(struct sock *sk)
7391 {
7392 sctp_destroy_sock(sk);
7393 inet6_destroy_sock(sk);
7394 }
7395
7396 struct proto sctpv6_prot = {
7397 .name = "SCTPv6",
7398 .owner = THIS_MODULE,
7399 .close = sctp_close,
7400 .connect = sctp_connect,
7401 .disconnect = sctp_disconnect,
7402 .accept = sctp_accept,
7403 .ioctl = sctp_ioctl,
7404 .init = sctp_init_sock,
7405 .destroy = sctp_v6_destroy_sock,
7406 .shutdown = sctp_shutdown,
7407 .setsockopt = sctp_setsockopt,
7408 .getsockopt = sctp_getsockopt,
7409 .sendmsg = sctp_sendmsg,
7410 .recvmsg = sctp_recvmsg,
7411 .bind = sctp_bind,
7412 .backlog_rcv = sctp_backlog_rcv,
7413 .hash = sctp_hash,
7414 .unhash = sctp_unhash,
7415 .get_port = sctp_get_port,
7416 .obj_size = sizeof(struct sctp6_sock),
7417 .sysctl_mem = sysctl_sctp_mem,
7418 .sysctl_rmem = sysctl_sctp_rmem,
7419 .sysctl_wmem = sysctl_sctp_wmem,
7420 .memory_pressure = &sctp_memory_pressure,
7421 .enter_memory_pressure = sctp_enter_memory_pressure,
7422 .memory_allocated = &sctp_memory_allocated,
7423 .sockets_allocated = &sctp_sockets_allocated,
7424 };
7425 #endif /* IS_ENABLED(CONFIG_IPV6) */