]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/sctp/socket.c
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[mirror_ubuntu-artful-kernel.git] / net / sctp / socket.c
1 /* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001-2003 Intel Corp.
6 * Copyright (c) 2001-2002 Nokia, Inc.
7 * Copyright (c) 2001 La Monte H.P. Yarroll
8 *
9 * This file is part of the SCTP kernel implementation
10 *
11 * These functions interface with the sockets layer to implement the
12 * SCTP Extensions for the Sockets API.
13 *
14 * Note that the descriptions from the specification are USER level
15 * functions--this file is the functions which populate the struct proto
16 * for SCTP which is the BOTTOM of the sockets interface.
17 *
18 * This SCTP implementation is free software;
19 * you can redistribute it and/or modify it under the terms of
20 * the GNU General Public License as published by
21 * the Free Software Foundation; either version 2, or (at your option)
22 * any later version.
23 *
24 * This SCTP implementation is distributed in the hope that it
25 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
26 * ************************
27 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
28 * See the GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with GNU CC; see the file COPYING. If not, see
32 * <http://www.gnu.org/licenses/>.
33 *
34 * Please send any bug reports or fixes you make to the
35 * email address(es):
36 * lksctp developers <linux-sctp@vger.kernel.org>
37 *
38 * Written or modified by:
39 * La Monte H.P. Yarroll <piggy@acm.org>
40 * Narasimha Budihal <narsi@refcode.org>
41 * Karl Knutson <karl@athena.chicago.il.us>
42 * Jon Grimm <jgrimm@us.ibm.com>
43 * Xingang Guo <xingang.guo@intel.com>
44 * Daisy Chang <daisyc@us.ibm.com>
45 * Sridhar Samudrala <samudrala@us.ibm.com>
46 * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com>
47 * Ardelle Fan <ardelle.fan@intel.com>
48 * Ryan Layer <rmlayer@us.ibm.com>
49 * Anup Pemmaiah <pemmaiah@cc.usu.edu>
50 * Kevin Gao <kevin.gao@intel.com>
51 */
52
53 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
54
55 #include <crypto/hash.h>
56 #include <linux/types.h>
57 #include <linux/kernel.h>
58 #include <linux/wait.h>
59 #include <linux/time.h>
60 #include <linux/sched/signal.h>
61 #include <linux/ip.h>
62 #include <linux/capability.h>
63 #include <linux/fcntl.h>
64 #include <linux/poll.h>
65 #include <linux/init.h>
66 #include <linux/slab.h>
67 #include <linux/file.h>
68 #include <linux/compat.h>
69
70 #include <net/ip.h>
71 #include <net/icmp.h>
72 #include <net/route.h>
73 #include <net/ipv6.h>
74 #include <net/inet_common.h>
75 #include <net/busy_poll.h>
76
77 #include <linux/socket.h> /* for sa_family_t */
78 #include <linux/export.h>
79 #include <net/sock.h>
80 #include <net/sctp/sctp.h>
81 #include <net/sctp/sm.h>
82
83 /* Forward declarations for internal helper functions. */
84 static int sctp_writeable(struct sock *sk);
85 static void sctp_wfree(struct sk_buff *skb);
86 static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
87 size_t msg_len);
88 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
89 static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
90 static int sctp_wait_for_accept(struct sock *sk, long timeo);
91 static void sctp_wait_for_close(struct sock *sk, long timeo);
92 static void sctp_destruct_sock(struct sock *sk);
93 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
94 union sctp_addr *addr, int len);
95 static int sctp_bindx_add(struct sock *, struct sockaddr *, int);
96 static int sctp_bindx_rem(struct sock *, struct sockaddr *, int);
97 static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int);
98 static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int);
99 static int sctp_send_asconf(struct sctp_association *asoc,
100 struct sctp_chunk *chunk);
101 static int sctp_do_bind(struct sock *, union sctp_addr *, int);
102 static int sctp_autobind(struct sock *sk);
103 static void sctp_sock_migrate(struct sock *, struct sock *,
104 struct sctp_association *, sctp_socket_type_t);
105
106 static unsigned long sctp_memory_pressure;
107 static atomic_long_t sctp_memory_allocated;
108 struct percpu_counter sctp_sockets_allocated;
109
110 static void sctp_enter_memory_pressure(struct sock *sk)
111 {
112 sctp_memory_pressure = 1;
113 }
114
115
116 /* Get the sndbuf space available at the time on the association. */
117 static inline int sctp_wspace(struct sctp_association *asoc)
118 {
119 int amt;
120
121 if (asoc->ep->sndbuf_policy)
122 amt = asoc->sndbuf_used;
123 else
124 amt = sk_wmem_alloc_get(asoc->base.sk);
125
126 if (amt >= asoc->base.sk->sk_sndbuf) {
127 if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK)
128 amt = 0;
129 else {
130 amt = sk_stream_wspace(asoc->base.sk);
131 if (amt < 0)
132 amt = 0;
133 }
134 } else {
135 amt = asoc->base.sk->sk_sndbuf - amt;
136 }
137 return amt;
138 }
139
140 /* Increment the used sndbuf space count of the corresponding association by
141 * the size of the outgoing data chunk.
142 * Also, set the skb destructor for sndbuf accounting later.
143 *
144 * Since it is always 1-1 between chunk and skb, and also a new skb is always
145 * allocated for chunk bundling in sctp_packet_transmit(), we can use the
146 * destructor in the data chunk skb for the purpose of the sndbuf space
147 * tracking.
148 */
149 static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
150 {
151 struct sctp_association *asoc = chunk->asoc;
152 struct sock *sk = asoc->base.sk;
153
154 /* The sndbuf space is tracked per association. */
155 sctp_association_hold(asoc);
156
157 skb_set_owner_w(chunk->skb, sk);
158
159 chunk->skb->destructor = sctp_wfree;
160 /* Save the chunk pointer in skb for sctp_wfree to use later. */
161 skb_shinfo(chunk->skb)->destructor_arg = chunk;
162
163 asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
164 sizeof(struct sk_buff) +
165 sizeof(struct sctp_chunk);
166
167 refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
168 sk->sk_wmem_queued += chunk->skb->truesize;
169 sk_mem_charge(sk, chunk->skb->truesize);
170 }
171
172 /* Verify that this is a valid address. */
173 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
174 int len)
175 {
176 struct sctp_af *af;
177
178 /* Verify basic sockaddr. */
179 af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
180 if (!af)
181 return -EINVAL;
182
183 /* Is this a valid SCTP address? */
184 if (!af->addr_valid(addr, sctp_sk(sk), NULL))
185 return -EINVAL;
186
187 if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
188 return -EINVAL;
189
190 return 0;
191 }
192
193 /* Look up the association by its id. If this is not a UDP-style
194 * socket, the ID field is always ignored.
195 */
196 struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
197 {
198 struct sctp_association *asoc = NULL;
199
200 /* If this is not a UDP-style socket, assoc id should be ignored. */
201 if (!sctp_style(sk, UDP)) {
202 /* Return NULL if the socket state is not ESTABLISHED. It
203 * could be a TCP-style listening socket or a socket which
204 * hasn't yet called connect() to establish an association.
205 */
206 if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
207 return NULL;
208
209 /* Get the first and the only association from the list. */
210 if (!list_empty(&sctp_sk(sk)->ep->asocs))
211 asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
212 struct sctp_association, asocs);
213 return asoc;
214 }
215
216 /* Otherwise this is a UDP-style socket. */
217 if (!id || (id == (sctp_assoc_t)-1))
218 return NULL;
219
220 spin_lock_bh(&sctp_assocs_id_lock);
221 asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
222 spin_unlock_bh(&sctp_assocs_id_lock);
223
224 if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
225 return NULL;
226
227 return asoc;
228 }
229
230 /* Look up the transport from an address and an assoc id. If both address and
231 * id are specified, the associations matching the address and the id should be
232 * the same.
233 */
234 static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
235 struct sockaddr_storage *addr,
236 sctp_assoc_t id)
237 {
238 struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
239 struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
240 union sctp_addr *laddr = (union sctp_addr *)addr;
241 struct sctp_transport *transport;
242
243 if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
244 return NULL;
245
246 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
247 laddr,
248 &transport);
249
250 if (!addr_asoc)
251 return NULL;
252
253 id_asoc = sctp_id2assoc(sk, id);
254 if (id_asoc && (id_asoc != addr_asoc))
255 return NULL;
256
257 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
258 (union sctp_addr *)addr);
259
260 return transport;
261 }
262
263 /* API 3.1.2 bind() - UDP Style Syntax
264 * The syntax of bind() is,
265 *
266 * ret = bind(int sd, struct sockaddr *addr, int addrlen);
267 *
268 * sd - the socket descriptor returned by socket().
269 * addr - the address structure (struct sockaddr_in or struct
270 * sockaddr_in6 [RFC 2553]),
271 * addr_len - the size of the address structure.
272 */
273 static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
274 {
275 int retval = 0;
276
277 lock_sock(sk);
278
279 pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
280 addr, addr_len);
281
282 /* Disallow binding twice. */
283 if (!sctp_sk(sk)->ep->base.bind_addr.port)
284 retval = sctp_do_bind(sk, (union sctp_addr *)addr,
285 addr_len);
286 else
287 retval = -EINVAL;
288
289 release_sock(sk);
290
291 return retval;
292 }
293
294 static long sctp_get_port_local(struct sock *, union sctp_addr *);
295
296 /* Verify this is a valid sockaddr. */
297 static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
298 union sctp_addr *addr, int len)
299 {
300 struct sctp_af *af;
301
302 /* Check minimum size. */
303 if (len < sizeof (struct sockaddr))
304 return NULL;
305
306 /* V4 mapped address are really of AF_INET family */
307 if (addr->sa.sa_family == AF_INET6 &&
308 ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
309 if (!opt->pf->af_supported(AF_INET, opt))
310 return NULL;
311 } else {
312 /* Does this PF support this AF? */
313 if (!opt->pf->af_supported(addr->sa.sa_family, opt))
314 return NULL;
315 }
316
317 /* If we get this far, af is valid. */
318 af = sctp_get_af_specific(addr->sa.sa_family);
319
320 if (len < af->sockaddr_len)
321 return NULL;
322
323 return af;
324 }
325
326 /* Bind a local address either to an endpoint or to an association. */
327 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
328 {
329 struct net *net = sock_net(sk);
330 struct sctp_sock *sp = sctp_sk(sk);
331 struct sctp_endpoint *ep = sp->ep;
332 struct sctp_bind_addr *bp = &ep->base.bind_addr;
333 struct sctp_af *af;
334 unsigned short snum;
335 int ret = 0;
336
337 /* Common sockaddr verification. */
338 af = sctp_sockaddr_af(sp, addr, len);
339 if (!af) {
340 pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n",
341 __func__, sk, addr, len);
342 return -EINVAL;
343 }
344
345 snum = ntohs(addr->v4.sin_port);
346
347 pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n",
348 __func__, sk, &addr->sa, bp->port, snum, len);
349
350 /* PF specific bind() address verification. */
351 if (!sp->pf->bind_verify(sp, addr))
352 return -EADDRNOTAVAIL;
353
354 /* We must either be unbound, or bind to the same port.
355 * It's OK to allow 0 ports if we are already bound.
356 * We'll just inhert an already bound port in this case
357 */
358 if (bp->port) {
359 if (!snum)
360 snum = bp->port;
361 else if (snum != bp->port) {
362 pr_debug("%s: new port %d doesn't match existing port "
363 "%d\n", __func__, snum, bp->port);
364 return -EINVAL;
365 }
366 }
367
368 if (snum && snum < inet_prot_sock(net) &&
369 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
370 return -EACCES;
371
372 /* See if the address matches any of the addresses we may have
373 * already bound before checking against other endpoints.
374 */
375 if (sctp_bind_addr_match(bp, addr, sp))
376 return -EINVAL;
377
378 /* Make sure we are allowed to bind here.
379 * The function sctp_get_port_local() does duplicate address
380 * detection.
381 */
382 addr->v4.sin_port = htons(snum);
383 if ((ret = sctp_get_port_local(sk, addr))) {
384 return -EADDRINUSE;
385 }
386
387 /* Refresh ephemeral port. */
388 if (!bp->port)
389 bp->port = inet_sk(sk)->inet_num;
390
391 /* Add the address to the bind address list.
392 * Use GFP_ATOMIC since BHs will be disabled.
393 */
394 ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
395 SCTP_ADDR_SRC, GFP_ATOMIC);
396
397 /* Copy back into socket for getsockname() use. */
398 if (!ret) {
399 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
400 sp->pf->to_sk_saddr(addr, sk);
401 }
402
403 return ret;
404 }
405
406 /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks
407 *
408 * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged
409 * at any one time. If a sender, after sending an ASCONF chunk, decides
410 * it needs to transfer another ASCONF Chunk, it MUST wait until the
411 * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a
412 * subsequent ASCONF. Note this restriction binds each side, so at any
413 * time two ASCONF may be in-transit on any given association (one sent
414 * from each endpoint).
415 */
416 static int sctp_send_asconf(struct sctp_association *asoc,
417 struct sctp_chunk *chunk)
418 {
419 struct net *net = sock_net(asoc->base.sk);
420 int retval = 0;
421
422 /* If there is an outstanding ASCONF chunk, queue it for later
423 * transmission.
424 */
425 if (asoc->addip_last_asconf) {
426 list_add_tail(&chunk->list, &asoc->addip_chunk_list);
427 goto out;
428 }
429
430 /* Hold the chunk until an ASCONF_ACK is received. */
431 sctp_chunk_hold(chunk);
432 retval = sctp_primitive_ASCONF(net, asoc, chunk);
433 if (retval)
434 sctp_chunk_free(chunk);
435 else
436 asoc->addip_last_asconf = chunk;
437
438 out:
439 return retval;
440 }
441
442 /* Add a list of addresses as bind addresses to local endpoint or
443 * association.
444 *
445 * Basically run through each address specified in the addrs/addrcnt
446 * array/length pair, determine if it is IPv6 or IPv4 and call
447 * sctp_do_bind() on it.
448 *
449 * If any of them fails, then the operation will be reversed and the
450 * ones that were added will be removed.
451 *
452 * Only sctp_setsockopt_bindx() is supposed to call this function.
453 */
454 static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
455 {
456 int cnt;
457 int retval = 0;
458 void *addr_buf;
459 struct sockaddr *sa_addr;
460 struct sctp_af *af;
461
462 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
463 addrs, addrcnt);
464
465 addr_buf = addrs;
466 for (cnt = 0; cnt < addrcnt; cnt++) {
467 /* The list may contain either IPv4 or IPv6 address;
468 * determine the address length for walking thru the list.
469 */
470 sa_addr = addr_buf;
471 af = sctp_get_af_specific(sa_addr->sa_family);
472 if (!af) {
473 retval = -EINVAL;
474 goto err_bindx_add;
475 }
476
477 retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
478 af->sockaddr_len);
479
480 addr_buf += af->sockaddr_len;
481
482 err_bindx_add:
483 if (retval < 0) {
484 /* Failed. Cleanup the ones that have been added */
485 if (cnt > 0)
486 sctp_bindx_rem(sk, addrs, cnt);
487 return retval;
488 }
489 }
490
491 return retval;
492 }
493
494 /* Send an ASCONF chunk with Add IP address parameters to all the peers of the
495 * associations that are part of the endpoint indicating that a list of local
496 * addresses are added to the endpoint.
497 *
498 * If any of the addresses is already in the bind address list of the
499 * association, we do not send the chunk for that association. But it will not
500 * affect other associations.
501 *
502 * Only sctp_setsockopt_bindx() is supposed to call this function.
503 */
504 static int sctp_send_asconf_add_ip(struct sock *sk,
505 struct sockaddr *addrs,
506 int addrcnt)
507 {
508 struct net *net = sock_net(sk);
509 struct sctp_sock *sp;
510 struct sctp_endpoint *ep;
511 struct sctp_association *asoc;
512 struct sctp_bind_addr *bp;
513 struct sctp_chunk *chunk;
514 struct sctp_sockaddr_entry *laddr;
515 union sctp_addr *addr;
516 union sctp_addr saveaddr;
517 void *addr_buf;
518 struct sctp_af *af;
519 struct list_head *p;
520 int i;
521 int retval = 0;
522
523 if (!net->sctp.addip_enable)
524 return retval;
525
526 sp = sctp_sk(sk);
527 ep = sp->ep;
528
529 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
530 __func__, sk, addrs, addrcnt);
531
532 list_for_each_entry(asoc, &ep->asocs, asocs) {
533 if (!asoc->peer.asconf_capable)
534 continue;
535
536 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
537 continue;
538
539 if (!sctp_state(asoc, ESTABLISHED))
540 continue;
541
542 /* Check if any address in the packed array of addresses is
543 * in the bind address list of the association. If so,
544 * do not send the asconf chunk to its peer, but continue with
545 * other associations.
546 */
547 addr_buf = addrs;
548 for (i = 0; i < addrcnt; i++) {
549 addr = addr_buf;
550 af = sctp_get_af_specific(addr->v4.sin_family);
551 if (!af) {
552 retval = -EINVAL;
553 goto out;
554 }
555
556 if (sctp_assoc_lookup_laddr(asoc, addr))
557 break;
558
559 addr_buf += af->sockaddr_len;
560 }
561 if (i < addrcnt)
562 continue;
563
564 /* Use the first valid address in bind addr list of
565 * association as Address Parameter of ASCONF CHUNK.
566 */
567 bp = &asoc->base.bind_addr;
568 p = bp->address_list.next;
569 laddr = list_entry(p, struct sctp_sockaddr_entry, list);
570 chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs,
571 addrcnt, SCTP_PARAM_ADD_IP);
572 if (!chunk) {
573 retval = -ENOMEM;
574 goto out;
575 }
576
577 /* Add the new addresses to the bind address list with
578 * use_as_src set to 0.
579 */
580 addr_buf = addrs;
581 for (i = 0; i < addrcnt; i++) {
582 addr = addr_buf;
583 af = sctp_get_af_specific(addr->v4.sin_family);
584 memcpy(&saveaddr, addr, af->sockaddr_len);
585 retval = sctp_add_bind_addr(bp, &saveaddr,
586 sizeof(saveaddr),
587 SCTP_ADDR_NEW, GFP_ATOMIC);
588 addr_buf += af->sockaddr_len;
589 }
590 if (asoc->src_out_of_asoc_ok) {
591 struct sctp_transport *trans;
592
593 list_for_each_entry(trans,
594 &asoc->peer.transport_addr_list, transports) {
595 /* Clear the source and route cache */
596 sctp_transport_dst_release(trans);
597 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
598 2*asoc->pathmtu, 4380));
599 trans->ssthresh = asoc->peer.i.a_rwnd;
600 trans->rto = asoc->rto_initial;
601 sctp_max_rto(asoc, trans);
602 trans->rtt = trans->srtt = trans->rttvar = 0;
603 sctp_transport_route(trans, NULL,
604 sctp_sk(asoc->base.sk));
605 }
606 }
607 retval = sctp_send_asconf(asoc, chunk);
608 }
609
610 out:
611 return retval;
612 }
613
614 /* Remove a list of addresses from bind addresses list. Do not remove the
615 * last address.
616 *
617 * Basically run through each address specified in the addrs/addrcnt
618 * array/length pair, determine if it is IPv6 or IPv4 and call
619 * sctp_del_bind() on it.
620 *
621 * If any of them fails, then the operation will be reversed and the
622 * ones that were removed will be added back.
623 *
624 * At least one address has to be left; if only one address is
625 * available, the operation will return -EBUSY.
626 *
627 * Only sctp_setsockopt_bindx() is supposed to call this function.
628 */
629 static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
630 {
631 struct sctp_sock *sp = sctp_sk(sk);
632 struct sctp_endpoint *ep = sp->ep;
633 int cnt;
634 struct sctp_bind_addr *bp = &ep->base.bind_addr;
635 int retval = 0;
636 void *addr_buf;
637 union sctp_addr *sa_addr;
638 struct sctp_af *af;
639
640 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
641 __func__, sk, addrs, addrcnt);
642
643 addr_buf = addrs;
644 for (cnt = 0; cnt < addrcnt; cnt++) {
645 /* If the bind address list is empty or if there is only one
646 * bind address, there is nothing more to be removed (we need
647 * at least one address here).
648 */
649 if (list_empty(&bp->address_list) ||
650 (sctp_list_single_entry(&bp->address_list))) {
651 retval = -EBUSY;
652 goto err_bindx_rem;
653 }
654
655 sa_addr = addr_buf;
656 af = sctp_get_af_specific(sa_addr->sa.sa_family);
657 if (!af) {
658 retval = -EINVAL;
659 goto err_bindx_rem;
660 }
661
662 if (!af->addr_valid(sa_addr, sp, NULL)) {
663 retval = -EADDRNOTAVAIL;
664 goto err_bindx_rem;
665 }
666
667 if (sa_addr->v4.sin_port &&
668 sa_addr->v4.sin_port != htons(bp->port)) {
669 retval = -EINVAL;
670 goto err_bindx_rem;
671 }
672
673 if (!sa_addr->v4.sin_port)
674 sa_addr->v4.sin_port = htons(bp->port);
675
676 /* FIXME - There is probably a need to check if sk->sk_saddr and
677 * sk->sk_rcv_addr are currently set to one of the addresses to
678 * be removed. This is something which needs to be looked into
679 * when we are fixing the outstanding issues with multi-homing
680 * socket routing and failover schemes. Refer to comments in
681 * sctp_do_bind(). -daisy
682 */
683 retval = sctp_del_bind_addr(bp, sa_addr);
684
685 addr_buf += af->sockaddr_len;
686 err_bindx_rem:
687 if (retval < 0) {
688 /* Failed. Add the ones that has been removed back */
689 if (cnt > 0)
690 sctp_bindx_add(sk, addrs, cnt);
691 return retval;
692 }
693 }
694
695 return retval;
696 }
697
698 /* Send an ASCONF chunk with Delete IP address parameters to all the peers of
699 * the associations that are part of the endpoint indicating that a list of
700 * local addresses are removed from the endpoint.
701 *
702 * If any of the addresses is already in the bind address list of the
703 * association, we do not send the chunk for that association. But it will not
704 * affect other associations.
705 *
706 * Only sctp_setsockopt_bindx() is supposed to call this function.
707 */
708 static int sctp_send_asconf_del_ip(struct sock *sk,
709 struct sockaddr *addrs,
710 int addrcnt)
711 {
712 struct net *net = sock_net(sk);
713 struct sctp_sock *sp;
714 struct sctp_endpoint *ep;
715 struct sctp_association *asoc;
716 struct sctp_transport *transport;
717 struct sctp_bind_addr *bp;
718 struct sctp_chunk *chunk;
719 union sctp_addr *laddr;
720 void *addr_buf;
721 struct sctp_af *af;
722 struct sctp_sockaddr_entry *saddr;
723 int i;
724 int retval = 0;
725 int stored = 0;
726
727 chunk = NULL;
728 if (!net->sctp.addip_enable)
729 return retval;
730
731 sp = sctp_sk(sk);
732 ep = sp->ep;
733
734 pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n",
735 __func__, sk, addrs, addrcnt);
736
737 list_for_each_entry(asoc, &ep->asocs, asocs) {
738
739 if (!asoc->peer.asconf_capable)
740 continue;
741
742 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
743 continue;
744
745 if (!sctp_state(asoc, ESTABLISHED))
746 continue;
747
748 /* Check if any address in the packed array of addresses is
749 * not present in the bind address list of the association.
750 * If so, do not send the asconf chunk to its peer, but
751 * continue with other associations.
752 */
753 addr_buf = addrs;
754 for (i = 0; i < addrcnt; i++) {
755 laddr = addr_buf;
756 af = sctp_get_af_specific(laddr->v4.sin_family);
757 if (!af) {
758 retval = -EINVAL;
759 goto out;
760 }
761
762 if (!sctp_assoc_lookup_laddr(asoc, laddr))
763 break;
764
765 addr_buf += af->sockaddr_len;
766 }
767 if (i < addrcnt)
768 continue;
769
770 /* Find one address in the association's bind address list
771 * that is not in the packed array of addresses. This is to
772 * make sure that we do not delete all the addresses in the
773 * association.
774 */
775 bp = &asoc->base.bind_addr;
776 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
777 addrcnt, sp);
778 if ((laddr == NULL) && (addrcnt == 1)) {
779 if (asoc->asconf_addr_del_pending)
780 continue;
781 asoc->asconf_addr_del_pending =
782 kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
783 if (asoc->asconf_addr_del_pending == NULL) {
784 retval = -ENOMEM;
785 goto out;
786 }
787 asoc->asconf_addr_del_pending->sa.sa_family =
788 addrs->sa_family;
789 asoc->asconf_addr_del_pending->v4.sin_port =
790 htons(bp->port);
791 if (addrs->sa_family == AF_INET) {
792 struct sockaddr_in *sin;
793
794 sin = (struct sockaddr_in *)addrs;
795 asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
796 } else if (addrs->sa_family == AF_INET6) {
797 struct sockaddr_in6 *sin6;
798
799 sin6 = (struct sockaddr_in6 *)addrs;
800 asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr;
801 }
802
803 pr_debug("%s: keep the last address asoc:%p %pISc at %p\n",
804 __func__, asoc, &asoc->asconf_addr_del_pending->sa,
805 asoc->asconf_addr_del_pending);
806
807 asoc->src_out_of_asoc_ok = 1;
808 stored = 1;
809 goto skip_mkasconf;
810 }
811
812 if (laddr == NULL)
813 return -EINVAL;
814
815 /* We do not need RCU protection throughout this loop
816 * because this is done under a socket lock from the
817 * setsockopt call.
818 */
819 chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt,
820 SCTP_PARAM_DEL_IP);
821 if (!chunk) {
822 retval = -ENOMEM;
823 goto out;
824 }
825
826 skip_mkasconf:
827 /* Reset use_as_src flag for the addresses in the bind address
828 * list that are to be deleted.
829 */
830 addr_buf = addrs;
831 for (i = 0; i < addrcnt; i++) {
832 laddr = addr_buf;
833 af = sctp_get_af_specific(laddr->v4.sin_family);
834 list_for_each_entry(saddr, &bp->address_list, list) {
835 if (sctp_cmp_addr_exact(&saddr->a, laddr))
836 saddr->state = SCTP_ADDR_DEL;
837 }
838 addr_buf += af->sockaddr_len;
839 }
840
841 /* Update the route and saddr entries for all the transports
842 * as some of the addresses in the bind address list are
843 * about to be deleted and cannot be used as source addresses.
844 */
845 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
846 transports) {
847 sctp_transport_dst_release(transport);
848 sctp_transport_route(transport, NULL,
849 sctp_sk(asoc->base.sk));
850 }
851
852 if (stored)
853 /* We don't need to transmit ASCONF */
854 continue;
855 retval = sctp_send_asconf(asoc, chunk);
856 }
857 out:
858 return retval;
859 }
860
861 /* set addr events to assocs in the endpoint. ep and addr_wq must be locked */
862 int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
863 {
864 struct sock *sk = sctp_opt2sk(sp);
865 union sctp_addr *addr;
866 struct sctp_af *af;
867
868 /* It is safe to write port space in caller. */
869 addr = &addrw->a;
870 addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
871 af = sctp_get_af_specific(addr->sa.sa_family);
872 if (!af)
873 return -EINVAL;
874 if (sctp_verify_addr(sk, addr, af->sockaddr_len))
875 return -EINVAL;
876
877 if (addrw->state == SCTP_ADDR_NEW)
878 return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
879 else
880 return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
881 }
882
883 /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
884 *
885 * API 8.1
886 * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt,
887 * int flags);
888 *
889 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
890 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
891 * or IPv6 addresses.
892 *
893 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
894 * Section 3.1.2 for this usage.
895 *
896 * addrs is a pointer to an array of one or more socket addresses. Each
897 * address is contained in its appropriate structure (i.e. struct
898 * sockaddr_in or struct sockaddr_in6) the family of the address type
899 * must be used to distinguish the address length (note that this
900 * representation is termed a "packed array" of addresses). The caller
901 * specifies the number of addresses in the array with addrcnt.
902 *
903 * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns
904 * -1, and sets errno to the appropriate error code.
905 *
906 * For SCTP, the port given in each socket address must be the same, or
907 * sctp_bindx() will fail, setting errno to EINVAL.
908 *
909 * The flags parameter is formed from the bitwise OR of zero or more of
910 * the following currently defined flags:
911 *
912 * SCTP_BINDX_ADD_ADDR
913 *
914 * SCTP_BINDX_REM_ADDR
915 *
916 * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the
917 * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given
918 * addresses from the association. The two flags are mutually exclusive;
919 * if both are given, sctp_bindx() will fail with EINVAL. A caller may
920 * not remove all addresses from an association; sctp_bindx() will
921 * reject such an attempt with EINVAL.
922 *
923 * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate
924 * additional addresses with an endpoint after calling bind(). Or use
925 * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening
926 * socket is associated with so that no new association accepted will be
927 * associated with those addresses. If the endpoint supports dynamic
928 * address a SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause a
929 * endpoint to send the appropriate message to the peer to change the
930 * peers address lists.
931 *
932 * Adding and removing addresses from a connected association is
933 * optional functionality. Implementations that do not support this
934 * functionality should return EOPNOTSUPP.
935 *
936 * Basically do nothing but copying the addresses from user to kernel
937 * land and invoking either sctp_bindx_add() or sctp_bindx_rem() on the sk.
938 * This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
939 * from userspace.
940 *
941 * We don't use copy_from_user() for optimization: we first do the
942 * sanity checks (buffer size -fast- and access check-healthy
943 * pointer); if all of those succeed, then we can alloc the memory
944 * (expensive operation) needed to copy the data to kernel. Then we do
945 * the copying without checking the user space area
946 * (__copy_from_user()).
947 *
948 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
949 * it.
950 *
951 * sk The sk of the socket
952 * addrs The pointer to the addresses in user land
953 * addrssize Size of the addrs buffer
954 * op Operation to perform (add or remove, see the flags of
955 * sctp_bindx)
956 *
957 * Returns 0 if ok, <0 errno code on error.
958 */
959 static int sctp_setsockopt_bindx(struct sock *sk,
960 struct sockaddr __user *addrs,
961 int addrs_size, int op)
962 {
963 struct sockaddr *kaddrs;
964 int err;
965 int addrcnt = 0;
966 int walk_size = 0;
967 struct sockaddr *sa_addr;
968 void *addr_buf;
969 struct sctp_af *af;
970
971 pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
972 __func__, sk, addrs, addrs_size, op);
973
974 if (unlikely(addrs_size <= 0))
975 return -EINVAL;
976
977 /* Check the user passed a healthy pointer. */
978 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
979 return -EFAULT;
980
981 /* Alloc space for the address array in kernel memory. */
982 kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
983 if (unlikely(!kaddrs))
984 return -ENOMEM;
985
986 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
987 kfree(kaddrs);
988 return -EFAULT;
989 }
990
991 /* Walk through the addrs buffer and count the number of addresses. */
992 addr_buf = kaddrs;
993 while (walk_size < addrs_size) {
994 if (walk_size + sizeof(sa_family_t) > addrs_size) {
995 kfree(kaddrs);
996 return -EINVAL;
997 }
998
999 sa_addr = addr_buf;
1000 af = sctp_get_af_specific(sa_addr->sa_family);
1001
1002 /* If the address family is not supported or if this address
1003 * causes the address buffer to overflow return EINVAL.
1004 */
1005 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
1006 kfree(kaddrs);
1007 return -EINVAL;
1008 }
1009 addrcnt++;
1010 addr_buf += af->sockaddr_len;
1011 walk_size += af->sockaddr_len;
1012 }
1013
1014 /* Do the work. */
1015 switch (op) {
1016 case SCTP_BINDX_ADD_ADDR:
1017 err = sctp_bindx_add(sk, kaddrs, addrcnt);
1018 if (err)
1019 goto out;
1020 err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
1021 break;
1022
1023 case SCTP_BINDX_REM_ADDR:
1024 err = sctp_bindx_rem(sk, kaddrs, addrcnt);
1025 if (err)
1026 goto out;
1027 err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
1028 break;
1029
1030 default:
1031 err = -EINVAL;
1032 break;
1033 }
1034
1035 out:
1036 kfree(kaddrs);
1037
1038 return err;
1039 }
1040
1041 /* __sctp_connect(struct sock* sk, struct sockaddr *kaddrs, int addrs_size)
1042 *
1043 * Common routine for handling connect() and sctp_connectx().
1044 * Connect will come in with just a single address.
1045 */
1046 static int __sctp_connect(struct sock *sk,
1047 struct sockaddr *kaddrs,
1048 int addrs_size,
1049 sctp_assoc_t *assoc_id)
1050 {
1051 struct net *net = sock_net(sk);
1052 struct sctp_sock *sp;
1053 struct sctp_endpoint *ep;
1054 struct sctp_association *asoc = NULL;
1055 struct sctp_association *asoc2;
1056 struct sctp_transport *transport;
1057 union sctp_addr to;
1058 sctp_scope_t scope;
1059 long timeo;
1060 int err = 0;
1061 int addrcnt = 0;
1062 int walk_size = 0;
1063 union sctp_addr *sa_addr = NULL;
1064 void *addr_buf;
1065 unsigned short port;
1066 unsigned int f_flags = 0;
1067
1068 sp = sctp_sk(sk);
1069 ep = sp->ep;
1070
1071 /* connect() cannot be done on a socket that is already in ESTABLISHED
1072 * state - UDP-style peeled off socket or a TCP-style socket that
1073 * is already connected.
1074 * It cannot be done even on a TCP-style listening socket.
1075 */
1076 if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
1077 (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) {
1078 err = -EISCONN;
1079 goto out_free;
1080 }
1081
1082 /* Walk through the addrs buffer and count the number of addresses. */
1083 addr_buf = kaddrs;
1084 while (walk_size < addrs_size) {
1085 struct sctp_af *af;
1086
1087 if (walk_size + sizeof(sa_family_t) > addrs_size) {
1088 err = -EINVAL;
1089 goto out_free;
1090 }
1091
1092 sa_addr = addr_buf;
1093 af = sctp_get_af_specific(sa_addr->sa.sa_family);
1094
1095 /* If the address family is not supported or if this address
1096 * causes the address buffer to overflow return EINVAL.
1097 */
1098 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
1099 err = -EINVAL;
1100 goto out_free;
1101 }
1102
1103 port = ntohs(sa_addr->v4.sin_port);
1104
1105 /* Save current address so we can work with it */
1106 memcpy(&to, sa_addr, af->sockaddr_len);
1107
1108 err = sctp_verify_addr(sk, &to, af->sockaddr_len);
1109 if (err)
1110 goto out_free;
1111
1112 /* Make sure the destination port is correctly set
1113 * in all addresses.
1114 */
1115 if (asoc && asoc->peer.port && asoc->peer.port != port) {
1116 err = -EINVAL;
1117 goto out_free;
1118 }
1119
1120 /* Check if there already is a matching association on the
1121 * endpoint (other than the one created here).
1122 */
1123 asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
1124 if (asoc2 && asoc2 != asoc) {
1125 if (asoc2->state >= SCTP_STATE_ESTABLISHED)
1126 err = -EISCONN;
1127 else
1128 err = -EALREADY;
1129 goto out_free;
1130 }
1131
1132 /* If we could not find a matching association on the endpoint,
1133 * make sure that there is no peeled-off association matching
1134 * the peer address even on another socket.
1135 */
1136 if (sctp_endpoint_is_peeled_off(ep, &to)) {
1137 err = -EADDRNOTAVAIL;
1138 goto out_free;
1139 }
1140
1141 if (!asoc) {
1142 /* If a bind() or sctp_bindx() is not called prior to
1143 * an sctp_connectx() call, the system picks an
1144 * ephemeral port and will choose an address set
1145 * equivalent to binding with a wildcard address.
1146 */
1147 if (!ep->base.bind_addr.port) {
1148 if (sctp_autobind(sk)) {
1149 err = -EAGAIN;
1150 goto out_free;
1151 }
1152 } else {
1153 /*
1154 * If an unprivileged user inherits a 1-many
1155 * style socket with open associations on a
1156 * privileged port, it MAY be permitted to
1157 * accept new associations, but it SHOULD NOT
1158 * be permitted to open new associations.
1159 */
1160 if (ep->base.bind_addr.port <
1161 inet_prot_sock(net) &&
1162 !ns_capable(net->user_ns,
1163 CAP_NET_BIND_SERVICE)) {
1164 err = -EACCES;
1165 goto out_free;
1166 }
1167 }
1168
1169 scope = sctp_scope(&to);
1170 asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
1171 if (!asoc) {
1172 err = -ENOMEM;
1173 goto out_free;
1174 }
1175
1176 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
1177 GFP_KERNEL);
1178 if (err < 0) {
1179 goto out_free;
1180 }
1181
1182 }
1183
1184 /* Prime the peer's transport structures. */
1185 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
1186 SCTP_UNKNOWN);
1187 if (!transport) {
1188 err = -ENOMEM;
1189 goto out_free;
1190 }
1191
1192 addrcnt++;
1193 addr_buf += af->sockaddr_len;
1194 walk_size += af->sockaddr_len;
1195 }
1196
1197 /* In case the user of sctp_connectx() wants an association
1198 * id back, assign one now.
1199 */
1200 if (assoc_id) {
1201 err = sctp_assoc_set_id(asoc, GFP_KERNEL);
1202 if (err < 0)
1203 goto out_free;
1204 }
1205
1206 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1207 if (err < 0) {
1208 goto out_free;
1209 }
1210
1211 /* Initialize sk's dport and daddr for getpeername() */
1212 inet_sk(sk)->inet_dport = htons(asoc->peer.port);
1213 sp->pf->to_sk_daddr(sa_addr, sk);
1214 sk->sk_err = 0;
1215
1216 /* in-kernel sockets don't generally have a file allocated to them
1217 * if all they do is call sock_create_kern().
1218 */
1219 if (sk->sk_socket->file)
1220 f_flags = sk->sk_socket->file->f_flags;
1221
1222 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1223
1224 if (assoc_id)
1225 *assoc_id = asoc->assoc_id;
1226 err = sctp_wait_for_connect(asoc, &timeo);
1227 /* Note: the asoc may be freed after the return of
1228 * sctp_wait_for_connect.
1229 */
1230
1231 /* Don't free association on exit. */
1232 asoc = NULL;
1233
1234 out_free:
1235 pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
1236 __func__, asoc, kaddrs, err);
1237
1238 if (asoc) {
1239 /* sctp_primitive_ASSOCIATE may have added this association
1240 * To the hash table, try to unhash it, just in case, its a noop
1241 * if it wasn't hashed so we're safe
1242 */
1243 sctp_association_free(asoc);
1244 }
1245 return err;
1246 }
1247
1248 /* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
1249 *
1250 * API 8.9
1251 * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
1252 * sctp_assoc_t *asoc);
1253 *
1254 * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
1255 * If the sd is an IPv6 socket, the addresses passed can either be IPv4
1256 * or IPv6 addresses.
1257 *
1258 * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
1259 * Section 3.1.2 for this usage.
1260 *
1261 * addrs is a pointer to an array of one or more socket addresses. Each
1262 * address is contained in its appropriate structure (i.e. struct
1263 * sockaddr_in or struct sockaddr_in6) the family of the address type
1264 * must be used to distengish the address length (note that this
1265 * representation is termed a "packed array" of addresses). The caller
1266 * specifies the number of addresses in the array with addrcnt.
1267 *
1268 * On success, sctp_connectx() returns 0. It also sets the assoc_id to
1269 * the association id of the new association. On failure, sctp_connectx()
1270 * returns -1, and sets errno to the appropriate error code. The assoc_id
1271 * is not touched by the kernel.
1272 *
1273 * For SCTP, the port given in each socket address must be the same, or
1274 * sctp_connectx() will fail, setting errno to EINVAL.
1275 *
1276 * An application can use sctp_connectx to initiate an association with
1277 * an endpoint that is multi-homed. Much like sctp_bindx() this call
1278 * allows a caller to specify multiple addresses at which a peer can be
1279 * reached. The way the SCTP stack uses the list of addresses to set up
1280 * the association is implementation dependent. This function only
1281 * specifies that the stack will try to make use of all the addresses in
1282 * the list when needed.
1283 *
1284 * Note that the list of addresses passed in is only used for setting up
1285 * the association. It does not necessarily equal the set of addresses
1286 * the peer uses for the resulting association. If the caller wants to
1287 * find out the set of peer addresses, it must use sctp_getpaddrs() to
1288 * retrieve them after the association has been set up.
1289 *
1290 * Basically do nothing but copying the addresses from user to kernel
1291 * land and invoking either sctp_connectx(). This is used for tunneling
1292 * the sctp_connectx() request through sctp_setsockopt() from userspace.
1293 *
1294 * We don't use copy_from_user() for optimization: we first do the
1295 * sanity checks (buffer size -fast- and access check-healthy
1296 * pointer); if all of those succeed, then we can alloc the memory
1297 * (expensive operation) needed to copy the data to kernel. Then we do
1298 * the copying without checking the user space area
1299 * (__copy_from_user()).
1300 *
1301 * On exit there is no need to do sockfd_put(), sys_setsockopt() does
1302 * it.
1303 *
1304 * sk The sk of the socket
1305 * addrs The pointer to the addresses in user land
1306 * addrssize Size of the addrs buffer
1307 *
1308 * Returns >=0 if ok, <0 errno code on error.
1309 */
1310 static int __sctp_setsockopt_connectx(struct sock *sk,
1311 struct sockaddr __user *addrs,
1312 int addrs_size,
1313 sctp_assoc_t *assoc_id)
1314 {
1315 struct sockaddr *kaddrs;
1316 gfp_t gfp = GFP_KERNEL;
1317 int err = 0;
1318
1319 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
1320 __func__, sk, addrs, addrs_size);
1321
1322 if (unlikely(addrs_size <= 0))
1323 return -EINVAL;
1324
1325 /* Check the user passed a healthy pointer. */
1326 if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
1327 return -EFAULT;
1328
1329 /* Alloc space for the address array in kernel memory. */
1330 if (sk->sk_socket->file)
1331 gfp = GFP_USER | __GFP_NOWARN;
1332 kaddrs = kmalloc(addrs_size, gfp);
1333 if (unlikely(!kaddrs))
1334 return -ENOMEM;
1335
1336 if (__copy_from_user(kaddrs, addrs, addrs_size)) {
1337 err = -EFAULT;
1338 } else {
1339 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
1340 }
1341
1342 kfree(kaddrs);
1343
1344 return err;
1345 }
1346
1347 /*
1348 * This is an older interface. It's kept for backward compatibility
1349 * to the option that doesn't provide association id.
1350 */
1351 static int sctp_setsockopt_connectx_old(struct sock *sk,
1352 struct sockaddr __user *addrs,
1353 int addrs_size)
1354 {
1355 return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
1356 }
1357
1358 /*
1359 * New interface for the API. The since the API is done with a socket
1360 * option, to make it simple we feed back the association id is as a return
1361 * indication to the call. Error is always negative and association id is
1362 * always positive.
1363 */
1364 static int sctp_setsockopt_connectx(struct sock *sk,
1365 struct sockaddr __user *addrs,
1366 int addrs_size)
1367 {
1368 sctp_assoc_t assoc_id = 0;
1369 int err = 0;
1370
1371 err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
1372
1373 if (err)
1374 return err;
1375 else
1376 return assoc_id;
1377 }
1378
1379 /*
1380 * New (hopefully final) interface for the API.
1381 * We use the sctp_getaddrs_old structure so that use-space library
1382 * can avoid any unnecessary allocations. The only different part
1383 * is that we store the actual length of the address buffer into the
1384 * addrs_num structure member. That way we can re-use the existing
1385 * code.
1386 */
1387 #ifdef CONFIG_COMPAT
1388 struct compat_sctp_getaddrs_old {
1389 sctp_assoc_t assoc_id;
1390 s32 addr_num;
1391 compat_uptr_t addrs; /* struct sockaddr * */
1392 };
1393 #endif
1394
1395 static int sctp_getsockopt_connectx3(struct sock *sk, int len,
1396 char __user *optval,
1397 int __user *optlen)
1398 {
1399 struct sctp_getaddrs_old param;
1400 sctp_assoc_t assoc_id = 0;
1401 int err = 0;
1402
1403 #ifdef CONFIG_COMPAT
1404 if (in_compat_syscall()) {
1405 struct compat_sctp_getaddrs_old param32;
1406
1407 if (len < sizeof(param32))
1408 return -EINVAL;
1409 if (copy_from_user(&param32, optval, sizeof(param32)))
1410 return -EFAULT;
1411
1412 param.assoc_id = param32.assoc_id;
1413 param.addr_num = param32.addr_num;
1414 param.addrs = compat_ptr(param32.addrs);
1415 } else
1416 #endif
1417 {
1418 if (len < sizeof(param))
1419 return -EINVAL;
1420 if (copy_from_user(&param, optval, sizeof(param)))
1421 return -EFAULT;
1422 }
1423
1424 err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
1425 param.addrs, param.addr_num,
1426 &assoc_id);
1427 if (err == 0 || err == -EINPROGRESS) {
1428 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
1429 return -EFAULT;
1430 if (put_user(sizeof(assoc_id), optlen))
1431 return -EFAULT;
1432 }
1433
1434 return err;
1435 }
1436
1437 /* API 3.1.4 close() - UDP Style Syntax
1438 * Applications use close() to perform graceful shutdown (as described in
1439 * Section 10.1 of [SCTP]) on ALL the associations currently represented
1440 * by a UDP-style socket.
1441 *
1442 * The syntax is
1443 *
1444 * ret = close(int sd);
1445 *
1446 * sd - the socket descriptor of the associations to be closed.
1447 *
1448 * To gracefully shutdown a specific association represented by the
1449 * UDP-style socket, an application should use the sendmsg() call,
1450 * passing no user data, but including the appropriate flag in the
1451 * ancillary data (see Section xxxx).
1452 *
1453 * If sd in the close() call is a branched-off socket representing only
1454 * one association, the shutdown is performed on that association only.
1455 *
1456 * 4.1.6 close() - TCP Style Syntax
1457 *
1458 * Applications use close() to gracefully close down an association.
1459 *
1460 * The syntax is:
1461 *
1462 * int close(int sd);
1463 *
1464 * sd - the socket descriptor of the association to be closed.
1465 *
1466 * After an application calls close() on a socket descriptor, no further
1467 * socket operations will succeed on that descriptor.
1468 *
1469 * API 7.1.4 SO_LINGER
1470 *
1471 * An application using the TCP-style socket can use this option to
1472 * perform the SCTP ABORT primitive. The linger option structure is:
1473 *
1474 * struct linger {
1475 * int l_onoff; // option on/off
1476 * int l_linger; // linger time
1477 * };
1478 *
1479 * To enable the option, set l_onoff to 1. If the l_linger value is set
1480 * to 0, calling close() is the same as the ABORT primitive. If the
1481 * value is set to a negative value, the setsockopt() call will return
1482 * an error. If the value is set to a positive value linger_time, the
1483 * close() can be blocked for at most linger_time ms. If the graceful
1484 * shutdown phase does not finish during this period, close() will
1485 * return but the graceful shutdown phase continues in the system.
1486 */
1487 static void sctp_close(struct sock *sk, long timeout)
1488 {
1489 struct net *net = sock_net(sk);
1490 struct sctp_endpoint *ep;
1491 struct sctp_association *asoc;
1492 struct list_head *pos, *temp;
1493 unsigned int data_was_unread;
1494
1495 pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
1496
1497 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1498 sk->sk_shutdown = SHUTDOWN_MASK;
1499 sk->sk_state = SCTP_SS_CLOSING;
1500
1501 ep = sctp_sk(sk)->ep;
1502
1503 /* Clean up any skbs sitting on the receive queue. */
1504 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
1505 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
1506
1507 /* Walk all associations on an endpoint. */
1508 list_for_each_safe(pos, temp, &ep->asocs) {
1509 asoc = list_entry(pos, struct sctp_association, asocs);
1510
1511 if (sctp_style(sk, TCP)) {
1512 /* A closed association can still be in the list if
1513 * it belongs to a TCP-style listening socket that is
1514 * not yet accepted. If so, free it. If not, send an
1515 * ABORT or SHUTDOWN based on the linger options.
1516 */
1517 if (sctp_state(asoc, CLOSED)) {
1518 sctp_association_free(asoc);
1519 continue;
1520 }
1521 }
1522
1523 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
1524 !skb_queue_empty(&asoc->ulpq.reasm) ||
1525 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
1526 struct sctp_chunk *chunk;
1527
1528 chunk = sctp_make_abort_user(asoc, NULL, 0);
1529 sctp_primitive_ABORT(net, asoc, chunk);
1530 } else
1531 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1532 }
1533
1534 /* On a TCP-style socket, block for at most linger_time if set. */
1535 if (sctp_style(sk, TCP) && timeout)
1536 sctp_wait_for_close(sk, timeout);
1537
1538 /* This will run the backlog queue. */
1539 release_sock(sk);
1540
1541 /* Supposedly, no process has access to the socket, but
1542 * the net layers still may.
1543 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
1544 * held and that should be grabbed before socket lock.
1545 */
1546 spin_lock_bh(&net->sctp.addr_wq_lock);
1547 bh_lock_sock_nested(sk);
1548
1549 /* Hold the sock, since sk_common_release() will put sock_put()
1550 * and we have just a little more cleanup.
1551 */
1552 sock_hold(sk);
1553 sk_common_release(sk);
1554
1555 bh_unlock_sock(sk);
1556 spin_unlock_bh(&net->sctp.addr_wq_lock);
1557
1558 sock_put(sk);
1559
1560 SCTP_DBG_OBJCNT_DEC(sock);
1561 }
1562
1563 /* Handle EPIPE error. */
1564 static int sctp_error(struct sock *sk, int flags, int err)
1565 {
1566 if (err == -EPIPE)
1567 err = sock_error(sk) ? : -EPIPE;
1568 if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
1569 send_sig(SIGPIPE, current, 0);
1570 return err;
1571 }
1572
1573 /* API 3.1.3 sendmsg() - UDP Style Syntax
1574 *
1575 * An application uses sendmsg() and recvmsg() calls to transmit data to
1576 * and receive data from its peer.
1577 *
1578 * ssize_t sendmsg(int socket, const struct msghdr *message,
1579 * int flags);
1580 *
1581 * socket - the socket descriptor of the endpoint.
1582 * message - pointer to the msghdr structure which contains a single
1583 * user message and possibly some ancillary data.
1584 *
1585 * See Section 5 for complete description of the data
1586 * structures.
1587 *
1588 * flags - flags sent or received with the user message, see Section
1589 * 5 for complete description of the flags.
1590 *
1591 * Note: This function could use a rewrite especially when explicit
1592 * connect support comes in.
1593 */
1594 /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
1595
1596 static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
1597
1598 static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
1599 {
1600 struct net *net = sock_net(sk);
1601 struct sctp_sock *sp;
1602 struct sctp_endpoint *ep;
1603 struct sctp_association *new_asoc = NULL, *asoc = NULL;
1604 struct sctp_transport *transport, *chunk_tp;
1605 struct sctp_chunk *chunk;
1606 union sctp_addr to;
1607 struct sockaddr *msg_name = NULL;
1608 struct sctp_sndrcvinfo default_sinfo;
1609 struct sctp_sndrcvinfo *sinfo;
1610 struct sctp_initmsg *sinit;
1611 sctp_assoc_t associd = 0;
1612 sctp_cmsgs_t cmsgs = { NULL };
1613 sctp_scope_t scope;
1614 bool fill_sinfo_ttl = false, wait_connect = false;
1615 struct sctp_datamsg *datamsg;
1616 int msg_flags = msg->msg_flags;
1617 __u16 sinfo_flags = 0;
1618 long timeo;
1619 int err;
1620
1621 err = 0;
1622 sp = sctp_sk(sk);
1623 ep = sp->ep;
1624
1625 pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk,
1626 msg, msg_len, ep);
1627
1628 /* We cannot send a message over a TCP-style listening socket. */
1629 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) {
1630 err = -EPIPE;
1631 goto out_nounlock;
1632 }
1633
1634 /* Parse out the SCTP CMSGs. */
1635 err = sctp_msghdr_parse(msg, &cmsgs);
1636 if (err) {
1637 pr_debug("%s: msghdr parse err:%x\n", __func__, err);
1638 goto out_nounlock;
1639 }
1640
1641 /* Fetch the destination address for this packet. This
1642 * address only selects the association--it is not necessarily
1643 * the address we will send to.
1644 * For a peeled-off socket, msg_name is ignored.
1645 */
1646 if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
1647 int msg_namelen = msg->msg_namelen;
1648
1649 err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name,
1650 msg_namelen);
1651 if (err)
1652 return err;
1653
1654 if (msg_namelen > sizeof(to))
1655 msg_namelen = sizeof(to);
1656 memcpy(&to, msg->msg_name, msg_namelen);
1657 msg_name = msg->msg_name;
1658 }
1659
1660 sinit = cmsgs.init;
1661 if (cmsgs.sinfo != NULL) {
1662 memset(&default_sinfo, 0, sizeof(default_sinfo));
1663 default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
1664 default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
1665 default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
1666 default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
1667 default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;
1668
1669 sinfo = &default_sinfo;
1670 fill_sinfo_ttl = true;
1671 } else {
1672 sinfo = cmsgs.srinfo;
1673 }
1674 /* Did the user specify SNDINFO/SNDRCVINFO? */
1675 if (sinfo) {
1676 sinfo_flags = sinfo->sinfo_flags;
1677 associd = sinfo->sinfo_assoc_id;
1678 }
1679
1680 pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__,
1681 msg_len, sinfo_flags);
1682
1683 /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */
1684 if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) {
1685 err = -EINVAL;
1686 goto out_nounlock;
1687 }
1688
1689 /* If SCTP_EOF is set, no data can be sent. Disallow sending zero
1690 * length messages when SCTP_EOF|SCTP_ABORT is not set.
1691 * If SCTP_ABORT is set, the message length could be non zero with
1692 * the msg_iov set to the user abort reason.
1693 */
1694 if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) ||
1695 (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) {
1696 err = -EINVAL;
1697 goto out_nounlock;
1698 }
1699
1700 /* If SCTP_ADDR_OVER is set, there must be an address
1701 * specified in msg_name.
1702 */
1703 if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) {
1704 err = -EINVAL;
1705 goto out_nounlock;
1706 }
1707
1708 transport = NULL;
1709
1710 pr_debug("%s: about to look up association\n", __func__);
1711
1712 lock_sock(sk);
1713
1714 /* If a msg_name has been specified, assume this is to be used. */
1715 if (msg_name) {
1716 /* Look for a matching association on the endpoint. */
1717 asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport);
1718
1719 /* If we could not find a matching association on the
1720 * endpoint, make sure that it is not a TCP-style
1721 * socket that already has an association or there is
1722 * no peeled-off association on another socket.
1723 */
1724 if (!asoc &&
1725 ((sctp_style(sk, TCP) &&
1726 (sctp_sstate(sk, ESTABLISHED) ||
1727 sctp_sstate(sk, CLOSING))) ||
1728 sctp_endpoint_is_peeled_off(ep, &to))) {
1729 err = -EADDRNOTAVAIL;
1730 goto out_unlock;
1731 }
1732 } else {
1733 asoc = sctp_id2assoc(sk, associd);
1734 if (!asoc) {
1735 err = -EPIPE;
1736 goto out_unlock;
1737 }
1738 }
1739
1740 if (asoc) {
1741 pr_debug("%s: just looked up association:%p\n", __func__, asoc);
1742
1743 /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED
1744 * socket that has an association in CLOSED state. This can
1745 * happen when an accepted socket has an association that is
1746 * already CLOSED.
1747 */
1748 if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) {
1749 err = -EPIPE;
1750 goto out_unlock;
1751 }
1752
1753 if (sinfo_flags & SCTP_EOF) {
1754 pr_debug("%s: shutting down association:%p\n",
1755 __func__, asoc);
1756
1757 sctp_primitive_SHUTDOWN(net, asoc, NULL);
1758 err = 0;
1759 goto out_unlock;
1760 }
1761 if (sinfo_flags & SCTP_ABORT) {
1762
1763 chunk = sctp_make_abort_user(asoc, msg, msg_len);
1764 if (!chunk) {
1765 err = -ENOMEM;
1766 goto out_unlock;
1767 }
1768
1769 pr_debug("%s: aborting association:%p\n",
1770 __func__, asoc);
1771
1772 sctp_primitive_ABORT(net, asoc, chunk);
1773 err = 0;
1774 goto out_unlock;
1775 }
1776 }
1777
1778 /* Do we need to create the association? */
1779 if (!asoc) {
1780 pr_debug("%s: there is no association yet\n", __func__);
1781
1782 if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) {
1783 err = -EINVAL;
1784 goto out_unlock;
1785 }
1786
1787 /* Check for invalid stream against the stream counts,
1788 * either the default or the user specified stream counts.
1789 */
1790 if (sinfo) {
1791 if (!sinit || !sinit->sinit_num_ostreams) {
1792 /* Check against the defaults. */
1793 if (sinfo->sinfo_stream >=
1794 sp->initmsg.sinit_num_ostreams) {
1795 err = -EINVAL;
1796 goto out_unlock;
1797 }
1798 } else {
1799 /* Check against the requested. */
1800 if (sinfo->sinfo_stream >=
1801 sinit->sinit_num_ostreams) {
1802 err = -EINVAL;
1803 goto out_unlock;
1804 }
1805 }
1806 }
1807
1808 /*
1809 * API 3.1.2 bind() - UDP Style Syntax
1810 * If a bind() or sctp_bindx() is not called prior to a
1811 * sendmsg() call that initiates a new association, the
1812 * system picks an ephemeral port and will choose an address
1813 * set equivalent to binding with a wildcard address.
1814 */
1815 if (!ep->base.bind_addr.port) {
1816 if (sctp_autobind(sk)) {
1817 err = -EAGAIN;
1818 goto out_unlock;
1819 }
1820 } else {
1821 /*
1822 * If an unprivileged user inherits a one-to-many
1823 * style socket with open associations on a privileged
1824 * port, it MAY be permitted to accept new associations,
1825 * but it SHOULD NOT be permitted to open new
1826 * associations.
1827 */
1828 if (ep->base.bind_addr.port < inet_prot_sock(net) &&
1829 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
1830 err = -EACCES;
1831 goto out_unlock;
1832 }
1833 }
1834
1835 scope = sctp_scope(&to);
1836 new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
1837 if (!new_asoc) {
1838 err = -ENOMEM;
1839 goto out_unlock;
1840 }
1841 asoc = new_asoc;
1842 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
1843 if (err < 0) {
1844 err = -ENOMEM;
1845 goto out_free;
1846 }
1847
1848 /* If the SCTP_INIT ancillary data is specified, set all
1849 * the association init values accordingly.
1850 */
1851 if (sinit) {
1852 if (sinit->sinit_num_ostreams) {
1853 asoc->c.sinit_num_ostreams =
1854 sinit->sinit_num_ostreams;
1855 }
1856 if (sinit->sinit_max_instreams) {
1857 asoc->c.sinit_max_instreams =
1858 sinit->sinit_max_instreams;
1859 }
1860 if (sinit->sinit_max_attempts) {
1861 asoc->max_init_attempts
1862 = sinit->sinit_max_attempts;
1863 }
1864 if (sinit->sinit_max_init_timeo) {
1865 asoc->max_init_timeo =
1866 msecs_to_jiffies(sinit->sinit_max_init_timeo);
1867 }
1868 }
1869
1870 /* Prime the peer's transport structures. */
1871 transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN);
1872 if (!transport) {
1873 err = -ENOMEM;
1874 goto out_free;
1875 }
1876 }
1877
1878 /* ASSERT: we have a valid association at this point. */
1879 pr_debug("%s: we have a valid association\n", __func__);
1880
1881 if (!sinfo) {
1882 /* If the user didn't specify SNDINFO/SNDRCVINFO, make up
1883 * one with some defaults.
1884 */
1885 memset(&default_sinfo, 0, sizeof(default_sinfo));
1886 default_sinfo.sinfo_stream = asoc->default_stream;
1887 default_sinfo.sinfo_flags = asoc->default_flags;
1888 default_sinfo.sinfo_ppid = asoc->default_ppid;
1889 default_sinfo.sinfo_context = asoc->default_context;
1890 default_sinfo.sinfo_timetolive = asoc->default_timetolive;
1891 default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
1892
1893 sinfo = &default_sinfo;
1894 } else if (fill_sinfo_ttl) {
1895 /* In case SNDINFO was specified, we still need to fill
1896 * it with a default ttl from the assoc here.
1897 */
1898 sinfo->sinfo_timetolive = asoc->default_timetolive;
1899 }
1900
1901 /* API 7.1.7, the sndbuf size per association bounds the
1902 * maximum size of data that can be sent in a single send call.
1903 */
1904 if (msg_len > sk->sk_sndbuf) {
1905 err = -EMSGSIZE;
1906 goto out_free;
1907 }
1908
1909 if (asoc->pmtu_pending)
1910 sctp_assoc_pending_pmtu(asoc);
1911
1912 /* If fragmentation is disabled and the message length exceeds the
1913 * association fragmentation point, return EMSGSIZE. The I-D
1914 * does not specify what this error is, but this looks like
1915 * a great fit.
1916 */
1917 if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) {
1918 err = -EMSGSIZE;
1919 goto out_free;
1920 }
1921
1922 /* Check for invalid stream. */
1923 if (sinfo->sinfo_stream >= asoc->stream.outcnt) {
1924 err = -EINVAL;
1925 goto out_free;
1926 }
1927
1928 if (sctp_wspace(asoc) < msg_len)
1929 sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
1930
1931 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1932 if (!sctp_wspace(asoc)) {
1933 err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
1934 if (err)
1935 goto out_free;
1936 }
1937
1938 /* If an address is passed with the sendto/sendmsg call, it is used
1939 * to override the primary destination address in the TCP model, or
1940 * when SCTP_ADDR_OVER flag is set in the UDP model.
1941 */
1942 if ((sctp_style(sk, TCP) && msg_name) ||
1943 (sinfo_flags & SCTP_ADDR_OVER)) {
1944 chunk_tp = sctp_assoc_lookup_paddr(asoc, &to);
1945 if (!chunk_tp) {
1946 err = -EINVAL;
1947 goto out_free;
1948 }
1949 } else
1950 chunk_tp = NULL;
1951
1952 /* Auto-connect, if we aren't connected already. */
1953 if (sctp_state(asoc, CLOSED)) {
1954 err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
1955 if (err < 0)
1956 goto out_free;
1957
1958 wait_connect = true;
1959 pr_debug("%s: we associated primitively\n", __func__);
1960 }
1961
1962 /* Break the message into multiple chunks of maximum size. */
1963 datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter);
1964 if (IS_ERR(datamsg)) {
1965 err = PTR_ERR(datamsg);
1966 goto out_free;
1967 }
1968 asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
1969
1970 /* Now send the (possibly) fragmented message. */
1971 list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
1972 sctp_chunk_hold(chunk);
1973
1974 /* Do accounting for the write space. */
1975 sctp_set_owner_w(chunk);
1976
1977 chunk->transport = chunk_tp;
1978 }
1979
1980 /* Send it to the lower layers. Note: all chunks
1981 * must either fail or succeed. The lower layer
1982 * works that way today. Keep it that way or this
1983 * breaks.
1984 */
1985 err = sctp_primitive_SEND(net, asoc, datamsg);
1986 /* Did the lower layer accept the chunk? */
1987 if (err) {
1988 sctp_datamsg_free(datamsg);
1989 goto out_free;
1990 }
1991
1992 pr_debug("%s: we sent primitively\n", __func__);
1993
1994 sctp_datamsg_put(datamsg);
1995 err = msg_len;
1996
1997 if (unlikely(wait_connect)) {
1998 timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
1999 sctp_wait_for_connect(asoc, &timeo);
2000 }
2001
2002 /* If we are already past ASSOCIATE, the lower
2003 * layers are responsible for association cleanup.
2004 */
2005 goto out_unlock;
2006
2007 out_free:
2008 if (new_asoc)
2009 sctp_association_free(asoc);
2010 out_unlock:
2011 release_sock(sk);
2012
2013 out_nounlock:
2014 return sctp_error(sk, msg_flags, err);
2015
2016 #if 0
2017 do_sock_err:
2018 if (msg_len)
2019 err = msg_len;
2020 else
2021 err = sock_error(sk);
2022 goto out;
2023
2024 do_interrupted:
2025 if (msg_len)
2026 err = msg_len;
2027 goto out;
2028 #endif /* 0 */
2029 }
2030
2031 /* This is an extended version of skb_pull() that removes the data from the
2032 * start of a skb even when data is spread across the list of skb's in the
2033 * frag_list. len specifies the total amount of data that needs to be removed.
2034 * when 'len' bytes could be removed from the skb, it returns 0.
2035 * If 'len' exceeds the total skb length, it returns the no. of bytes that
2036 * could not be removed.
2037 */
2038 static int sctp_skb_pull(struct sk_buff *skb, int len)
2039 {
2040 struct sk_buff *list;
2041 int skb_len = skb_headlen(skb);
2042 int rlen;
2043
2044 if (len <= skb_len) {
2045 __skb_pull(skb, len);
2046 return 0;
2047 }
2048 len -= skb_len;
2049 __skb_pull(skb, skb_len);
2050
2051 skb_walk_frags(skb, list) {
2052 rlen = sctp_skb_pull(list, len);
2053 skb->len -= (len-rlen);
2054 skb->data_len -= (len-rlen);
2055
2056 if (!rlen)
2057 return 0;
2058
2059 len = rlen;
2060 }
2061
2062 return len;
2063 }
2064
2065 /* API 3.1.3 recvmsg() - UDP Style Syntax
2066 *
2067 * ssize_t recvmsg(int socket, struct msghdr *message,
2068 * int flags);
2069 *
2070 * socket - the socket descriptor of the endpoint.
2071 * message - pointer to the msghdr structure which contains a single
2072 * user message and possibly some ancillary data.
2073 *
2074 * See Section 5 for complete description of the data
2075 * structures.
2076 *
2077 * flags - flags sent or received with the user message, see Section
2078 * 5 for complete description of the flags.
2079 */
2080 static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
2081 int noblock, int flags, int *addr_len)
2082 {
2083 struct sctp_ulpevent *event = NULL;
2084 struct sctp_sock *sp = sctp_sk(sk);
2085 struct sk_buff *skb, *head_skb;
2086 int copied;
2087 int err = 0;
2088 int skb_len;
2089
2090 pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, "
2091 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
2092 addr_len);
2093
2094 lock_sock(sk);
2095
2096 if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
2097 !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
2098 err = -ENOTCONN;
2099 goto out;
2100 }
2101
2102 skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
2103 if (!skb)
2104 goto out;
2105
2106 /* Get the total length of the skb including any skb's in the
2107 * frag_list.
2108 */
2109 skb_len = skb->len;
2110
2111 copied = skb_len;
2112 if (copied > len)
2113 copied = len;
2114
2115 err = skb_copy_datagram_msg(skb, 0, msg, copied);
2116
2117 event = sctp_skb2event(skb);
2118
2119 if (err)
2120 goto out_free;
2121
2122 if (event->chunk && event->chunk->head_skb)
2123 head_skb = event->chunk->head_skb;
2124 else
2125 head_skb = skb;
2126 sock_recv_ts_and_drops(msg, sk, head_skb);
2127 if (sctp_ulpevent_is_notification(event)) {
2128 msg->msg_flags |= MSG_NOTIFICATION;
2129 sp->pf->event_msgname(event, msg->msg_name, addr_len);
2130 } else {
2131 sp->pf->skb_msgname(head_skb, msg->msg_name, addr_len);
2132 }
2133
2134 /* Check if we allow SCTP_NXTINFO. */
2135 if (sp->recvnxtinfo)
2136 sctp_ulpevent_read_nxtinfo(event, msg, sk);
2137 /* Check if we allow SCTP_RCVINFO. */
2138 if (sp->recvrcvinfo)
2139 sctp_ulpevent_read_rcvinfo(event, msg);
2140 /* Check if we allow SCTP_SNDRCVINFO. */
2141 if (sp->subscribe.sctp_data_io_event)
2142 sctp_ulpevent_read_sndrcvinfo(event, msg);
2143
2144 err = copied;
2145
2146 /* If skb's length exceeds the user's buffer, update the skb and
2147 * push it back to the receive_queue so that the next call to
2148 * recvmsg() will return the remaining data. Don't set MSG_EOR.
2149 */
2150 if (skb_len > copied) {
2151 msg->msg_flags &= ~MSG_EOR;
2152 if (flags & MSG_PEEK)
2153 goto out_free;
2154 sctp_skb_pull(skb, copied);
2155 skb_queue_head(&sk->sk_receive_queue, skb);
2156
2157 /* When only partial message is copied to the user, increase
2158 * rwnd by that amount. If all the data in the skb is read,
2159 * rwnd is updated when the event is freed.
2160 */
2161 if (!sctp_ulpevent_is_notification(event))
2162 sctp_assoc_rwnd_increase(event->asoc, copied);
2163 goto out;
2164 } else if ((event->msg_flags & MSG_NOTIFICATION) ||
2165 (event->msg_flags & MSG_EOR))
2166 msg->msg_flags |= MSG_EOR;
2167 else
2168 msg->msg_flags &= ~MSG_EOR;
2169
2170 out_free:
2171 if (flags & MSG_PEEK) {
2172 /* Release the skb reference acquired after peeking the skb in
2173 * sctp_skb_recv_datagram().
2174 */
2175 kfree_skb(skb);
2176 } else {
2177 /* Free the event which includes releasing the reference to
2178 * the owner of the skb, freeing the skb and updating the
2179 * rwnd.
2180 */
2181 sctp_ulpevent_free(event);
2182 }
2183 out:
2184 release_sock(sk);
2185 return err;
2186 }
2187
2188 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
2189 *
2190 * This option is a on/off flag. If enabled no SCTP message
2191 * fragmentation will be performed. Instead if a message being sent
2192 * exceeds the current PMTU size, the message will NOT be sent and
2193 * instead a error will be indicated to the user.
2194 */
2195 static int sctp_setsockopt_disable_fragments(struct sock *sk,
2196 char __user *optval,
2197 unsigned int optlen)
2198 {
2199 int val;
2200
2201 if (optlen < sizeof(int))
2202 return -EINVAL;
2203
2204 if (get_user(val, (int __user *)optval))
2205 return -EFAULT;
2206
2207 sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;
2208
2209 return 0;
2210 }
2211
2212 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2213 unsigned int optlen)
2214 {
2215 struct sctp_association *asoc;
2216 struct sctp_ulpevent *event;
2217
2218 if (optlen > sizeof(struct sctp_event_subscribe))
2219 return -EINVAL;
2220 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
2221 return -EFAULT;
2222
2223 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2224 * if there is no data to be sent or retransmit, the stack will
2225 * immediately send up this notification.
2226 */
2227 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
2228 &sctp_sk(sk)->subscribe)) {
2229 asoc = sctp_id2assoc(sk, 0);
2230
2231 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2232 event = sctp_ulpevent_make_sender_dry_event(asoc,
2233 GFP_ATOMIC);
2234 if (!event)
2235 return -ENOMEM;
2236
2237 sctp_ulpq_tail_event(&asoc->ulpq, event);
2238 }
2239 }
2240
2241 return 0;
2242 }
2243
2244 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
2245 *
2246 * This socket option is applicable to the UDP-style socket only. When
2247 * set it will cause associations that are idle for more than the
2248 * specified number of seconds to automatically close. An association
2249 * being idle is defined an association that has NOT sent or received
2250 * user data. The special value of '0' indicates that no automatic
2251 * close of any associations should be performed. The option expects an
2252 * integer defining the number of seconds of idle time before an
2253 * association is closed.
2254 */
2255 static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
2256 unsigned int optlen)
2257 {
2258 struct sctp_sock *sp = sctp_sk(sk);
2259 struct net *net = sock_net(sk);
2260
2261 /* Applicable to UDP-style socket only */
2262 if (sctp_style(sk, TCP))
2263 return -EOPNOTSUPP;
2264 if (optlen != sizeof(int))
2265 return -EINVAL;
2266 if (copy_from_user(&sp->autoclose, optval, optlen))
2267 return -EFAULT;
2268
2269 if (sp->autoclose > net->sctp.max_autoclose)
2270 sp->autoclose = net->sctp.max_autoclose;
2271
2272 return 0;
2273 }
2274
2275 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
2276 *
2277 * Applications can enable or disable heartbeats for any peer address of
2278 * an association, modify an address's heartbeat interval, force a
2279 * heartbeat to be sent immediately, and adjust the address's maximum
2280 * number of retransmissions sent before an address is considered
2281 * unreachable. The following structure is used to access and modify an
2282 * address's parameters:
2283 *
2284 * struct sctp_paddrparams {
2285 * sctp_assoc_t spp_assoc_id;
2286 * struct sockaddr_storage spp_address;
2287 * uint32_t spp_hbinterval;
2288 * uint16_t spp_pathmaxrxt;
2289 * uint32_t spp_pathmtu;
2290 * uint32_t spp_sackdelay;
2291 * uint32_t spp_flags;
2292 * };
2293 *
2294 * spp_assoc_id - (one-to-many style socket) This is filled in the
2295 * application, and identifies the association for
2296 * this query.
2297 * spp_address - This specifies which address is of interest.
2298 * spp_hbinterval - This contains the value of the heartbeat interval,
2299 * in milliseconds. If a value of zero
2300 * is present in this field then no changes are to
2301 * be made to this parameter.
2302 * spp_pathmaxrxt - This contains the maximum number of
2303 * retransmissions before this address shall be
2304 * considered unreachable. If a value of zero
2305 * is present in this field then no changes are to
2306 * be made to this parameter.
2307 * spp_pathmtu - When Path MTU discovery is disabled the value
2308 * specified here will be the "fixed" path mtu.
2309 * Note that if the spp_address field is empty
2310 * then all associations on this address will
2311 * have this fixed path mtu set upon them.
2312 *
2313 * spp_sackdelay - When delayed sack is enabled, this value specifies
2314 * the number of milliseconds that sacks will be delayed
2315 * for. This value will apply to all addresses of an
2316 * association if the spp_address field is empty. Note
2317 * also, that if delayed sack is enabled and this
2318 * value is set to 0, no change is made to the last
2319 * recorded delayed sack timer value.
2320 *
2321 * spp_flags - These flags are used to control various features
2322 * on an association. The flag field may contain
2323 * zero or more of the following options.
2324 *
2325 * SPP_HB_ENABLE - Enable heartbeats on the
2326 * specified address. Note that if the address
2327 * field is empty all addresses for the association
2328 * have heartbeats enabled upon them.
2329 *
2330 * SPP_HB_DISABLE - Disable heartbeats on the
2331 * speicifed address. Note that if the address
2332 * field is empty all addresses for the association
2333 * will have their heartbeats disabled. Note also
2334 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
2335 * mutually exclusive, only one of these two should
2336 * be specified. Enabling both fields will have
2337 * undetermined results.
2338 *
2339 * SPP_HB_DEMAND - Request a user initiated heartbeat
2340 * to be made immediately.
2341 *
2342 * SPP_HB_TIME_IS_ZERO - Specify's that the time for
2343 * heartbeat delayis to be set to the value of 0
2344 * milliseconds.
2345 *
2346 * SPP_PMTUD_ENABLE - This field will enable PMTU
2347 * discovery upon the specified address. Note that
2348 * if the address feild is empty then all addresses
2349 * on the association are effected.
2350 *
2351 * SPP_PMTUD_DISABLE - This field will disable PMTU
2352 * discovery upon the specified address. Note that
2353 * if the address feild is empty then all addresses
2354 * on the association are effected. Not also that
2355 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
2356 * exclusive. Enabling both will have undetermined
2357 * results.
2358 *
2359 * SPP_SACKDELAY_ENABLE - Setting this flag turns
2360 * on delayed sack. The time specified in spp_sackdelay
2361 * is used to specify the sack delay for this address. Note
2362 * that if spp_address is empty then all addresses will
2363 * enable delayed sack and take on the sack delay
2364 * value specified in spp_sackdelay.
2365 * SPP_SACKDELAY_DISABLE - Setting this flag turns
2366 * off delayed sack. If the spp_address field is blank then
2367 * delayed sack is disabled for the entire association. Note
2368 * also that this field is mutually exclusive to
2369 * SPP_SACKDELAY_ENABLE, setting both will have undefined
2370 * results.
2371 */
2372 static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2373 struct sctp_transport *trans,
2374 struct sctp_association *asoc,
2375 struct sctp_sock *sp,
2376 int hb_change,
2377 int pmtud_change,
2378 int sackdelay_change)
2379 {
2380 int error;
2381
2382 if (params->spp_flags & SPP_HB_DEMAND && trans) {
2383 struct net *net = sock_net(trans->asoc->base.sk);
2384
2385 error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
2386 if (error)
2387 return error;
2388 }
2389
2390 /* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
2391 * this field is ignored. Note also that a value of zero indicates
2392 * the current setting should be left unchanged.
2393 */
2394 if (params->spp_flags & SPP_HB_ENABLE) {
2395
2396 /* Re-zero the interval if the SPP_HB_TIME_IS_ZERO is
2397 * set. This lets us use 0 value when this flag
2398 * is set.
2399 */
2400 if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
2401 params->spp_hbinterval = 0;
2402
2403 if (params->spp_hbinterval ||
2404 (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
2405 if (trans) {
2406 trans->hbinterval =
2407 msecs_to_jiffies(params->spp_hbinterval);
2408 } else if (asoc) {
2409 asoc->hbinterval =
2410 msecs_to_jiffies(params->spp_hbinterval);
2411 } else {
2412 sp->hbinterval = params->spp_hbinterval;
2413 }
2414 }
2415 }
2416
2417 if (hb_change) {
2418 if (trans) {
2419 trans->param_flags =
2420 (trans->param_flags & ~SPP_HB) | hb_change;
2421 } else if (asoc) {
2422 asoc->param_flags =
2423 (asoc->param_flags & ~SPP_HB) | hb_change;
2424 } else {
2425 sp->param_flags =
2426 (sp->param_flags & ~SPP_HB) | hb_change;
2427 }
2428 }
2429
2430 /* When Path MTU discovery is disabled the value specified here will
2431 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
2432 * include the flag SPP_PMTUD_DISABLE for this field to have any
2433 * effect).
2434 */
2435 if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
2436 if (trans) {
2437 trans->pathmtu = params->spp_pathmtu;
2438 sctp_assoc_sync_pmtu(asoc);
2439 } else if (asoc) {
2440 asoc->pathmtu = params->spp_pathmtu;
2441 } else {
2442 sp->pathmtu = params->spp_pathmtu;
2443 }
2444 }
2445
2446 if (pmtud_change) {
2447 if (trans) {
2448 int update = (trans->param_flags & SPP_PMTUD_DISABLE) &&
2449 (params->spp_flags & SPP_PMTUD_ENABLE);
2450 trans->param_flags =
2451 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2452 if (update) {
2453 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2454 sctp_assoc_sync_pmtu(asoc);
2455 }
2456 } else if (asoc) {
2457 asoc->param_flags =
2458 (asoc->param_flags & ~SPP_PMTUD) | pmtud_change;
2459 } else {
2460 sp->param_flags =
2461 (sp->param_flags & ~SPP_PMTUD) | pmtud_change;
2462 }
2463 }
2464
2465 /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the
2466 * value of this field is ignored. Note also that a value of zero
2467 * indicates the current setting should be left unchanged.
2468 */
2469 if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) {
2470 if (trans) {
2471 trans->sackdelay =
2472 msecs_to_jiffies(params->spp_sackdelay);
2473 } else if (asoc) {
2474 asoc->sackdelay =
2475 msecs_to_jiffies(params->spp_sackdelay);
2476 } else {
2477 sp->sackdelay = params->spp_sackdelay;
2478 }
2479 }
2480
2481 if (sackdelay_change) {
2482 if (trans) {
2483 trans->param_flags =
2484 (trans->param_flags & ~SPP_SACKDELAY) |
2485 sackdelay_change;
2486 } else if (asoc) {
2487 asoc->param_flags =
2488 (asoc->param_flags & ~SPP_SACKDELAY) |
2489 sackdelay_change;
2490 } else {
2491 sp->param_flags =
2492 (sp->param_flags & ~SPP_SACKDELAY) |
2493 sackdelay_change;
2494 }
2495 }
2496
2497 /* Note that a value of zero indicates the current setting should be
2498 left unchanged.
2499 */
2500 if (params->spp_pathmaxrxt) {
2501 if (trans) {
2502 trans->pathmaxrxt = params->spp_pathmaxrxt;
2503 } else if (asoc) {
2504 asoc->pathmaxrxt = params->spp_pathmaxrxt;
2505 } else {
2506 sp->pathmaxrxt = params->spp_pathmaxrxt;
2507 }
2508 }
2509
2510 return 0;
2511 }
2512
2513 static int sctp_setsockopt_peer_addr_params(struct sock *sk,
2514 char __user *optval,
2515 unsigned int optlen)
2516 {
2517 struct sctp_paddrparams params;
2518 struct sctp_transport *trans = NULL;
2519 struct sctp_association *asoc = NULL;
2520 struct sctp_sock *sp = sctp_sk(sk);
2521 int error;
2522 int hb_change, pmtud_change, sackdelay_change;
2523
2524 if (optlen != sizeof(struct sctp_paddrparams))
2525 return -EINVAL;
2526
2527 if (copy_from_user(&params, optval, optlen))
2528 return -EFAULT;
2529
2530 /* Validate flags and value parameters. */
2531 hb_change = params.spp_flags & SPP_HB;
2532 pmtud_change = params.spp_flags & SPP_PMTUD;
2533 sackdelay_change = params.spp_flags & SPP_SACKDELAY;
2534
2535 if (hb_change == SPP_HB ||
2536 pmtud_change == SPP_PMTUD ||
2537 sackdelay_change == SPP_SACKDELAY ||
2538 params.spp_sackdelay > 500 ||
2539 (params.spp_pathmtu &&
2540 params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
2541 return -EINVAL;
2542
2543 /* If an address other than INADDR_ANY is specified, and
2544 * no transport is found, then the request is invalid.
2545 */
2546 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
2547 trans = sctp_addr_id2transport(sk, &params.spp_address,
2548 params.spp_assoc_id);
2549 if (!trans)
2550 return -EINVAL;
2551 }
2552
2553 /* Get association, if assoc_id != 0 and the socket is a one
2554 * to many style socket, and an association was not found, then
2555 * the id was invalid.
2556 */
2557 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
2558 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP))
2559 return -EINVAL;
2560
2561 /* Heartbeat demand can only be sent on a transport or
2562 * association, but not a socket.
2563 */
2564 if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
2565 return -EINVAL;
2566
2567 /* Process parameters. */
2568 error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2569 hb_change, pmtud_change,
2570 sackdelay_change);
2571
2572 if (error)
2573 return error;
2574
2575 /* If changes are for association, also apply parameters to each
2576 * transport.
2577 */
2578 if (!trans && asoc) {
2579 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2580 transports) {
2581 sctp_apply_peer_addr_params(&params, trans, asoc, sp,
2582 hb_change, pmtud_change,
2583 sackdelay_change);
2584 }
2585 }
2586
2587 return 0;
2588 }
2589
2590 static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags)
2591 {
2592 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE;
2593 }
2594
2595 static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags)
2596 {
2597 return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE;
2598 }
2599
2600 /*
2601 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
2602 *
2603 * This option will effect the way delayed acks are performed. This
2604 * option allows you to get or set the delayed ack time, in
2605 * milliseconds. It also allows changing the delayed ack frequency.
2606 * Changing the frequency to 1 disables the delayed sack algorithm. If
2607 * the assoc_id is 0, then this sets or gets the endpoints default
2608 * values. If the assoc_id field is non-zero, then the set or get
2609 * effects the specified association for the one to many model (the
2610 * assoc_id field is ignored by the one to one model). Note that if
2611 * sack_delay or sack_freq are 0 when setting this option, then the
2612 * current values will remain unchanged.
2613 *
2614 * struct sctp_sack_info {
2615 * sctp_assoc_t sack_assoc_id;
2616 * uint32_t sack_delay;
2617 * uint32_t sack_freq;
2618 * };
2619 *
2620 * sack_assoc_id - This parameter, indicates which association the user
2621 * is performing an action upon. Note that if this field's value is
2622 * zero then the endpoints default value is changed (effecting future
2623 * associations only).
2624 *
2625 * sack_delay - This parameter contains the number of milliseconds that
2626 * the user is requesting the delayed ACK timer be set to. Note that
2627 * this value is defined in the standard to be between 200 and 500
2628 * milliseconds.
2629 *
2630 * sack_freq - This parameter contains the number of packets that must
2631 * be received before a sack is sent without waiting for the delay
2632 * timer to expire. The default value for this is 2, setting this
2633 * value to 1 will disable the delayed sack algorithm.
2634 */
2635
2636 static int sctp_setsockopt_delayed_ack(struct sock *sk,
2637 char __user *optval, unsigned int optlen)
2638 {
2639 struct sctp_sack_info params;
2640 struct sctp_transport *trans = NULL;
2641 struct sctp_association *asoc = NULL;
2642 struct sctp_sock *sp = sctp_sk(sk);
2643
2644 if (optlen == sizeof(struct sctp_sack_info)) {
2645 if (copy_from_user(&params, optval, optlen))
2646 return -EFAULT;
2647
2648 if (params.sack_delay == 0 && params.sack_freq == 0)
2649 return 0;
2650 } else if (optlen == sizeof(struct sctp_assoc_value)) {
2651 pr_warn_ratelimited(DEPRECATED
2652 "%s (pid %d) "
2653 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
2654 "Use struct sctp_sack_info instead\n",
2655 current->comm, task_pid_nr(current));
2656 if (copy_from_user(&params, optval, optlen))
2657 return -EFAULT;
2658
2659 if (params.sack_delay == 0)
2660 params.sack_freq = 1;
2661 else
2662 params.sack_freq = 0;
2663 } else
2664 return -EINVAL;
2665
2666 /* Validate value parameter. */
2667 if (params.sack_delay > 500)
2668 return -EINVAL;
2669
2670 /* Get association, if sack_assoc_id != 0 and the socket is a one
2671 * to many style socket, and an association was not found, then
2672 * the id was invalid.
2673 */
2674 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
2675 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
2676 return -EINVAL;
2677
2678 if (params.sack_delay) {
2679 if (asoc) {
2680 asoc->sackdelay =
2681 msecs_to_jiffies(params.sack_delay);
2682 asoc->param_flags =
2683 sctp_spp_sackdelay_enable(asoc->param_flags);
2684 } else {
2685 sp->sackdelay = params.sack_delay;
2686 sp->param_flags =
2687 sctp_spp_sackdelay_enable(sp->param_flags);
2688 }
2689 }
2690
2691 if (params.sack_freq == 1) {
2692 if (asoc) {
2693 asoc->param_flags =
2694 sctp_spp_sackdelay_disable(asoc->param_flags);
2695 } else {
2696 sp->param_flags =
2697 sctp_spp_sackdelay_disable(sp->param_flags);
2698 }
2699 } else if (params.sack_freq > 1) {
2700 if (asoc) {
2701 asoc->sackfreq = params.sack_freq;
2702 asoc->param_flags =
2703 sctp_spp_sackdelay_enable(asoc->param_flags);
2704 } else {
2705 sp->sackfreq = params.sack_freq;
2706 sp->param_flags =
2707 sctp_spp_sackdelay_enable(sp->param_flags);
2708 }
2709 }
2710
2711 /* If change is for association, also apply to each transport. */
2712 if (asoc) {
2713 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
2714 transports) {
2715 if (params.sack_delay) {
2716 trans->sackdelay =
2717 msecs_to_jiffies(params.sack_delay);
2718 trans->param_flags =
2719 sctp_spp_sackdelay_enable(trans->param_flags);
2720 }
2721 if (params.sack_freq == 1) {
2722 trans->param_flags =
2723 sctp_spp_sackdelay_disable(trans->param_flags);
2724 } else if (params.sack_freq > 1) {
2725 trans->sackfreq = params.sack_freq;
2726 trans->param_flags =
2727 sctp_spp_sackdelay_enable(trans->param_flags);
2728 }
2729 }
2730 }
2731
2732 return 0;
2733 }
2734
2735 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
2736 *
2737 * Applications can specify protocol parameters for the default association
2738 * initialization. The option name argument to setsockopt() and getsockopt()
2739 * is SCTP_INITMSG.
2740 *
2741 * Setting initialization parameters is effective only on an unconnected
2742 * socket (for UDP-style sockets only future associations are effected
2743 * by the change). With TCP-style sockets, this option is inherited by
2744 * sockets derived from a listener socket.
2745 */
2746 static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
2747 {
2748 struct sctp_initmsg sinit;
2749 struct sctp_sock *sp = sctp_sk(sk);
2750
2751 if (optlen != sizeof(struct sctp_initmsg))
2752 return -EINVAL;
2753 if (copy_from_user(&sinit, optval, optlen))
2754 return -EFAULT;
2755
2756 if (sinit.sinit_num_ostreams)
2757 sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
2758 if (sinit.sinit_max_instreams)
2759 sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
2760 if (sinit.sinit_max_attempts)
2761 sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
2762 if (sinit.sinit_max_init_timeo)
2763 sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
2764
2765 return 0;
2766 }
2767
2768 /*
2769 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
2770 *
2771 * Applications that wish to use the sendto() system call may wish to
2772 * specify a default set of parameters that would normally be supplied
2773 * through the inclusion of ancillary data. This socket option allows
2774 * such an application to set the default sctp_sndrcvinfo structure.
2775 * The application that wishes to use this socket option simply passes
2776 * in to this call the sctp_sndrcvinfo structure defined in Section
2777 * 5.2.2) The input parameters accepted by this call include
2778 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
2779 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
2780 * to this call if the caller is using the UDP model.
2781 */
2782 static int sctp_setsockopt_default_send_param(struct sock *sk,
2783 char __user *optval,
2784 unsigned int optlen)
2785 {
2786 struct sctp_sock *sp = sctp_sk(sk);
2787 struct sctp_association *asoc;
2788 struct sctp_sndrcvinfo info;
2789
2790 if (optlen != sizeof(info))
2791 return -EINVAL;
2792 if (copy_from_user(&info, optval, optlen))
2793 return -EFAULT;
2794 if (info.sinfo_flags &
2795 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
2796 SCTP_ABORT | SCTP_EOF))
2797 return -EINVAL;
2798
2799 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
2800 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
2801 return -EINVAL;
2802 if (asoc) {
2803 asoc->default_stream = info.sinfo_stream;
2804 asoc->default_flags = info.sinfo_flags;
2805 asoc->default_ppid = info.sinfo_ppid;
2806 asoc->default_context = info.sinfo_context;
2807 asoc->default_timetolive = info.sinfo_timetolive;
2808 } else {
2809 sp->default_stream = info.sinfo_stream;
2810 sp->default_flags = info.sinfo_flags;
2811 sp->default_ppid = info.sinfo_ppid;
2812 sp->default_context = info.sinfo_context;
2813 sp->default_timetolive = info.sinfo_timetolive;
2814 }
2815
2816 return 0;
2817 }
2818
2819 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters
2820 * (SCTP_DEFAULT_SNDINFO)
2821 */
2822 static int sctp_setsockopt_default_sndinfo(struct sock *sk,
2823 char __user *optval,
2824 unsigned int optlen)
2825 {
2826 struct sctp_sock *sp = sctp_sk(sk);
2827 struct sctp_association *asoc;
2828 struct sctp_sndinfo info;
2829
2830 if (optlen != sizeof(info))
2831 return -EINVAL;
2832 if (copy_from_user(&info, optval, optlen))
2833 return -EFAULT;
2834 if (info.snd_flags &
2835 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
2836 SCTP_ABORT | SCTP_EOF))
2837 return -EINVAL;
2838
2839 asoc = sctp_id2assoc(sk, info.snd_assoc_id);
2840 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
2841 return -EINVAL;
2842 if (asoc) {
2843 asoc->default_stream = info.snd_sid;
2844 asoc->default_flags = info.snd_flags;
2845 asoc->default_ppid = info.snd_ppid;
2846 asoc->default_context = info.snd_context;
2847 } else {
2848 sp->default_stream = info.snd_sid;
2849 sp->default_flags = info.snd_flags;
2850 sp->default_ppid = info.snd_ppid;
2851 sp->default_context = info.snd_context;
2852 }
2853
2854 return 0;
2855 }
2856
2857 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
2858 *
2859 * Requests that the local SCTP stack use the enclosed peer address as
2860 * the association primary. The enclosed address must be one of the
2861 * association peer's addresses.
2862 */
2863 static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
2864 unsigned int optlen)
2865 {
2866 struct sctp_prim prim;
2867 struct sctp_transport *trans;
2868
2869 if (optlen != sizeof(struct sctp_prim))
2870 return -EINVAL;
2871
2872 if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
2873 return -EFAULT;
2874
2875 trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
2876 if (!trans)
2877 return -EINVAL;
2878
2879 sctp_assoc_set_primary(trans->asoc, trans);
2880
2881 return 0;
2882 }
2883
2884 /*
2885 * 7.1.5 SCTP_NODELAY
2886 *
2887 * Turn on/off any Nagle-like algorithm. This means that packets are
2888 * generally sent as soon as possible and no unnecessary delays are
2889 * introduced, at the cost of more packets in the network. Expects an
2890 * integer boolean flag.
2891 */
2892 static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
2893 unsigned int optlen)
2894 {
2895 int val;
2896
2897 if (optlen < sizeof(int))
2898 return -EINVAL;
2899 if (get_user(val, (int __user *)optval))
2900 return -EFAULT;
2901
2902 sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
2903 return 0;
2904 }
2905
2906 /*
2907 *
2908 * 7.1.1 SCTP_RTOINFO
2909 *
2910 * The protocol parameters used to initialize and bound retransmission
2911 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
2912 * and modify these parameters.
2913 * All parameters are time values, in milliseconds. A value of 0, when
2914 * modifying the parameters, indicates that the current value should not
2915 * be changed.
2916 *
2917 */
2918 static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
2919 {
2920 struct sctp_rtoinfo rtoinfo;
2921 struct sctp_association *asoc;
2922 unsigned long rto_min, rto_max;
2923 struct sctp_sock *sp = sctp_sk(sk);
2924
2925 if (optlen != sizeof (struct sctp_rtoinfo))
2926 return -EINVAL;
2927
2928 if (copy_from_user(&rtoinfo, optval, optlen))
2929 return -EFAULT;
2930
2931 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
2932
2933 /* Set the values to the specific association */
2934 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
2935 return -EINVAL;
2936
2937 rto_max = rtoinfo.srto_max;
2938 rto_min = rtoinfo.srto_min;
2939
2940 if (rto_max)
2941 rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
2942 else
2943 rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
2944
2945 if (rto_min)
2946 rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
2947 else
2948 rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
2949
2950 if (rto_min > rto_max)
2951 return -EINVAL;
2952
2953 if (asoc) {
2954 if (rtoinfo.srto_initial != 0)
2955 asoc->rto_initial =
2956 msecs_to_jiffies(rtoinfo.srto_initial);
2957 asoc->rto_max = rto_max;
2958 asoc->rto_min = rto_min;
2959 } else {
2960 /* If there is no association or the association-id = 0
2961 * set the values to the endpoint.
2962 */
2963 if (rtoinfo.srto_initial != 0)
2964 sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
2965 sp->rtoinfo.srto_max = rto_max;
2966 sp->rtoinfo.srto_min = rto_min;
2967 }
2968
2969 return 0;
2970 }
2971
2972 /*
2973 *
2974 * 7.1.2 SCTP_ASSOCINFO
2975 *
2976 * This option is used to tune the maximum retransmission attempts
2977 * of the association.
2978 * Returns an error if the new association retransmission value is
2979 * greater than the sum of the retransmission value of the peer.
2980 * See [SCTP] for more information.
2981 *
2982 */
2983 static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
2984 {
2985
2986 struct sctp_assocparams assocparams;
2987 struct sctp_association *asoc;
2988
2989 if (optlen != sizeof(struct sctp_assocparams))
2990 return -EINVAL;
2991 if (copy_from_user(&assocparams, optval, optlen))
2992 return -EFAULT;
2993
2994 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
2995
2996 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
2997 return -EINVAL;
2998
2999 /* Set the values to the specific association */
3000 if (asoc) {
3001 if (assocparams.sasoc_asocmaxrxt != 0) {
3002 __u32 path_sum = 0;
3003 int paths = 0;
3004 struct sctp_transport *peer_addr;
3005
3006 list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
3007 transports) {
3008 path_sum += peer_addr->pathmaxrxt;
3009 paths++;
3010 }
3011
3012 /* Only validate asocmaxrxt if we have more than
3013 * one path/transport. We do this because path
3014 * retransmissions are only counted when we have more
3015 * then one path.
3016 */
3017 if (paths > 1 &&
3018 assocparams.sasoc_asocmaxrxt > path_sum)
3019 return -EINVAL;
3020
3021 asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
3022 }
3023
3024 if (assocparams.sasoc_cookie_life != 0)
3025 asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life);
3026 } else {
3027 /* Set the values to the endpoint */
3028 struct sctp_sock *sp = sctp_sk(sk);
3029
3030 if (assocparams.sasoc_asocmaxrxt != 0)
3031 sp->assocparams.sasoc_asocmaxrxt =
3032 assocparams.sasoc_asocmaxrxt;
3033 if (assocparams.sasoc_cookie_life != 0)
3034 sp->assocparams.sasoc_cookie_life =
3035 assocparams.sasoc_cookie_life;
3036 }
3037 return 0;
3038 }
3039
3040 /*
3041 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
3042 *
3043 * This socket option is a boolean flag which turns on or off mapped V4
3044 * addresses. If this option is turned on and the socket is type
3045 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
3046 * If this option is turned off, then no mapping will be done of V4
3047 * addresses and a user will receive both PF_INET6 and PF_INET type
3048 * addresses on the socket.
3049 */
3050 static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
3051 {
3052 int val;
3053 struct sctp_sock *sp = sctp_sk(sk);
3054
3055 if (optlen < sizeof(int))
3056 return -EINVAL;
3057 if (get_user(val, (int __user *)optval))
3058 return -EFAULT;
3059 if (val)
3060 sp->v4mapped = 1;
3061 else
3062 sp->v4mapped = 0;
3063
3064 return 0;
3065 }
3066
3067 /*
3068 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
3069 * This option will get or set the maximum size to put in any outgoing
3070 * SCTP DATA chunk. If a message is larger than this size it will be
3071 * fragmented by SCTP into the specified size. Note that the underlying
3072 * SCTP implementation may fragment into smaller sized chunks when the
3073 * PMTU of the underlying association is smaller than the value set by
3074 * the user. The default value for this option is '0' which indicates
3075 * the user is NOT limiting fragmentation and only the PMTU will effect
3076 * SCTP's choice of DATA chunk size. Note also that values set larger
3077 * than the maximum size of an IP datagram will effectively let SCTP
3078 * control fragmentation (i.e. the same as setting this option to 0).
3079 *
3080 * The following structure is used to access and modify this parameter:
3081 *
3082 * struct sctp_assoc_value {
3083 * sctp_assoc_t assoc_id;
3084 * uint32_t assoc_value;
3085 * };
3086 *
3087 * assoc_id: This parameter is ignored for one-to-one style sockets.
3088 * For one-to-many style sockets this parameter indicates which
3089 * association the user is performing an action upon. Note that if
3090 * this field's value is zero then the endpoints default value is
3091 * changed (effecting future associations only).
3092 * assoc_value: This parameter specifies the maximum size in bytes.
3093 */
3094 static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
3095 {
3096 struct sctp_assoc_value params;
3097 struct sctp_association *asoc;
3098 struct sctp_sock *sp = sctp_sk(sk);
3099 int val;
3100
3101 if (optlen == sizeof(int)) {
3102 pr_warn_ratelimited(DEPRECATED
3103 "%s (pid %d) "
3104 "Use of int in maxseg socket option.\n"
3105 "Use struct sctp_assoc_value instead\n",
3106 current->comm, task_pid_nr(current));
3107 if (copy_from_user(&val, optval, optlen))
3108 return -EFAULT;
3109 params.assoc_id = 0;
3110 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3111 if (copy_from_user(&params, optval, optlen))
3112 return -EFAULT;
3113 val = params.assoc_value;
3114 } else
3115 return -EINVAL;
3116
3117 if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)))
3118 return -EINVAL;
3119
3120 asoc = sctp_id2assoc(sk, params.assoc_id);
3121 if (!asoc && params.assoc_id && sctp_style(sk, UDP))
3122 return -EINVAL;
3123
3124 if (asoc) {
3125 if (val == 0) {
3126 val = asoc->pathmtu;
3127 val -= sp->pf->af->net_header_len;
3128 val -= sizeof(struct sctphdr) +
3129 sizeof(struct sctp_data_chunk);
3130 }
3131 asoc->user_frag = val;
3132 asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
3133 } else {
3134 sp->user_frag = val;
3135 }
3136
3137 return 0;
3138 }
3139
3140
3141 /*
3142 * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR)
3143 *
3144 * Requests that the peer mark the enclosed address as the association
3145 * primary. The enclosed address must be one of the association's
3146 * locally bound addresses. The following structure is used to make a
3147 * set primary request:
3148 */
3149 static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
3150 unsigned int optlen)
3151 {
3152 struct net *net = sock_net(sk);
3153 struct sctp_sock *sp;
3154 struct sctp_association *asoc = NULL;
3155 struct sctp_setpeerprim prim;
3156 struct sctp_chunk *chunk;
3157 struct sctp_af *af;
3158 int err;
3159
3160 sp = sctp_sk(sk);
3161
3162 if (!net->sctp.addip_enable)
3163 return -EPERM;
3164
3165 if (optlen != sizeof(struct sctp_setpeerprim))
3166 return -EINVAL;
3167
3168 if (copy_from_user(&prim, optval, optlen))
3169 return -EFAULT;
3170
3171 asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
3172 if (!asoc)
3173 return -EINVAL;
3174
3175 if (!asoc->peer.asconf_capable)
3176 return -EPERM;
3177
3178 if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
3179 return -EPERM;
3180
3181 if (!sctp_state(asoc, ESTABLISHED))
3182 return -ENOTCONN;
3183
3184 af = sctp_get_af_specific(prim.sspp_addr.ss_family);
3185 if (!af)
3186 return -EINVAL;
3187
3188 if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
3189 return -EADDRNOTAVAIL;
3190
3191 if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
3192 return -EADDRNOTAVAIL;
3193
3194 /* Create an ASCONF chunk with SET_PRIMARY parameter */
3195 chunk = sctp_make_asconf_set_prim(asoc,
3196 (union sctp_addr *)&prim.sspp_addr);
3197 if (!chunk)
3198 return -ENOMEM;
3199
3200 err = sctp_send_asconf(asoc, chunk);
3201
3202 pr_debug("%s: we set peer primary addr primitively\n", __func__);
3203
3204 return err;
3205 }
3206
3207 static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
3208 unsigned int optlen)
3209 {
3210 struct sctp_setadaptation adaptation;
3211
3212 if (optlen != sizeof(struct sctp_setadaptation))
3213 return -EINVAL;
3214 if (copy_from_user(&adaptation, optval, optlen))
3215 return -EFAULT;
3216
3217 sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
3218
3219 return 0;
3220 }
3221
3222 /*
3223 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
3224 *
3225 * The context field in the sctp_sndrcvinfo structure is normally only
3226 * used when a failed message is retrieved holding the value that was
3227 * sent down on the actual send call. This option allows the setting of
3228 * a default context on an association basis that will be received on
3229 * reading messages from the peer. This is especially helpful in the
3230 * one-2-many model for an application to keep some reference to an
3231 * internal state machine that is processing messages on the
3232 * association. Note that the setting of this value only effects
3233 * received messages from the peer and does not effect the value that is
3234 * saved with outbound messages.
3235 */
3236 static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
3237 unsigned int optlen)
3238 {
3239 struct sctp_assoc_value params;
3240 struct sctp_sock *sp;
3241 struct sctp_association *asoc;
3242
3243 if (optlen != sizeof(struct sctp_assoc_value))
3244 return -EINVAL;
3245 if (copy_from_user(&params, optval, optlen))
3246 return -EFAULT;
3247
3248 sp = sctp_sk(sk);
3249
3250 if (params.assoc_id != 0) {
3251 asoc = sctp_id2assoc(sk, params.assoc_id);
3252 if (!asoc)
3253 return -EINVAL;
3254 asoc->default_rcv_context = params.assoc_value;
3255 } else {
3256 sp->default_rcv_context = params.assoc_value;
3257 }
3258
3259 return 0;
3260 }
3261
3262 /*
3263 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
3264 *
3265 * This options will at a minimum specify if the implementation is doing
3266 * fragmented interleave. Fragmented interleave, for a one to many
3267 * socket, is when subsequent calls to receive a message may return
3268 * parts of messages from different associations. Some implementations
3269 * may allow you to turn this value on or off. If so, when turned off,
3270 * no fragment interleave will occur (which will cause a head of line
3271 * blocking amongst multiple associations sharing the same one to many
3272 * socket). When this option is turned on, then each receive call may
3273 * come from a different association (thus the user must receive data
3274 * with the extended calls (e.g. sctp_recvmsg) to keep track of which
3275 * association each receive belongs to.
3276 *
3277 * This option takes a boolean value. A non-zero value indicates that
3278 * fragmented interleave is on. A value of zero indicates that
3279 * fragmented interleave is off.
3280 *
3281 * Note that it is important that an implementation that allows this
3282 * option to be turned on, have it off by default. Otherwise an unaware
3283 * application using the one to many model may become confused and act
3284 * incorrectly.
3285 */
3286 static int sctp_setsockopt_fragment_interleave(struct sock *sk,
3287 char __user *optval,
3288 unsigned int optlen)
3289 {
3290 int val;
3291
3292 if (optlen != sizeof(int))
3293 return -EINVAL;
3294 if (get_user(val, (int __user *)optval))
3295 return -EFAULT;
3296
3297 sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
3298
3299 return 0;
3300 }
3301
3302 /*
3303 * 8.1.21. Set or Get the SCTP Partial Delivery Point
3304 * (SCTP_PARTIAL_DELIVERY_POINT)
3305 *
3306 * This option will set or get the SCTP partial delivery point. This
3307 * point is the size of a message where the partial delivery API will be
3308 * invoked to help free up rwnd space for the peer. Setting this to a
3309 * lower value will cause partial deliveries to happen more often. The
3310 * calls argument is an integer that sets or gets the partial delivery
3311 * point. Note also that the call will fail if the user attempts to set
3312 * this value larger than the socket receive buffer size.
3313 *
3314 * Note that any single message having a length smaller than or equal to
3315 * the SCTP partial delivery point will be delivered in one single read
3316 * call as long as the user provided buffer is large enough to hold the
3317 * message.
3318 */
3319 static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
3320 char __user *optval,
3321 unsigned int optlen)
3322 {
3323 u32 val;
3324
3325 if (optlen != sizeof(u32))
3326 return -EINVAL;
3327 if (get_user(val, (int __user *)optval))
3328 return -EFAULT;
3329
3330 /* Note: We double the receive buffer from what the user sets
3331 * it to be, also initial rwnd is based on rcvbuf/2.
3332 */
3333 if (val > (sk->sk_rcvbuf >> 1))
3334 return -EINVAL;
3335
3336 sctp_sk(sk)->pd_point = val;
3337
3338 return 0; /* is this the right error code? */
3339 }
3340
3341 /*
3342 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
3343 *
3344 * This option will allow a user to change the maximum burst of packets
3345 * that can be emitted by this association. Note that the default value
3346 * is 4, and some implementations may restrict this setting so that it
3347 * can only be lowered.
3348 *
3349 * NOTE: This text doesn't seem right. Do this on a socket basis with
3350 * future associations inheriting the socket value.
3351 */
3352 static int sctp_setsockopt_maxburst(struct sock *sk,
3353 char __user *optval,
3354 unsigned int optlen)
3355 {
3356 struct sctp_assoc_value params;
3357 struct sctp_sock *sp;
3358 struct sctp_association *asoc;
3359 int val;
3360 int assoc_id = 0;
3361
3362 if (optlen == sizeof(int)) {
3363 pr_warn_ratelimited(DEPRECATED
3364 "%s (pid %d) "
3365 "Use of int in max_burst socket option deprecated.\n"
3366 "Use struct sctp_assoc_value instead\n",
3367 current->comm, task_pid_nr(current));
3368 if (copy_from_user(&val, optval, optlen))
3369 return -EFAULT;
3370 } else if (optlen == sizeof(struct sctp_assoc_value)) {
3371 if (copy_from_user(&params, optval, optlen))
3372 return -EFAULT;
3373 val = params.assoc_value;
3374 assoc_id = params.assoc_id;
3375 } else
3376 return -EINVAL;
3377
3378 sp = sctp_sk(sk);
3379
3380 if (assoc_id != 0) {
3381 asoc = sctp_id2assoc(sk, assoc_id);
3382 if (!asoc)
3383 return -EINVAL;
3384 asoc->max_burst = val;
3385 } else
3386 sp->max_burst = val;
3387
3388 return 0;
3389 }
3390
3391 /*
3392 * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK)
3393 *
3394 * This set option adds a chunk type that the user is requesting to be
3395 * received only in an authenticated way. Changes to the list of chunks
3396 * will only effect future associations on the socket.
3397 */
3398 static int sctp_setsockopt_auth_chunk(struct sock *sk,
3399 char __user *optval,
3400 unsigned int optlen)
3401 {
3402 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3403 struct sctp_authchunk val;
3404
3405 if (!ep->auth_enable)
3406 return -EACCES;
3407
3408 if (optlen != sizeof(struct sctp_authchunk))
3409 return -EINVAL;
3410 if (copy_from_user(&val, optval, optlen))
3411 return -EFAULT;
3412
3413 switch (val.sauth_chunk) {
3414 case SCTP_CID_INIT:
3415 case SCTP_CID_INIT_ACK:
3416 case SCTP_CID_SHUTDOWN_COMPLETE:
3417 case SCTP_CID_AUTH:
3418 return -EINVAL;
3419 }
3420
3421 /* add this chunk id to the endpoint */
3422 return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
3423 }
3424
3425 /*
3426 * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT)
3427 *
3428 * This option gets or sets the list of HMAC algorithms that the local
3429 * endpoint requires the peer to use.
3430 */
3431 static int sctp_setsockopt_hmac_ident(struct sock *sk,
3432 char __user *optval,
3433 unsigned int optlen)
3434 {
3435 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3436 struct sctp_hmacalgo *hmacs;
3437 u32 idents;
3438 int err;
3439
3440 if (!ep->auth_enable)
3441 return -EACCES;
3442
3443 if (optlen < sizeof(struct sctp_hmacalgo))
3444 return -EINVAL;
3445
3446 hmacs = memdup_user(optval, optlen);
3447 if (IS_ERR(hmacs))
3448 return PTR_ERR(hmacs);
3449
3450 idents = hmacs->shmac_num_idents;
3451 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
3452 (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
3453 err = -EINVAL;
3454 goto out;
3455 }
3456
3457 err = sctp_auth_ep_set_hmacs(ep, hmacs);
3458 out:
3459 kfree(hmacs);
3460 return err;
3461 }
3462
3463 /*
3464 * 7.1.20. Set a shared key (SCTP_AUTH_KEY)
3465 *
3466 * This option will set a shared secret key which is used to build an
3467 * association shared key.
3468 */
3469 static int sctp_setsockopt_auth_key(struct sock *sk,
3470 char __user *optval,
3471 unsigned int optlen)
3472 {
3473 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3474 struct sctp_authkey *authkey;
3475 struct sctp_association *asoc;
3476 int ret;
3477
3478 if (!ep->auth_enable)
3479 return -EACCES;
3480
3481 if (optlen <= sizeof(struct sctp_authkey))
3482 return -EINVAL;
3483
3484 authkey = memdup_user(optval, optlen);
3485 if (IS_ERR(authkey))
3486 return PTR_ERR(authkey);
3487
3488 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
3489 ret = -EINVAL;
3490 goto out;
3491 }
3492
3493 asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
3494 if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
3495 ret = -EINVAL;
3496 goto out;
3497 }
3498
3499 ret = sctp_auth_set_key(ep, asoc, authkey);
3500 out:
3501 kzfree(authkey);
3502 return ret;
3503 }
3504
3505 /*
3506 * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY)
3507 *
3508 * This option will get or set the active shared key to be used to build
3509 * the association shared key.
3510 */
3511 static int sctp_setsockopt_active_key(struct sock *sk,
3512 char __user *optval,
3513 unsigned int optlen)
3514 {
3515 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3516 struct sctp_authkeyid val;
3517 struct sctp_association *asoc;
3518
3519 if (!ep->auth_enable)
3520 return -EACCES;
3521
3522 if (optlen != sizeof(struct sctp_authkeyid))
3523 return -EINVAL;
3524 if (copy_from_user(&val, optval, optlen))
3525 return -EFAULT;
3526
3527 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
3528 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3529 return -EINVAL;
3530
3531 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
3532 }
3533
3534 /*
3535 * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY)
3536 *
3537 * This set option will delete a shared secret key from use.
3538 */
3539 static int sctp_setsockopt_del_key(struct sock *sk,
3540 char __user *optval,
3541 unsigned int optlen)
3542 {
3543 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
3544 struct sctp_authkeyid val;
3545 struct sctp_association *asoc;
3546
3547 if (!ep->auth_enable)
3548 return -EACCES;
3549
3550 if (optlen != sizeof(struct sctp_authkeyid))
3551 return -EINVAL;
3552 if (copy_from_user(&val, optval, optlen))
3553 return -EFAULT;
3554
3555 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
3556 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
3557 return -EINVAL;
3558
3559 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
3560
3561 }
3562
3563 /*
3564 * 8.1.23 SCTP_AUTO_ASCONF
3565 *
3566 * This option will enable or disable the use of the automatic generation of
3567 * ASCONF chunks to add and delete addresses to an existing association. Note
3568 * that this option has two caveats namely: a) it only affects sockets that
3569 * are bound to all addresses available to the SCTP stack, and b) the system
3570 * administrator may have an overriding control that turns the ASCONF feature
3571 * off no matter what setting the socket option may have.
3572 * This option expects an integer boolean flag, where a non-zero value turns on
3573 * the option, and a zero value turns off the option.
3574 * Note. In this implementation, socket operation overrides default parameter
3575 * being set by sysctl as well as FreeBSD implementation
3576 */
3577 static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
3578 unsigned int optlen)
3579 {
3580 int val;
3581 struct sctp_sock *sp = sctp_sk(sk);
3582
3583 if (optlen < sizeof(int))
3584 return -EINVAL;
3585 if (get_user(val, (int __user *)optval))
3586 return -EFAULT;
3587 if (!sctp_is_ep_boundall(sk) && val)
3588 return -EINVAL;
3589 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
3590 return 0;
3591
3592 spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
3593 if (val == 0 && sp->do_auto_asconf) {
3594 list_del(&sp->auto_asconf_list);
3595 sp->do_auto_asconf = 0;
3596 } else if (val && !sp->do_auto_asconf) {
3597 list_add_tail(&sp->auto_asconf_list,
3598 &sock_net(sk)->sctp.auto_asconf_splist);
3599 sp->do_auto_asconf = 1;
3600 }
3601 spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
3602 return 0;
3603 }
3604
3605 /*
3606 * SCTP_PEER_ADDR_THLDS
3607 *
3608 * This option allows us to alter the partially failed threshold for one or all
3609 * transports in an association. See Section 6.1 of:
3610 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
3611 */
3612 static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
3613 char __user *optval,
3614 unsigned int optlen)
3615 {
3616 struct sctp_paddrthlds val;
3617 struct sctp_transport *trans;
3618 struct sctp_association *asoc;
3619
3620 if (optlen < sizeof(struct sctp_paddrthlds))
3621 return -EINVAL;
3622 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
3623 sizeof(struct sctp_paddrthlds)))
3624 return -EFAULT;
3625
3626
3627 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
3628 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
3629 if (!asoc)
3630 return -ENOENT;
3631 list_for_each_entry(trans, &asoc->peer.transport_addr_list,
3632 transports) {
3633 if (val.spt_pathmaxrxt)
3634 trans->pathmaxrxt = val.spt_pathmaxrxt;
3635 trans->pf_retrans = val.spt_pathpfthld;
3636 }
3637
3638 if (val.spt_pathmaxrxt)
3639 asoc->pathmaxrxt = val.spt_pathmaxrxt;
3640 asoc->pf_retrans = val.spt_pathpfthld;
3641 } else {
3642 trans = sctp_addr_id2transport(sk, &val.spt_address,
3643 val.spt_assoc_id);
3644 if (!trans)
3645 return -ENOENT;
3646
3647 if (val.spt_pathmaxrxt)
3648 trans->pathmaxrxt = val.spt_pathmaxrxt;
3649 trans->pf_retrans = val.spt_pathpfthld;
3650 }
3651
3652 return 0;
3653 }
3654
3655 static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
3656 char __user *optval,
3657 unsigned int optlen)
3658 {
3659 int val;
3660
3661 if (optlen < sizeof(int))
3662 return -EINVAL;
3663 if (get_user(val, (int __user *) optval))
3664 return -EFAULT;
3665
3666 sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;
3667
3668 return 0;
3669 }
3670
3671 static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
3672 char __user *optval,
3673 unsigned int optlen)
3674 {
3675 int val;
3676
3677 if (optlen < sizeof(int))
3678 return -EINVAL;
3679 if (get_user(val, (int __user *) optval))
3680 return -EFAULT;
3681
3682 sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;
3683
3684 return 0;
3685 }
3686
3687 static int sctp_setsockopt_pr_supported(struct sock *sk,
3688 char __user *optval,
3689 unsigned int optlen)
3690 {
3691 struct sctp_assoc_value params;
3692 struct sctp_association *asoc;
3693 int retval = -EINVAL;
3694
3695 if (optlen != sizeof(params))
3696 goto out;
3697
3698 if (copy_from_user(&params, optval, optlen)) {
3699 retval = -EFAULT;
3700 goto out;
3701 }
3702
3703 asoc = sctp_id2assoc(sk, params.assoc_id);
3704 if (asoc) {
3705 asoc->prsctp_enable = !!params.assoc_value;
3706 } else if (!params.assoc_id) {
3707 struct sctp_sock *sp = sctp_sk(sk);
3708
3709 sp->ep->prsctp_enable = !!params.assoc_value;
3710 } else {
3711 goto out;
3712 }
3713
3714 retval = 0;
3715
3716 out:
3717 return retval;
3718 }
3719
3720 static int sctp_setsockopt_default_prinfo(struct sock *sk,
3721 char __user *optval,
3722 unsigned int optlen)
3723 {
3724 struct sctp_default_prinfo info;
3725 struct sctp_association *asoc;
3726 int retval = -EINVAL;
3727
3728 if (optlen != sizeof(info))
3729 goto out;
3730
3731 if (copy_from_user(&info, optval, sizeof(info))) {
3732 retval = -EFAULT;
3733 goto out;
3734 }
3735
3736 if (info.pr_policy & ~SCTP_PR_SCTP_MASK)
3737 goto out;
3738
3739 if (info.pr_policy == SCTP_PR_SCTP_NONE)
3740 info.pr_value = 0;
3741
3742 asoc = sctp_id2assoc(sk, info.pr_assoc_id);
3743 if (asoc) {
3744 SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
3745 asoc->default_timetolive = info.pr_value;
3746 } else if (!info.pr_assoc_id) {
3747 struct sctp_sock *sp = sctp_sk(sk);
3748
3749 SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
3750 sp->default_timetolive = info.pr_value;
3751 } else {
3752 goto out;
3753 }
3754
3755 retval = 0;
3756
3757 out:
3758 return retval;
3759 }
3760
3761 static int sctp_setsockopt_reconfig_supported(struct sock *sk,
3762 char __user *optval,
3763 unsigned int optlen)
3764 {
3765 struct sctp_assoc_value params;
3766 struct sctp_association *asoc;
3767 int retval = -EINVAL;
3768
3769 if (optlen != sizeof(params))
3770 goto out;
3771
3772 if (copy_from_user(&params, optval, optlen)) {
3773 retval = -EFAULT;
3774 goto out;
3775 }
3776
3777 asoc = sctp_id2assoc(sk, params.assoc_id);
3778 if (asoc) {
3779 asoc->reconf_enable = !!params.assoc_value;
3780 } else if (!params.assoc_id) {
3781 struct sctp_sock *sp = sctp_sk(sk);
3782
3783 sp->ep->reconf_enable = !!params.assoc_value;
3784 } else {
3785 goto out;
3786 }
3787
3788 retval = 0;
3789
3790 out:
3791 return retval;
3792 }
3793
3794 static int sctp_setsockopt_enable_strreset(struct sock *sk,
3795 char __user *optval,
3796 unsigned int optlen)
3797 {
3798 struct sctp_assoc_value params;
3799 struct sctp_association *asoc;
3800 int retval = -EINVAL;
3801
3802 if (optlen != sizeof(params))
3803 goto out;
3804
3805 if (copy_from_user(&params, optval, optlen)) {
3806 retval = -EFAULT;
3807 goto out;
3808 }
3809
3810 if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
3811 goto out;
3812
3813 asoc = sctp_id2assoc(sk, params.assoc_id);
3814 if (asoc) {
3815 asoc->strreset_enable = params.assoc_value;
3816 } else if (!params.assoc_id) {
3817 struct sctp_sock *sp = sctp_sk(sk);
3818
3819 sp->ep->strreset_enable = params.assoc_value;
3820 } else {
3821 goto out;
3822 }
3823
3824 retval = 0;
3825
3826 out:
3827 return retval;
3828 }
3829
3830 static int sctp_setsockopt_reset_streams(struct sock *sk,
3831 char __user *optval,
3832 unsigned int optlen)
3833 {
3834 struct sctp_reset_streams *params;
3835 struct sctp_association *asoc;
3836 int retval = -EINVAL;
3837
3838 if (optlen < sizeof(struct sctp_reset_streams))
3839 return -EINVAL;
3840
3841 params = memdup_user(optval, optlen);
3842 if (IS_ERR(params))
3843 return PTR_ERR(params);
3844
3845 asoc = sctp_id2assoc(sk, params->srs_assoc_id);
3846 if (!asoc)
3847 goto out;
3848
3849 retval = sctp_send_reset_streams(asoc, params);
3850
3851 out:
3852 kfree(params);
3853 return retval;
3854 }
3855
3856 static int sctp_setsockopt_reset_assoc(struct sock *sk,
3857 char __user *optval,
3858 unsigned int optlen)
3859 {
3860 struct sctp_association *asoc;
3861 sctp_assoc_t associd;
3862 int retval = -EINVAL;
3863
3864 if (optlen != sizeof(associd))
3865 goto out;
3866
3867 if (copy_from_user(&associd, optval, optlen)) {
3868 retval = -EFAULT;
3869 goto out;
3870 }
3871
3872 asoc = sctp_id2assoc(sk, associd);
3873 if (!asoc)
3874 goto out;
3875
3876 retval = sctp_send_reset_assoc(asoc);
3877
3878 out:
3879 return retval;
3880 }
3881
3882 static int sctp_setsockopt_add_streams(struct sock *sk,
3883 char __user *optval,
3884 unsigned int optlen)
3885 {
3886 struct sctp_association *asoc;
3887 struct sctp_add_streams params;
3888 int retval = -EINVAL;
3889
3890 if (optlen != sizeof(params))
3891 goto out;
3892
3893 if (copy_from_user(&params, optval, optlen)) {
3894 retval = -EFAULT;
3895 goto out;
3896 }
3897
3898 asoc = sctp_id2assoc(sk, params.sas_assoc_id);
3899 if (!asoc)
3900 goto out;
3901
3902 retval = sctp_send_add_streams(asoc, &params);
3903
3904 out:
3905 return retval;
3906 }
3907
3908 /* API 6.2 setsockopt(), getsockopt()
3909 *
3910 * Applications use setsockopt() and getsockopt() to set or retrieve
3911 * socket options. Socket options are used to change the default
3912 * behavior of sockets calls. They are described in Section 7.
3913 *
3914 * The syntax is:
3915 *
3916 * ret = getsockopt(int sd, int level, int optname, void __user *optval,
3917 * int __user *optlen);
3918 * ret = setsockopt(int sd, int level, int optname, const void __user *optval,
3919 * int optlen);
3920 *
3921 * sd - the socket descript.
3922 * level - set to IPPROTO_SCTP for all SCTP options.
3923 * optname - the option name.
3924 * optval - the buffer to store the value of the option.
3925 * optlen - the size of the buffer.
3926 */
3927 static int sctp_setsockopt(struct sock *sk, int level, int optname,
3928 char __user *optval, unsigned int optlen)
3929 {
3930 int retval = 0;
3931
3932 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
3933
3934 /* I can hardly begin to describe how wrong this is. This is
3935 * so broken as to be worse than useless. The API draft
3936 * REALLY is NOT helpful here... I am not convinced that the
3937 * semantics of setsockopt() with a level OTHER THAN SOL_SCTP
3938 * are at all well-founded.
3939 */
3940 if (level != SOL_SCTP) {
3941 struct sctp_af *af = sctp_sk(sk)->pf->af;
3942 retval = af->setsockopt(sk, level, optname, optval, optlen);
3943 goto out_nounlock;
3944 }
3945
3946 lock_sock(sk);
3947
3948 switch (optname) {
3949 case SCTP_SOCKOPT_BINDX_ADD:
3950 /* 'optlen' is the size of the addresses buffer. */
3951 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
3952 optlen, SCTP_BINDX_ADD_ADDR);
3953 break;
3954
3955 case SCTP_SOCKOPT_BINDX_REM:
3956 /* 'optlen' is the size of the addresses buffer. */
3957 retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
3958 optlen, SCTP_BINDX_REM_ADDR);
3959 break;
3960
3961 case SCTP_SOCKOPT_CONNECTX_OLD:
3962 /* 'optlen' is the size of the addresses buffer. */
3963 retval = sctp_setsockopt_connectx_old(sk,
3964 (struct sockaddr __user *)optval,
3965 optlen);
3966 break;
3967
3968 case SCTP_SOCKOPT_CONNECTX:
3969 /* 'optlen' is the size of the addresses buffer. */
3970 retval = sctp_setsockopt_connectx(sk,
3971 (struct sockaddr __user *)optval,
3972 optlen);
3973 break;
3974
3975 case SCTP_DISABLE_FRAGMENTS:
3976 retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
3977 break;
3978
3979 case SCTP_EVENTS:
3980 retval = sctp_setsockopt_events(sk, optval, optlen);
3981 break;
3982
3983 case SCTP_AUTOCLOSE:
3984 retval = sctp_setsockopt_autoclose(sk, optval, optlen);
3985 break;
3986
3987 case SCTP_PEER_ADDR_PARAMS:
3988 retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
3989 break;
3990
3991 case SCTP_DELAYED_SACK:
3992 retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
3993 break;
3994 case SCTP_PARTIAL_DELIVERY_POINT:
3995 retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
3996 break;
3997
3998 case SCTP_INITMSG:
3999 retval = sctp_setsockopt_initmsg(sk, optval, optlen);
4000 break;
4001 case SCTP_DEFAULT_SEND_PARAM:
4002 retval = sctp_setsockopt_default_send_param(sk, optval,
4003 optlen);
4004 break;
4005 case SCTP_DEFAULT_SNDINFO:
4006 retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
4007 break;
4008 case SCTP_PRIMARY_ADDR:
4009 retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
4010 break;
4011 case SCTP_SET_PEER_PRIMARY_ADDR:
4012 retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
4013 break;
4014 case SCTP_NODELAY:
4015 retval = sctp_setsockopt_nodelay(sk, optval, optlen);
4016 break;
4017 case SCTP_RTOINFO:
4018 retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
4019 break;
4020 case SCTP_ASSOCINFO:
4021 retval = sctp_setsockopt_associnfo(sk, optval, optlen);
4022 break;
4023 case SCTP_I_WANT_MAPPED_V4_ADDR:
4024 retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
4025 break;
4026 case SCTP_MAXSEG:
4027 retval = sctp_setsockopt_maxseg(sk, optval, optlen);
4028 break;
4029 case SCTP_ADAPTATION_LAYER:
4030 retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
4031 break;
4032 case SCTP_CONTEXT:
4033 retval = sctp_setsockopt_context(sk, optval, optlen);
4034 break;
4035 case SCTP_FRAGMENT_INTERLEAVE:
4036 retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
4037 break;
4038 case SCTP_MAX_BURST:
4039 retval = sctp_setsockopt_maxburst(sk, optval, optlen);
4040 break;
4041 case SCTP_AUTH_CHUNK:
4042 retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
4043 break;
4044 case SCTP_HMAC_IDENT:
4045 retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
4046 break;
4047 case SCTP_AUTH_KEY:
4048 retval = sctp_setsockopt_auth_key(sk, optval, optlen);
4049 break;
4050 case SCTP_AUTH_ACTIVE_KEY:
4051 retval = sctp_setsockopt_active_key(sk, optval, optlen);
4052 break;
4053 case SCTP_AUTH_DELETE_KEY:
4054 retval = sctp_setsockopt_del_key(sk, optval, optlen);
4055 break;
4056 case SCTP_AUTO_ASCONF:
4057 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
4058 break;
4059 case SCTP_PEER_ADDR_THLDS:
4060 retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
4061 break;
4062 case SCTP_RECVRCVINFO:
4063 retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
4064 break;
4065 case SCTP_RECVNXTINFO:
4066 retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
4067 break;
4068 case SCTP_PR_SUPPORTED:
4069 retval = sctp_setsockopt_pr_supported(sk, optval, optlen);
4070 break;
4071 case SCTP_DEFAULT_PRINFO:
4072 retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
4073 break;
4074 case SCTP_RECONFIG_SUPPORTED:
4075 retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
4076 break;
4077 case SCTP_ENABLE_STREAM_RESET:
4078 retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
4079 break;
4080 case SCTP_RESET_STREAMS:
4081 retval = sctp_setsockopt_reset_streams(sk, optval, optlen);
4082 break;
4083 case SCTP_RESET_ASSOC:
4084 retval = sctp_setsockopt_reset_assoc(sk, optval, optlen);
4085 break;
4086 case SCTP_ADD_STREAMS:
4087 retval = sctp_setsockopt_add_streams(sk, optval, optlen);
4088 break;
4089 default:
4090 retval = -ENOPROTOOPT;
4091 break;
4092 }
4093
4094 release_sock(sk);
4095
4096 out_nounlock:
4097 return retval;
4098 }
4099
4100 /* API 3.1.6 connect() - UDP Style Syntax
4101 *
4102 * An application may use the connect() call in the UDP model to initiate an
4103 * association without sending data.
4104 *
4105 * The syntax is:
4106 *
4107 * ret = connect(int sd, const struct sockaddr *nam, socklen_t len);
4108 *
4109 * sd: the socket descriptor to have a new association added to.
4110 *
4111 * nam: the address structure (either struct sockaddr_in or struct
4112 * sockaddr_in6 defined in RFC2553 [7]).
4113 *
4114 * len: the size of the address.
4115 */
4116 static int sctp_connect(struct sock *sk, struct sockaddr *addr,
4117 int addr_len)
4118 {
4119 int err = 0;
4120 struct sctp_af *af;
4121
4122 lock_sock(sk);
4123
4124 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
4125 addr, addr_len);
4126
4127 /* Validate addr_len before calling common connect/connectx routine. */
4128 af = sctp_get_af_specific(addr->sa_family);
4129 if (!af || addr_len < af->sockaddr_len) {
4130 err = -EINVAL;
4131 } else {
4132 /* Pass correct addr len to common routine (so it knows there
4133 * is only one address being passed.
4134 */
4135 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
4136 }
4137
4138 release_sock(sk);
4139 return err;
4140 }
4141
4142 /* FIXME: Write comments. */
4143 static int sctp_disconnect(struct sock *sk, int flags)
4144 {
4145 return -EOPNOTSUPP; /* STUB */
4146 }
4147
4148 /* 4.1.4 accept() - TCP Style Syntax
4149 *
4150 * Applications use accept() call to remove an established SCTP
4151 * association from the accept queue of the endpoint. A new socket
4152 * descriptor will be returned from accept() to represent the newly
4153 * formed association.
4154 */
4155 static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
4156 {
4157 struct sctp_sock *sp;
4158 struct sctp_endpoint *ep;
4159 struct sock *newsk = NULL;
4160 struct sctp_association *asoc;
4161 long timeo;
4162 int error = 0;
4163
4164 lock_sock(sk);
4165
4166 sp = sctp_sk(sk);
4167 ep = sp->ep;
4168
4169 if (!sctp_style(sk, TCP)) {
4170 error = -EOPNOTSUPP;
4171 goto out;
4172 }
4173
4174 if (!sctp_sstate(sk, LISTENING)) {
4175 error = -EINVAL;
4176 goto out;
4177 }
4178
4179 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
4180
4181 error = sctp_wait_for_accept(sk, timeo);
4182 if (error)
4183 goto out;
4184
4185 /* We treat the list of associations on the endpoint as the accept
4186 * queue and pick the first association on the list.
4187 */
4188 asoc = list_entry(ep->asocs.next, struct sctp_association, asocs);
4189
4190 newsk = sp->pf->create_accept_sk(sk, asoc, kern);
4191 if (!newsk) {
4192 error = -ENOMEM;
4193 goto out;
4194 }
4195
4196 /* Populate the fields of the newsk from the oldsk and migrate the
4197 * asoc to the newsk.
4198 */
4199 sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
4200
4201 out:
4202 release_sock(sk);
4203 *err = error;
4204 return newsk;
4205 }
4206
4207 /* The SCTP ioctl handler. */
4208 static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
4209 {
4210 int rc = -ENOTCONN;
4211
4212 lock_sock(sk);
4213
4214 /*
4215 * SEQPACKET-style sockets in LISTENING state are valid, for
4216 * SCTP, so only discard TCP-style sockets in LISTENING state.
4217 */
4218 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
4219 goto out;
4220
4221 switch (cmd) {
4222 case SIOCINQ: {
4223 struct sk_buff *skb;
4224 unsigned int amount = 0;
4225
4226 skb = skb_peek(&sk->sk_receive_queue);
4227 if (skb != NULL) {
4228 /*
4229 * We will only return the amount of this packet since
4230 * that is all that will be read.
4231 */
4232 amount = skb->len;
4233 }
4234 rc = put_user(amount, (int __user *)arg);
4235 break;
4236 }
4237 default:
4238 rc = -ENOIOCTLCMD;
4239 break;
4240 }
4241 out:
4242 release_sock(sk);
4243 return rc;
4244 }
4245
4246 /* This is the function which gets called during socket creation to
4247 * initialized the SCTP-specific portion of the sock.
4248 * The sock structure should already be zero-filled memory.
4249 */
4250 static int sctp_init_sock(struct sock *sk)
4251 {
4252 struct net *net = sock_net(sk);
4253 struct sctp_sock *sp;
4254
4255 pr_debug("%s: sk:%p\n", __func__, sk);
4256
4257 sp = sctp_sk(sk);
4258
4259 /* Initialize the SCTP per socket area. */
4260 switch (sk->sk_type) {
4261 case SOCK_SEQPACKET:
4262 sp->type = SCTP_SOCKET_UDP;
4263 break;
4264 case SOCK_STREAM:
4265 sp->type = SCTP_SOCKET_TCP;
4266 break;
4267 default:
4268 return -ESOCKTNOSUPPORT;
4269 }
4270
4271 sk->sk_gso_type = SKB_GSO_SCTP;
4272
4273 /* Initialize default send parameters. These parameters can be
4274 * modified with the SCTP_DEFAULT_SEND_PARAM socket option.
4275 */
4276 sp->default_stream = 0;
4277 sp->default_ppid = 0;
4278 sp->default_flags = 0;
4279 sp->default_context = 0;
4280 sp->default_timetolive = 0;
4281
4282 sp->default_rcv_context = 0;
4283 sp->max_burst = net->sctp.max_burst;
4284
4285 sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg;
4286
4287 /* Initialize default setup parameters. These parameters
4288 * can be modified with the SCTP_INITMSG socket option or
4289 * overridden by the SCTP_INIT CMSG.
4290 */
4291 sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
4292 sp->initmsg.sinit_max_instreams = sctp_max_instreams;
4293 sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init;
4294 sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max;
4295
4296 /* Initialize default RTO related parameters. These parameters can
4297 * be modified for with the SCTP_RTOINFO socket option.
4298 */
4299 sp->rtoinfo.srto_initial = net->sctp.rto_initial;
4300 sp->rtoinfo.srto_max = net->sctp.rto_max;
4301 sp->rtoinfo.srto_min = net->sctp.rto_min;
4302
4303 /* Initialize default association related parameters. These parameters
4304 * can be modified with the SCTP_ASSOCINFO socket option.
4305 */
4306 sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association;
4307 sp->assocparams.sasoc_number_peer_destinations = 0;
4308 sp->assocparams.sasoc_peer_rwnd = 0;
4309 sp->assocparams.sasoc_local_rwnd = 0;
4310 sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life;
4311
4312 /* Initialize default event subscriptions. By default, all the
4313 * options are off.
4314 */
4315 memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
4316
4317 /* Default Peer Address Parameters. These defaults can
4318 * be modified via SCTP_PEER_ADDR_PARAMS
4319 */
4320 sp->hbinterval = net->sctp.hb_interval;
4321 sp->pathmaxrxt = net->sctp.max_retrans_path;
4322 sp->pathmtu = 0; /* allow default discovery */
4323 sp->sackdelay = net->sctp.sack_timeout;
4324 sp->sackfreq = 2;
4325 sp->param_flags = SPP_HB_ENABLE |
4326 SPP_PMTUD_ENABLE |
4327 SPP_SACKDELAY_ENABLE;
4328
4329 /* If enabled no SCTP message fragmentation will be performed.
4330 * Configure through SCTP_DISABLE_FRAGMENTS socket option.
4331 */
4332 sp->disable_fragments = 0;
4333
4334 /* Enable Nagle algorithm by default. */
4335 sp->nodelay = 0;
4336
4337 sp->recvrcvinfo = 0;
4338 sp->recvnxtinfo = 0;
4339
4340 /* Enable by default. */
4341 sp->v4mapped = 1;
4342
4343 /* Auto-close idle associations after the configured
4344 * number of seconds. A value of 0 disables this
4345 * feature. Configure through the SCTP_AUTOCLOSE socket option,
4346 * for UDP-style sockets only.
4347 */
4348 sp->autoclose = 0;
4349
4350 /* User specified fragmentation limit. */
4351 sp->user_frag = 0;
4352
4353 sp->adaptation_ind = 0;
4354
4355 sp->pf = sctp_get_pf_specific(sk->sk_family);
4356
4357 /* Control variables for partial data delivery. */
4358 atomic_set(&sp->pd_mode, 0);
4359 skb_queue_head_init(&sp->pd_lobby);
4360 sp->frag_interleave = 0;
4361
4362 /* Create a per socket endpoint structure. Even if we
4363 * change the data structure relationships, this may still
4364 * be useful for storing pre-connect address information.
4365 */
4366 sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
4367 if (!sp->ep)
4368 return -ENOMEM;
4369
4370 sp->hmac = NULL;
4371
4372 sk->sk_destruct = sctp_destruct_sock;
4373
4374 SCTP_DBG_OBJCNT_INC(sock);
4375
4376 local_bh_disable();
4377 percpu_counter_inc(&sctp_sockets_allocated);
4378 sock_prot_inuse_add(net, sk->sk_prot, 1);
4379
4380 /* Nothing can fail after this block, otherwise
4381 * sctp_destroy_sock() will be called without addr_wq_lock held
4382 */
4383 if (net->sctp.default_auto_asconf) {
4384 spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
4385 list_add_tail(&sp->auto_asconf_list,
4386 &net->sctp.auto_asconf_splist);
4387 sp->do_auto_asconf = 1;
4388 spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
4389 } else {
4390 sp->do_auto_asconf = 0;
4391 }
4392
4393 local_bh_enable();
4394
4395 return 0;
4396 }
4397
4398 /* Cleanup any SCTP per socket resources. Must be called with
4399 * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
4400 */
4401 static void sctp_destroy_sock(struct sock *sk)
4402 {
4403 struct sctp_sock *sp;
4404
4405 pr_debug("%s: sk:%p\n", __func__, sk);
4406
4407 /* Release our hold on the endpoint. */
4408 sp = sctp_sk(sk);
4409 /* This could happen during socket init, thus we bail out
4410 * early, since the rest of the below is not setup either.
4411 */
4412 if (sp->ep == NULL)
4413 return;
4414
4415 if (sp->do_auto_asconf) {
4416 sp->do_auto_asconf = 0;
4417 list_del(&sp->auto_asconf_list);
4418 }
4419 sctp_endpoint_free(sp->ep);
4420 local_bh_disable();
4421 percpu_counter_dec(&sctp_sockets_allocated);
4422 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
4423 local_bh_enable();
4424 }
4425
4426 /* Triggered when there are no references on the socket anymore */
4427 static void sctp_destruct_sock(struct sock *sk)
4428 {
4429 struct sctp_sock *sp = sctp_sk(sk);
4430
4431 /* Free up the HMAC transform. */
4432 crypto_free_shash(sp->hmac);
4433
4434 inet_sock_destruct(sk);
4435 }
4436
4437 /* API 4.1.7 shutdown() - TCP Style Syntax
4438 * int shutdown(int socket, int how);
4439 *
4440 * sd - the socket descriptor of the association to be closed.
4441 * how - Specifies the type of shutdown. The values are
4442 * as follows:
4443 * SHUT_RD
4444 * Disables further receive operations. No SCTP
4445 * protocol action is taken.
4446 * SHUT_WR
4447 * Disables further send operations, and initiates
4448 * the SCTP shutdown sequence.
4449 * SHUT_RDWR
4450 * Disables further send and receive operations
4451 * and initiates the SCTP shutdown sequence.
4452 */
4453 static void sctp_shutdown(struct sock *sk, int how)
4454 {
4455 struct net *net = sock_net(sk);
4456 struct sctp_endpoint *ep;
4457
4458 if (!sctp_style(sk, TCP))
4459 return;
4460
4461 ep = sctp_sk(sk)->ep;
4462 if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
4463 struct sctp_association *asoc;
4464
4465 sk->sk_state = SCTP_SS_CLOSING;
4466 asoc = list_entry(ep->asocs.next,
4467 struct sctp_association, asocs);
4468 sctp_primitive_SHUTDOWN(net, asoc, NULL);
4469 }
4470 }
4471
4472 int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
4473 struct sctp_info *info)
4474 {
4475 struct sctp_transport *prim;
4476 struct list_head *pos;
4477 int mask;
4478
4479 memset(info, 0, sizeof(*info));
4480 if (!asoc) {
4481 struct sctp_sock *sp = sctp_sk(sk);
4482
4483 info->sctpi_s_autoclose = sp->autoclose;
4484 info->sctpi_s_adaptation_ind = sp->adaptation_ind;
4485 info->sctpi_s_pd_point = sp->pd_point;
4486 info->sctpi_s_nodelay = sp->nodelay;
4487 info->sctpi_s_disable_fragments = sp->disable_fragments;
4488 info->sctpi_s_v4mapped = sp->v4mapped;
4489 info->sctpi_s_frag_interleave = sp->frag_interleave;
4490 info->sctpi_s_type = sp->type;
4491
4492 return 0;
4493 }
4494
4495 info->sctpi_tag = asoc->c.my_vtag;
4496 info->sctpi_state = asoc->state;
4497 info->sctpi_rwnd = asoc->a_rwnd;
4498 info->sctpi_unackdata = asoc->unack_data;
4499 info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4500 info->sctpi_instrms = asoc->stream.incnt;
4501 info->sctpi_outstrms = asoc->stream.outcnt;
4502 list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
4503 info->sctpi_inqueue++;
4504 list_for_each(pos, &asoc->outqueue.out_chunk_list)
4505 info->sctpi_outqueue++;
4506 info->sctpi_overall_error = asoc->overall_error_count;
4507 info->sctpi_max_burst = asoc->max_burst;
4508 info->sctpi_maxseg = asoc->frag_point;
4509 info->sctpi_peer_rwnd = asoc->peer.rwnd;
4510 info->sctpi_peer_tag = asoc->c.peer_vtag;
4511
4512 mask = asoc->peer.ecn_capable << 1;
4513 mask = (mask | asoc->peer.ipv4_address) << 1;
4514 mask = (mask | asoc->peer.ipv6_address) << 1;
4515 mask = (mask | asoc->peer.hostname_address) << 1;
4516 mask = (mask | asoc->peer.asconf_capable) << 1;
4517 mask = (mask | asoc->peer.prsctp_capable) << 1;
4518 mask = (mask | asoc->peer.auth_capable);
4519 info->sctpi_peer_capable = mask;
4520 mask = asoc->peer.sack_needed << 1;
4521 mask = (mask | asoc->peer.sack_generation) << 1;
4522 mask = (mask | asoc->peer.zero_window_announced);
4523 info->sctpi_peer_sack = mask;
4524
4525 info->sctpi_isacks = asoc->stats.isacks;
4526 info->sctpi_osacks = asoc->stats.osacks;
4527 info->sctpi_opackets = asoc->stats.opackets;
4528 info->sctpi_ipackets = asoc->stats.ipackets;
4529 info->sctpi_rtxchunks = asoc->stats.rtxchunks;
4530 info->sctpi_outofseqtsns = asoc->stats.outofseqtsns;
4531 info->sctpi_idupchunks = asoc->stats.idupchunks;
4532 info->sctpi_gapcnt = asoc->stats.gapcnt;
4533 info->sctpi_ouodchunks = asoc->stats.ouodchunks;
4534 info->sctpi_iuodchunks = asoc->stats.iuodchunks;
4535 info->sctpi_oodchunks = asoc->stats.oodchunks;
4536 info->sctpi_iodchunks = asoc->stats.iodchunks;
4537 info->sctpi_octrlchunks = asoc->stats.octrlchunks;
4538 info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
4539
4540 prim = asoc->peer.primary_path;
4541 memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
4542 info->sctpi_p_state = prim->state;
4543 info->sctpi_p_cwnd = prim->cwnd;
4544 info->sctpi_p_srtt = prim->srtt;
4545 info->sctpi_p_rto = jiffies_to_msecs(prim->rto);
4546 info->sctpi_p_hbinterval = prim->hbinterval;
4547 info->sctpi_p_pathmaxrxt = prim->pathmaxrxt;
4548 info->sctpi_p_sackdelay = jiffies_to_msecs(prim->sackdelay);
4549 info->sctpi_p_ssthresh = prim->ssthresh;
4550 info->sctpi_p_partial_bytes_acked = prim->partial_bytes_acked;
4551 info->sctpi_p_flight_size = prim->flight_size;
4552 info->sctpi_p_error = prim->error_count;
4553
4554 return 0;
4555 }
4556 EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
4557
4558 /* use callback to avoid exporting the core structure */
4559 int sctp_transport_walk_start(struct rhashtable_iter *iter)
4560 {
4561 int err;
4562
4563 rhltable_walk_enter(&sctp_transport_hashtable, iter);
4564
4565 err = rhashtable_walk_start(iter);
4566 if (err && err != -EAGAIN) {
4567 rhashtable_walk_stop(iter);
4568 rhashtable_walk_exit(iter);
4569 return err;
4570 }
4571
4572 return 0;
4573 }
4574
4575 void sctp_transport_walk_stop(struct rhashtable_iter *iter)
4576 {
4577 rhashtable_walk_stop(iter);
4578 rhashtable_walk_exit(iter);
4579 }
4580
4581 struct sctp_transport *sctp_transport_get_next(struct net *net,
4582 struct rhashtable_iter *iter)
4583 {
4584 struct sctp_transport *t;
4585
4586 t = rhashtable_walk_next(iter);
4587 for (; t; t = rhashtable_walk_next(iter)) {
4588 if (IS_ERR(t)) {
4589 if (PTR_ERR(t) == -EAGAIN)
4590 continue;
4591 break;
4592 }
4593
4594 if (net_eq(sock_net(t->asoc->base.sk), net) &&
4595 t->asoc->peer.primary_path == t)
4596 break;
4597 }
4598
4599 return t;
4600 }
4601
4602 struct sctp_transport *sctp_transport_get_idx(struct net *net,
4603 struct rhashtable_iter *iter,
4604 int pos)
4605 {
4606 void *obj = SEQ_START_TOKEN;
4607
4608 while (pos && (obj = sctp_transport_get_next(net, iter)) &&
4609 !IS_ERR(obj))
4610 pos--;
4611
4612 return obj;
4613 }
4614
4615 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
4616 void *p) {
4617 int err = 0;
4618 int hash = 0;
4619 struct sctp_ep_common *epb;
4620 struct sctp_hashbucket *head;
4621
4622 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
4623 hash++, head++) {
4624 read_lock_bh(&head->lock);
4625 sctp_for_each_hentry(epb, &head->chain) {
4626 err = cb(sctp_ep(epb), p);
4627 if (err)
4628 break;
4629 }
4630 read_unlock_bh(&head->lock);
4631 }
4632
4633 return err;
4634 }
4635 EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
4636
4637 int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
4638 struct net *net,
4639 const union sctp_addr *laddr,
4640 const union sctp_addr *paddr, void *p)
4641 {
4642 struct sctp_transport *transport;
4643 int err;
4644
4645 rcu_read_lock();
4646 transport = sctp_addrs_lookup_transport(net, laddr, paddr);
4647 rcu_read_unlock();
4648 if (!transport)
4649 return -ENOENT;
4650
4651 err = cb(transport, p);
4652 sctp_transport_put(transport);
4653
4654 return err;
4655 }
4656 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
4657
4658 int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
4659 struct net *net, int pos, void *p) {
4660 struct rhashtable_iter hti;
4661 void *obj;
4662 int err;
4663
4664 err = sctp_transport_walk_start(&hti);
4665 if (err)
4666 return err;
4667
4668 obj = sctp_transport_get_idx(net, &hti, pos + 1);
4669 for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
4670 struct sctp_transport *transport = obj;
4671
4672 if (!sctp_transport_hold(transport))
4673 continue;
4674 err = cb(transport, p);
4675 sctp_transport_put(transport);
4676 if (err)
4677 break;
4678 }
4679 sctp_transport_walk_stop(&hti);
4680
4681 return err;
4682 }
4683 EXPORT_SYMBOL_GPL(sctp_for_each_transport);
4684
4685 /* 7.2.1 Association Status (SCTP_STATUS)
4686
4687 * Applications can retrieve current status information about an
4688 * association, including association state, peer receiver window size,
4689 * number of unacked data chunks, and number of data chunks pending
4690 * receipt. This information is read-only.
4691 */
4692 static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
4693 char __user *optval,
4694 int __user *optlen)
4695 {
4696 struct sctp_status status;
4697 struct sctp_association *asoc = NULL;
4698 struct sctp_transport *transport;
4699 sctp_assoc_t associd;
4700 int retval = 0;
4701
4702 if (len < sizeof(status)) {
4703 retval = -EINVAL;
4704 goto out;
4705 }
4706
4707 len = sizeof(status);
4708 if (copy_from_user(&status, optval, len)) {
4709 retval = -EFAULT;
4710 goto out;
4711 }
4712
4713 associd = status.sstat_assoc_id;
4714 asoc = sctp_id2assoc(sk, associd);
4715 if (!asoc) {
4716 retval = -EINVAL;
4717 goto out;
4718 }
4719
4720 transport = asoc->peer.primary_path;
4721
4722 status.sstat_assoc_id = sctp_assoc2id(asoc);
4723 status.sstat_state = sctp_assoc_to_state(asoc);
4724 status.sstat_rwnd = asoc->peer.rwnd;
4725 status.sstat_unackdata = asoc->unack_data;
4726
4727 status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
4728 status.sstat_instrms = asoc->stream.incnt;
4729 status.sstat_outstrms = asoc->stream.outcnt;
4730 status.sstat_fragmentation_point = asoc->frag_point;
4731 status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4732 memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
4733 transport->af_specific->sockaddr_len);
4734 /* Map ipv4 address into v4-mapped-on-v6 address. */
4735 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
4736 (union sctp_addr *)&status.sstat_primary.spinfo_address);
4737 status.sstat_primary.spinfo_state = transport->state;
4738 status.sstat_primary.spinfo_cwnd = transport->cwnd;
4739 status.sstat_primary.spinfo_srtt = transport->srtt;
4740 status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto);
4741 status.sstat_primary.spinfo_mtu = transport->pathmtu;
4742
4743 if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN)
4744 status.sstat_primary.spinfo_state = SCTP_ACTIVE;
4745
4746 if (put_user(len, optlen)) {
4747 retval = -EFAULT;
4748 goto out;
4749 }
4750
4751 pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n",
4752 __func__, len, status.sstat_state, status.sstat_rwnd,
4753 status.sstat_assoc_id);
4754
4755 if (copy_to_user(optval, &status, len)) {
4756 retval = -EFAULT;
4757 goto out;
4758 }
4759
4760 out:
4761 return retval;
4762 }
4763
4764
4765 /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO)
4766 *
4767 * Applications can retrieve information about a specific peer address
4768 * of an association, including its reachability state, congestion
4769 * window, and retransmission timer values. This information is
4770 * read-only.
4771 */
4772 static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
4773 char __user *optval,
4774 int __user *optlen)
4775 {
4776 struct sctp_paddrinfo pinfo;
4777 struct sctp_transport *transport;
4778 int retval = 0;
4779
4780 if (len < sizeof(pinfo)) {
4781 retval = -EINVAL;
4782 goto out;
4783 }
4784
4785 len = sizeof(pinfo);
4786 if (copy_from_user(&pinfo, optval, len)) {
4787 retval = -EFAULT;
4788 goto out;
4789 }
4790
4791 transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
4792 pinfo.spinfo_assoc_id);
4793 if (!transport)
4794 return -EINVAL;
4795
4796 pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
4797 pinfo.spinfo_state = transport->state;
4798 pinfo.spinfo_cwnd = transport->cwnd;
4799 pinfo.spinfo_srtt = transport->srtt;
4800 pinfo.spinfo_rto = jiffies_to_msecs(transport->rto);
4801 pinfo.spinfo_mtu = transport->pathmtu;
4802
4803 if (pinfo.spinfo_state == SCTP_UNKNOWN)
4804 pinfo.spinfo_state = SCTP_ACTIVE;
4805
4806 if (put_user(len, optlen)) {
4807 retval = -EFAULT;
4808 goto out;
4809 }
4810
4811 if (copy_to_user(optval, &pinfo, len)) {
4812 retval = -EFAULT;
4813 goto out;
4814 }
4815
4816 out:
4817 return retval;
4818 }
4819
4820 /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
4821 *
4822 * This option is a on/off flag. If enabled no SCTP message
4823 * fragmentation will be performed. Instead if a message being sent
4824 * exceeds the current PMTU size, the message will NOT be sent and
4825 * instead a error will be indicated to the user.
4826 */
4827 static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
4828 char __user *optval, int __user *optlen)
4829 {
4830 int val;
4831
4832 if (len < sizeof(int))
4833 return -EINVAL;
4834
4835 len = sizeof(int);
4836 val = (sctp_sk(sk)->disable_fragments == 1);
4837 if (put_user(len, optlen))
4838 return -EFAULT;
4839 if (copy_to_user(optval, &val, len))
4840 return -EFAULT;
4841 return 0;
4842 }
4843
4844 /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS)
4845 *
4846 * This socket option is used to specify various notifications and
4847 * ancillary data the user wishes to receive.
4848 */
4849 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
4850 int __user *optlen)
4851 {
4852 if (len == 0)
4853 return -EINVAL;
4854 if (len > sizeof(struct sctp_event_subscribe))
4855 len = sizeof(struct sctp_event_subscribe);
4856 if (put_user(len, optlen))
4857 return -EFAULT;
4858 if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
4859 return -EFAULT;
4860 return 0;
4861 }
4862
4863 /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE)
4864 *
4865 * This socket option is applicable to the UDP-style socket only. When
4866 * set it will cause associations that are idle for more than the
4867 * specified number of seconds to automatically close. An association
4868 * being idle is defined an association that has NOT sent or received
4869 * user data. The special value of '0' indicates that no automatic
4870 * close of any associations should be performed. The option expects an
4871 * integer defining the number of seconds of idle time before an
4872 * association is closed.
4873 */
4874 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
4875 {
4876 /* Applicable to UDP-style socket only */
4877 if (sctp_style(sk, TCP))
4878 return -EOPNOTSUPP;
4879 if (len < sizeof(int))
4880 return -EINVAL;
4881 len = sizeof(int);
4882 if (put_user(len, optlen))
4883 return -EFAULT;
4884 if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
4885 return -EFAULT;
4886 return 0;
4887 }
4888
4889 /* Helper routine to branch off an association to a new socket. */
4890 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
4891 {
4892 struct sctp_association *asoc = sctp_id2assoc(sk, id);
4893 struct sctp_sock *sp = sctp_sk(sk);
4894 struct socket *sock;
4895 int err = 0;
4896
4897 if (!asoc)
4898 return -EINVAL;
4899
4900 /* If there is a thread waiting on more sndbuf space for
4901 * sending on this asoc, it cannot be peeled.
4902 */
4903 if (waitqueue_active(&asoc->wait))
4904 return -EBUSY;
4905
4906 /* An association cannot be branched off from an already peeled-off
4907 * socket, nor is this supported for tcp style sockets.
4908 */
4909 if (!sctp_style(sk, UDP))
4910 return -EINVAL;
4911
4912 /* Create a new socket. */
4913 err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
4914 if (err < 0)
4915 return err;
4916
4917 sctp_copy_sock(sock->sk, sk, asoc);
4918
4919 /* Make peeled-off sockets more like 1-1 accepted sockets.
4920 * Set the daddr and initialize id to something more random
4921 */
4922 sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
4923
4924 /* Populate the fields of the newsk from the oldsk and migrate the
4925 * asoc to the newsk.
4926 */
4927 sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
4928
4929 *sockp = sock;
4930
4931 return err;
4932 }
4933 EXPORT_SYMBOL(sctp_do_peeloff);
4934
4935 static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff,
4936 struct file **newfile, unsigned flags)
4937 {
4938 struct socket *newsock;
4939 int retval;
4940
4941 retval = sctp_do_peeloff(sk, peeloff->associd, &newsock);
4942 if (retval < 0)
4943 goto out;
4944
4945 /* Map the socket to an unused fd that can be returned to the user. */
4946 retval = get_unused_fd_flags(flags & SOCK_CLOEXEC);
4947 if (retval < 0) {
4948 sock_release(newsock);
4949 goto out;
4950 }
4951
4952 *newfile = sock_alloc_file(newsock, 0, NULL);
4953 if (IS_ERR(*newfile)) {
4954 put_unused_fd(retval);
4955 sock_release(newsock);
4956 retval = PTR_ERR(*newfile);
4957 *newfile = NULL;
4958 return retval;
4959 }
4960
4961 pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
4962 retval);
4963
4964 peeloff->sd = retval;
4965
4966 if (flags & SOCK_NONBLOCK)
4967 (*newfile)->f_flags |= O_NONBLOCK;
4968 out:
4969 return retval;
4970 }
4971
4972 static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
4973 {
4974 sctp_peeloff_arg_t peeloff;
4975 struct file *newfile = NULL;
4976 int retval = 0;
4977
4978 if (len < sizeof(sctp_peeloff_arg_t))
4979 return -EINVAL;
4980 len = sizeof(sctp_peeloff_arg_t);
4981 if (copy_from_user(&peeloff, optval, len))
4982 return -EFAULT;
4983
4984 retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0);
4985 if (retval < 0)
4986 goto out;
4987
4988 /* Return the fd mapped to the new socket. */
4989 if (put_user(len, optlen)) {
4990 fput(newfile);
4991 put_unused_fd(retval);
4992 return -EFAULT;
4993 }
4994
4995 if (copy_to_user(optval, &peeloff, len)) {
4996 fput(newfile);
4997 put_unused_fd(retval);
4998 return -EFAULT;
4999 }
5000 fd_install(retval, newfile);
5001 out:
5002 return retval;
5003 }
5004
5005 static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len,
5006 char __user *optval, int __user *optlen)
5007 {
5008 sctp_peeloff_flags_arg_t peeloff;
5009 struct file *newfile = NULL;
5010 int retval = 0;
5011
5012 if (len < sizeof(sctp_peeloff_flags_arg_t))
5013 return -EINVAL;
5014 len = sizeof(sctp_peeloff_flags_arg_t);
5015 if (copy_from_user(&peeloff, optval, len))
5016 return -EFAULT;
5017
5018 retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg,
5019 &newfile, peeloff.flags);
5020 if (retval < 0)
5021 goto out;
5022
5023 /* Return the fd mapped to the new socket. */
5024 if (put_user(len, optlen)) {
5025 fput(newfile);
5026 put_unused_fd(retval);
5027 return -EFAULT;
5028 }
5029
5030 if (copy_to_user(optval, &peeloff, len)) {
5031 fput(newfile);
5032 put_unused_fd(retval);
5033 return -EFAULT;
5034 }
5035 fd_install(retval, newfile);
5036 out:
5037 return retval;
5038 }
5039
5040 /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
5041 *
5042 * Applications can enable or disable heartbeats for any peer address of
5043 * an association, modify an address's heartbeat interval, force a
5044 * heartbeat to be sent immediately, and adjust the address's maximum
5045 * number of retransmissions sent before an address is considered
5046 * unreachable. The following structure is used to access and modify an
5047 * address's parameters:
5048 *
5049 * struct sctp_paddrparams {
5050 * sctp_assoc_t spp_assoc_id;
5051 * struct sockaddr_storage spp_address;
5052 * uint32_t spp_hbinterval;
5053 * uint16_t spp_pathmaxrxt;
5054 * uint32_t spp_pathmtu;
5055 * uint32_t spp_sackdelay;
5056 * uint32_t spp_flags;
5057 * };
5058 *
5059 * spp_assoc_id - (one-to-many style socket) This is filled in the
5060 * application, and identifies the association for
5061 * this query.
5062 * spp_address - This specifies which address is of interest.
5063 * spp_hbinterval - This contains the value of the heartbeat interval,
5064 * in milliseconds. If a value of zero
5065 * is present in this field then no changes are to
5066 * be made to this parameter.
5067 * spp_pathmaxrxt - This contains the maximum number of
5068 * retransmissions before this address shall be
5069 * considered unreachable. If a value of zero
5070 * is present in this field then no changes are to
5071 * be made to this parameter.
5072 * spp_pathmtu - When Path MTU discovery is disabled the value
5073 * specified here will be the "fixed" path mtu.
5074 * Note that if the spp_address field is empty
5075 * then all associations on this address will
5076 * have this fixed path mtu set upon them.
5077 *
5078 * spp_sackdelay - When delayed sack is enabled, this value specifies
5079 * the number of milliseconds that sacks will be delayed
5080 * for. This value will apply to all addresses of an
5081 * association if the spp_address field is empty. Note
5082 * also, that if delayed sack is enabled and this
5083 * value is set to 0, no change is made to the last
5084 * recorded delayed sack timer value.
5085 *
5086 * spp_flags - These flags are used to control various features
5087 * on an association. The flag field may contain
5088 * zero or more of the following options.
5089 *
5090 * SPP_HB_ENABLE - Enable heartbeats on the
5091 * specified address. Note that if the address
5092 * field is empty all addresses for the association
5093 * have heartbeats enabled upon them.
5094 *
5095 * SPP_HB_DISABLE - Disable heartbeats on the
5096 * speicifed address. Note that if the address
5097 * field is empty all addresses for the association
5098 * will have their heartbeats disabled. Note also
5099 * that SPP_HB_ENABLE and SPP_HB_DISABLE are
5100 * mutually exclusive, only one of these two should
5101 * be specified. Enabling both fields will have
5102 * undetermined results.
5103 *
5104 * SPP_HB_DEMAND - Request a user initiated heartbeat
5105 * to be made immediately.
5106 *
5107 * SPP_PMTUD_ENABLE - This field will enable PMTU
5108 * discovery upon the specified address. Note that
5109 * if the address feild is empty then all addresses
5110 * on the association are effected.
5111 *
5112 * SPP_PMTUD_DISABLE - This field will disable PMTU
5113 * discovery upon the specified address. Note that
5114 * if the address feild is empty then all addresses
5115 * on the association are effected. Not also that
5116 * SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
5117 * exclusive. Enabling both will have undetermined
5118 * results.
5119 *
5120 * SPP_SACKDELAY_ENABLE - Setting this flag turns
5121 * on delayed sack. The time specified in spp_sackdelay
5122 * is used to specify the sack delay for this address. Note
5123 * that if spp_address is empty then all addresses will
5124 * enable delayed sack and take on the sack delay
5125 * value specified in spp_sackdelay.
5126 * SPP_SACKDELAY_DISABLE - Setting this flag turns
5127 * off delayed sack. If the spp_address field is blank then
5128 * delayed sack is disabled for the entire association. Note
5129 * also that this field is mutually exclusive to
5130 * SPP_SACKDELAY_ENABLE, setting both will have undefined
5131 * results.
5132 */
5133 static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
5134 char __user *optval, int __user *optlen)
5135 {
5136 struct sctp_paddrparams params;
5137 struct sctp_transport *trans = NULL;
5138 struct sctp_association *asoc = NULL;
5139 struct sctp_sock *sp = sctp_sk(sk);
5140
5141 if (len < sizeof(struct sctp_paddrparams))
5142 return -EINVAL;
5143 len = sizeof(struct sctp_paddrparams);
5144 if (copy_from_user(&params, optval, len))
5145 return -EFAULT;
5146
5147 /* If an address other than INADDR_ANY is specified, and
5148 * no transport is found, then the request is invalid.
5149 */
5150 if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
5151 trans = sctp_addr_id2transport(sk, &params.spp_address,
5152 params.spp_assoc_id);
5153 if (!trans) {
5154 pr_debug("%s: failed no transport\n", __func__);
5155 return -EINVAL;
5156 }
5157 }
5158
5159 /* Get association, if assoc_id != 0 and the socket is a one
5160 * to many style socket, and an association was not found, then
5161 * the id was invalid.
5162 */
5163 asoc = sctp_id2assoc(sk, params.spp_assoc_id);
5164 if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) {
5165 pr_debug("%s: failed no association\n", __func__);
5166 return -EINVAL;
5167 }
5168
5169 if (trans) {
5170 /* Fetch transport values. */
5171 params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval);
5172 params.spp_pathmtu = trans->pathmtu;
5173 params.spp_pathmaxrxt = trans->pathmaxrxt;
5174 params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay);
5175
5176 /*draft-11 doesn't say what to return in spp_flags*/
5177 params.spp_flags = trans->param_flags;
5178 } else if (asoc) {
5179 /* Fetch association values. */
5180 params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval);
5181 params.spp_pathmtu = asoc->pathmtu;
5182 params.spp_pathmaxrxt = asoc->pathmaxrxt;
5183 params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay);
5184
5185 /*draft-11 doesn't say what to return in spp_flags*/
5186 params.spp_flags = asoc->param_flags;
5187 } else {
5188 /* Fetch socket values. */
5189 params.spp_hbinterval = sp->hbinterval;
5190 params.spp_pathmtu = sp->pathmtu;
5191 params.spp_sackdelay = sp->sackdelay;
5192 params.spp_pathmaxrxt = sp->pathmaxrxt;
5193
5194 /*draft-11 doesn't say what to return in spp_flags*/
5195 params.spp_flags = sp->param_flags;
5196 }
5197
5198 if (copy_to_user(optval, &params, len))
5199 return -EFAULT;
5200
5201 if (put_user(len, optlen))
5202 return -EFAULT;
5203
5204 return 0;
5205 }
5206
5207 /*
5208 * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK)
5209 *
5210 * This option will effect the way delayed acks are performed. This
5211 * option allows you to get or set the delayed ack time, in
5212 * milliseconds. It also allows changing the delayed ack frequency.
5213 * Changing the frequency to 1 disables the delayed sack algorithm. If
5214 * the assoc_id is 0, then this sets or gets the endpoints default
5215 * values. If the assoc_id field is non-zero, then the set or get
5216 * effects the specified association for the one to many model (the
5217 * assoc_id field is ignored by the one to one model). Note that if
5218 * sack_delay or sack_freq are 0 when setting this option, then the
5219 * current values will remain unchanged.
5220 *
5221 * struct sctp_sack_info {
5222 * sctp_assoc_t sack_assoc_id;
5223 * uint32_t sack_delay;
5224 * uint32_t sack_freq;
5225 * };
5226 *
5227 * sack_assoc_id - This parameter, indicates which association the user
5228 * is performing an action upon. Note that if this field's value is
5229 * zero then the endpoints default value is changed (effecting future
5230 * associations only).
5231 *
5232 * sack_delay - This parameter contains the number of milliseconds that
5233 * the user is requesting the delayed ACK timer be set to. Note that
5234 * this value is defined in the standard to be between 200 and 500
5235 * milliseconds.
5236 *
5237 * sack_freq - This parameter contains the number of packets that must
5238 * be received before a sack is sent without waiting for the delay
5239 * timer to expire. The default value for this is 2, setting this
5240 * value to 1 will disable the delayed sack algorithm.
5241 */
5242 static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
5243 char __user *optval,
5244 int __user *optlen)
5245 {
5246 struct sctp_sack_info params;
5247 struct sctp_association *asoc = NULL;
5248 struct sctp_sock *sp = sctp_sk(sk);
5249
5250 if (len >= sizeof(struct sctp_sack_info)) {
5251 len = sizeof(struct sctp_sack_info);
5252
5253 if (copy_from_user(&params, optval, len))
5254 return -EFAULT;
5255 } else if (len == sizeof(struct sctp_assoc_value)) {
5256 pr_warn_ratelimited(DEPRECATED
5257 "%s (pid %d) "
5258 "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
5259 "Use struct sctp_sack_info instead\n",
5260 current->comm, task_pid_nr(current));
5261 if (copy_from_user(&params, optval, len))
5262 return -EFAULT;
5263 } else
5264 return -EINVAL;
5265
5266 /* Get association, if sack_assoc_id != 0 and the socket is a one
5267 * to many style socket, and an association was not found, then
5268 * the id was invalid.
5269 */
5270 asoc = sctp_id2assoc(sk, params.sack_assoc_id);
5271 if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP))
5272 return -EINVAL;
5273
5274 if (asoc) {
5275 /* Fetch association values. */
5276 if (asoc->param_flags & SPP_SACKDELAY_ENABLE) {
5277 params.sack_delay = jiffies_to_msecs(
5278 asoc->sackdelay);
5279 params.sack_freq = asoc->sackfreq;
5280
5281 } else {
5282 params.sack_delay = 0;
5283 params.sack_freq = 1;
5284 }
5285 } else {
5286 /* Fetch socket values. */
5287 if (sp->param_flags & SPP_SACKDELAY_ENABLE) {
5288 params.sack_delay = sp->sackdelay;
5289 params.sack_freq = sp->sackfreq;
5290 } else {
5291 params.sack_delay = 0;
5292 params.sack_freq = 1;
5293 }
5294 }
5295
5296 if (copy_to_user(optval, &params, len))
5297 return -EFAULT;
5298
5299 if (put_user(len, optlen))
5300 return -EFAULT;
5301
5302 return 0;
5303 }
5304
5305 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
5306 *
5307 * Applications can specify protocol parameters for the default association
5308 * initialization. The option name argument to setsockopt() and getsockopt()
5309 * is SCTP_INITMSG.
5310 *
5311 * Setting initialization parameters is effective only on an unconnected
5312 * socket (for UDP-style sockets only future associations are effected
5313 * by the change). With TCP-style sockets, this option is inherited by
5314 * sockets derived from a listener socket.
5315 */
5316 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
5317 {
5318 if (len < sizeof(struct sctp_initmsg))
5319 return -EINVAL;
5320 len = sizeof(struct sctp_initmsg);
5321 if (put_user(len, optlen))
5322 return -EFAULT;
5323 if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
5324 return -EFAULT;
5325 return 0;
5326 }
5327
5328
5329 static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
5330 char __user *optval, int __user *optlen)
5331 {
5332 struct sctp_association *asoc;
5333 int cnt = 0;
5334 struct sctp_getaddrs getaddrs;
5335 struct sctp_transport *from;
5336 void __user *to;
5337 union sctp_addr temp;
5338 struct sctp_sock *sp = sctp_sk(sk);
5339 int addrlen;
5340 size_t space_left;
5341 int bytes_copied;
5342
5343 if (len < sizeof(struct sctp_getaddrs))
5344 return -EINVAL;
5345
5346 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
5347 return -EFAULT;
5348
5349 /* For UDP-style sockets, id specifies the association to query. */
5350 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
5351 if (!asoc)
5352 return -EINVAL;
5353
5354 to = optval + offsetof(struct sctp_getaddrs, addrs);
5355 space_left = len - offsetof(struct sctp_getaddrs, addrs);
5356
5357 list_for_each_entry(from, &asoc->peer.transport_addr_list,
5358 transports) {
5359 memcpy(&temp, &from->ipaddr, sizeof(temp));
5360 addrlen = sctp_get_pf_specific(sk->sk_family)
5361 ->addr_to_user(sp, &temp);
5362 if (space_left < addrlen)
5363 return -ENOMEM;
5364 if (copy_to_user(to, &temp, addrlen))
5365 return -EFAULT;
5366 to += addrlen;
5367 cnt++;
5368 space_left -= addrlen;
5369 }
5370
5371 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
5372 return -EFAULT;
5373 bytes_copied = ((char __user *)to) - optval;
5374 if (put_user(bytes_copied, optlen))
5375 return -EFAULT;
5376
5377 return 0;
5378 }
5379
5380 static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
5381 size_t space_left, int *bytes_copied)
5382 {
5383 struct sctp_sockaddr_entry *addr;
5384 union sctp_addr temp;
5385 int cnt = 0;
5386 int addrlen;
5387 struct net *net = sock_net(sk);
5388
5389 rcu_read_lock();
5390 list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
5391 if (!addr->valid)
5392 continue;
5393
5394 if ((PF_INET == sk->sk_family) &&
5395 (AF_INET6 == addr->a.sa.sa_family))
5396 continue;
5397 if ((PF_INET6 == sk->sk_family) &&
5398 inet_v6_ipv6only(sk) &&
5399 (AF_INET == addr->a.sa.sa_family))
5400 continue;
5401 memcpy(&temp, &addr->a, sizeof(temp));
5402 if (!temp.v4.sin_port)
5403 temp.v4.sin_port = htons(port);
5404
5405 addrlen = sctp_get_pf_specific(sk->sk_family)
5406 ->addr_to_user(sctp_sk(sk), &temp);
5407
5408 if (space_left < addrlen) {
5409 cnt = -ENOMEM;
5410 break;
5411 }
5412 memcpy(to, &temp, addrlen);
5413
5414 to += addrlen;
5415 cnt++;
5416 space_left -= addrlen;
5417 *bytes_copied += addrlen;
5418 }
5419 rcu_read_unlock();
5420
5421 return cnt;
5422 }
5423
5424
5425 static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
5426 char __user *optval, int __user *optlen)
5427 {
5428 struct sctp_bind_addr *bp;
5429 struct sctp_association *asoc;
5430 int cnt = 0;
5431 struct sctp_getaddrs getaddrs;
5432 struct sctp_sockaddr_entry *addr;
5433 void __user *to;
5434 union sctp_addr temp;
5435 struct sctp_sock *sp = sctp_sk(sk);
5436 int addrlen;
5437 int err = 0;
5438 size_t space_left;
5439 int bytes_copied = 0;
5440 void *addrs;
5441 void *buf;
5442
5443 if (len < sizeof(struct sctp_getaddrs))
5444 return -EINVAL;
5445
5446 if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
5447 return -EFAULT;
5448
5449 /*
5450 * For UDP-style sockets, id specifies the association to query.
5451 * If the id field is set to the value '0' then the locally bound
5452 * addresses are returned without regard to any particular
5453 * association.
5454 */
5455 if (0 == getaddrs.assoc_id) {
5456 bp = &sctp_sk(sk)->ep->base.bind_addr;
5457 } else {
5458 asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
5459 if (!asoc)
5460 return -EINVAL;
5461 bp = &asoc->base.bind_addr;
5462 }
5463
5464 to = optval + offsetof(struct sctp_getaddrs, addrs);
5465 space_left = len - offsetof(struct sctp_getaddrs, addrs);
5466
5467 addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN);
5468 if (!addrs)
5469 return -ENOMEM;
5470
5471 /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
5472 * addresses from the global local address list.
5473 */
5474 if (sctp_list_single_entry(&bp->address_list)) {
5475 addr = list_entry(bp->address_list.next,
5476 struct sctp_sockaddr_entry, list);
5477 if (sctp_is_any(sk, &addr->a)) {
5478 cnt = sctp_copy_laddrs(sk, bp->port, addrs,
5479 space_left, &bytes_copied);
5480 if (cnt < 0) {
5481 err = cnt;
5482 goto out;
5483 }
5484 goto copy_getaddrs;
5485 }
5486 }
5487
5488 buf = addrs;
5489 /* Protection on the bound address list is not needed since
5490 * in the socket option context we hold a socket lock and
5491 * thus the bound address list can't change.
5492 */
5493 list_for_each_entry(addr, &bp->address_list, list) {
5494 memcpy(&temp, &addr->a, sizeof(temp));
5495 addrlen = sctp_get_pf_specific(sk->sk_family)
5496 ->addr_to_user(sp, &temp);
5497 if (space_left < addrlen) {
5498 err = -ENOMEM; /*fixme: right error?*/
5499 goto out;
5500 }
5501 memcpy(buf, &temp, addrlen);
5502 buf += addrlen;
5503 bytes_copied += addrlen;
5504 cnt++;
5505 space_left -= addrlen;
5506 }
5507
5508 copy_getaddrs:
5509 if (copy_to_user(to, addrs, bytes_copied)) {
5510 err = -EFAULT;
5511 goto out;
5512 }
5513 if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
5514 err = -EFAULT;
5515 goto out;
5516 }
5517 if (put_user(bytes_copied, optlen))
5518 err = -EFAULT;
5519 out:
5520 kfree(addrs);
5521 return err;
5522 }
5523
5524 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
5525 *
5526 * Requests that the local SCTP stack use the enclosed peer address as
5527 * the association primary. The enclosed address must be one of the
5528 * association peer's addresses.
5529 */
5530 static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
5531 char __user *optval, int __user *optlen)
5532 {
5533 struct sctp_prim prim;
5534 struct sctp_association *asoc;
5535 struct sctp_sock *sp = sctp_sk(sk);
5536
5537 if (len < sizeof(struct sctp_prim))
5538 return -EINVAL;
5539
5540 len = sizeof(struct sctp_prim);
5541
5542 if (copy_from_user(&prim, optval, len))
5543 return -EFAULT;
5544
5545 asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
5546 if (!asoc)
5547 return -EINVAL;
5548
5549 if (!asoc->peer.primary_path)
5550 return -ENOTCONN;
5551
5552 memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
5553 asoc->peer.primary_path->af_specific->sockaddr_len);
5554
5555 sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
5556 (union sctp_addr *)&prim.ssp_addr);
5557
5558 if (put_user(len, optlen))
5559 return -EFAULT;
5560 if (copy_to_user(optval, &prim, len))
5561 return -EFAULT;
5562
5563 return 0;
5564 }
5565
5566 /*
5567 * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
5568 *
5569 * Requests that the local endpoint set the specified Adaptation Layer
5570 * Indication parameter for all future INIT and INIT-ACK exchanges.
5571 */
5572 static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
5573 char __user *optval, int __user *optlen)
5574 {
5575 struct sctp_setadaptation adaptation;
5576
5577 if (len < sizeof(struct sctp_setadaptation))
5578 return -EINVAL;
5579
5580 len = sizeof(struct sctp_setadaptation);
5581
5582 adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
5583
5584 if (put_user(len, optlen))
5585 return -EFAULT;
5586 if (copy_to_user(optval, &adaptation, len))
5587 return -EFAULT;
5588
5589 return 0;
5590 }
5591
5592 /*
5593 *
5594 * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM)
5595 *
5596 * Applications that wish to use the sendto() system call may wish to
5597 * specify a default set of parameters that would normally be supplied
5598 * through the inclusion of ancillary data. This socket option allows
5599 * such an application to set the default sctp_sndrcvinfo structure.
5600
5601
5602 * The application that wishes to use this socket option simply passes
5603 * in to this call the sctp_sndrcvinfo structure defined in Section
5604 * 5.2.2) The input parameters accepted by this call include
5605 * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
5606 * sinfo_timetolive. The user must provide the sinfo_assoc_id field in
5607 * to this call if the caller is using the UDP model.
5608 *
5609 * For getsockopt, it get the default sctp_sndrcvinfo structure.
5610 */
5611 static int sctp_getsockopt_default_send_param(struct sock *sk,
5612 int len, char __user *optval,
5613 int __user *optlen)
5614 {
5615 struct sctp_sock *sp = sctp_sk(sk);
5616 struct sctp_association *asoc;
5617 struct sctp_sndrcvinfo info;
5618
5619 if (len < sizeof(info))
5620 return -EINVAL;
5621
5622 len = sizeof(info);
5623
5624 if (copy_from_user(&info, optval, len))
5625 return -EFAULT;
5626
5627 asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
5628 if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
5629 return -EINVAL;
5630 if (asoc) {
5631 info.sinfo_stream = asoc->default_stream;
5632 info.sinfo_flags = asoc->default_flags;
5633 info.sinfo_ppid = asoc->default_ppid;
5634 info.sinfo_context = asoc->default_context;
5635 info.sinfo_timetolive = asoc->default_timetolive;
5636 } else {
5637 info.sinfo_stream = sp->default_stream;
5638 info.sinfo_flags = sp->default_flags;
5639 info.sinfo_ppid = sp->default_ppid;
5640 info.sinfo_context = sp->default_context;
5641 info.sinfo_timetolive = sp->default_timetolive;
5642 }
5643
5644 if (put_user(len, optlen))
5645 return -EFAULT;
5646 if (copy_to_user(optval, &info, len))
5647 return -EFAULT;
5648
5649 return 0;
5650 }
5651
5652 /* RFC6458, Section 8.1.31. Set/get Default Send Parameters
5653 * (SCTP_DEFAULT_SNDINFO)
5654 */
5655 static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
5656 char __user *optval,
5657 int __user *optlen)
5658 {
5659 struct sctp_sock *sp = sctp_sk(sk);
5660 struct sctp_association *asoc;
5661 struct sctp_sndinfo info;
5662
5663 if (len < sizeof(info))
5664 return -EINVAL;
5665
5666 len = sizeof(info);
5667
5668 if (copy_from_user(&info, optval, len))
5669 return -EFAULT;
5670
5671 asoc = sctp_id2assoc(sk, info.snd_assoc_id);
5672 if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
5673 return -EINVAL;
5674 if (asoc) {
5675 info.snd_sid = asoc->default_stream;
5676 info.snd_flags = asoc->default_flags;
5677 info.snd_ppid = asoc->default_ppid;
5678 info.snd_context = asoc->default_context;
5679 } else {
5680 info.snd_sid = sp->default_stream;
5681 info.snd_flags = sp->default_flags;
5682 info.snd_ppid = sp->default_ppid;
5683 info.snd_context = sp->default_context;
5684 }
5685
5686 if (put_user(len, optlen))
5687 return -EFAULT;
5688 if (copy_to_user(optval, &info, len))
5689 return -EFAULT;
5690
5691 return 0;
5692 }
5693
5694 /*
5695 *
5696 * 7.1.5 SCTP_NODELAY
5697 *
5698 * Turn on/off any Nagle-like algorithm. This means that packets are
5699 * generally sent as soon as possible and no unnecessary delays are
5700 * introduced, at the cost of more packets in the network. Expects an
5701 * integer boolean flag.
5702 */
5703
5704 static int sctp_getsockopt_nodelay(struct sock *sk, int len,
5705 char __user *optval, int __user *optlen)
5706 {
5707 int val;
5708
5709 if (len < sizeof(int))
5710 return -EINVAL;
5711
5712 len = sizeof(int);
5713 val = (sctp_sk(sk)->nodelay == 1);
5714 if (put_user(len, optlen))
5715 return -EFAULT;
5716 if (copy_to_user(optval, &val, len))
5717 return -EFAULT;
5718 return 0;
5719 }
5720
5721 /*
5722 *
5723 * 7.1.1 SCTP_RTOINFO
5724 *
5725 * The protocol parameters used to initialize and bound retransmission
5726 * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access
5727 * and modify these parameters.
5728 * All parameters are time values, in milliseconds. A value of 0, when
5729 * modifying the parameters, indicates that the current value should not
5730 * be changed.
5731 *
5732 */
5733 static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
5734 char __user *optval,
5735 int __user *optlen) {
5736 struct sctp_rtoinfo rtoinfo;
5737 struct sctp_association *asoc;
5738
5739 if (len < sizeof (struct sctp_rtoinfo))
5740 return -EINVAL;
5741
5742 len = sizeof(struct sctp_rtoinfo);
5743
5744 if (copy_from_user(&rtoinfo, optval, len))
5745 return -EFAULT;
5746
5747 asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
5748
5749 if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
5750 return -EINVAL;
5751
5752 /* Values corresponding to the specific association. */
5753 if (asoc) {
5754 rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
5755 rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
5756 rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
5757 } else {
5758 /* Values corresponding to the endpoint. */
5759 struct sctp_sock *sp = sctp_sk(sk);
5760
5761 rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
5762 rtoinfo.srto_max = sp->rtoinfo.srto_max;
5763 rtoinfo.srto_min = sp->rtoinfo.srto_min;
5764 }
5765
5766 if (put_user(len, optlen))
5767 return -EFAULT;
5768
5769 if (copy_to_user(optval, &rtoinfo, len))
5770 return -EFAULT;
5771
5772 return 0;
5773 }
5774
5775 /*
5776 *
5777 * 7.1.2 SCTP_ASSOCINFO
5778 *
5779 * This option is used to tune the maximum retransmission attempts
5780 * of the association.
5781 * Returns an error if the new association retransmission value is
5782 * greater than the sum of the retransmission value of the peer.
5783 * See [SCTP] for more information.
5784 *
5785 */
5786 static int sctp_getsockopt_associnfo(struct sock *sk, int len,
5787 char __user *optval,
5788 int __user *optlen)
5789 {
5790
5791 struct sctp_assocparams assocparams;
5792 struct sctp_association *asoc;
5793 struct list_head *pos;
5794 int cnt = 0;
5795
5796 if (len < sizeof (struct sctp_assocparams))
5797 return -EINVAL;
5798
5799 len = sizeof(struct sctp_assocparams);
5800
5801 if (copy_from_user(&assocparams, optval, len))
5802 return -EFAULT;
5803
5804 asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
5805
5806 if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
5807 return -EINVAL;
5808
5809 /* Values correspoinding to the specific association */
5810 if (asoc) {
5811 assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
5812 assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
5813 assocparams.sasoc_local_rwnd = asoc->a_rwnd;
5814 assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
5815
5816 list_for_each(pos, &asoc->peer.transport_addr_list) {
5817 cnt++;
5818 }
5819
5820 assocparams.sasoc_number_peer_destinations = cnt;
5821 } else {
5822 /* Values corresponding to the endpoint */
5823 struct sctp_sock *sp = sctp_sk(sk);
5824
5825 assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
5826 assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
5827 assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
5828 assocparams.sasoc_cookie_life =
5829 sp->assocparams.sasoc_cookie_life;
5830 assocparams.sasoc_number_peer_destinations =
5831 sp->assocparams.
5832 sasoc_number_peer_destinations;
5833 }
5834
5835 if (put_user(len, optlen))
5836 return -EFAULT;
5837
5838 if (copy_to_user(optval, &assocparams, len))
5839 return -EFAULT;
5840
5841 return 0;
5842 }
5843
5844 /*
5845 * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
5846 *
5847 * This socket option is a boolean flag which turns on or off mapped V4
5848 * addresses. If this option is turned on and the socket is type
5849 * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
5850 * If this option is turned off, then no mapping will be done of V4
5851 * addresses and a user will receive both PF_INET6 and PF_INET type
5852 * addresses on the socket.
5853 */
5854 static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
5855 char __user *optval, int __user *optlen)
5856 {
5857 int val;
5858 struct sctp_sock *sp = sctp_sk(sk);
5859
5860 if (len < sizeof(int))
5861 return -EINVAL;
5862
5863 len = sizeof(int);
5864 val = sp->v4mapped;
5865 if (put_user(len, optlen))
5866 return -EFAULT;
5867 if (copy_to_user(optval, &val, len))
5868 return -EFAULT;
5869
5870 return 0;
5871 }
5872
5873 /*
5874 * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
5875 * (chapter and verse is quoted at sctp_setsockopt_context())
5876 */
5877 static int sctp_getsockopt_context(struct sock *sk, int len,
5878 char __user *optval, int __user *optlen)
5879 {
5880 struct sctp_assoc_value params;
5881 struct sctp_sock *sp;
5882 struct sctp_association *asoc;
5883
5884 if (len < sizeof(struct sctp_assoc_value))
5885 return -EINVAL;
5886
5887 len = sizeof(struct sctp_assoc_value);
5888
5889 if (copy_from_user(&params, optval, len))
5890 return -EFAULT;
5891
5892 sp = sctp_sk(sk);
5893
5894 if (params.assoc_id != 0) {
5895 asoc = sctp_id2assoc(sk, params.assoc_id);
5896 if (!asoc)
5897 return -EINVAL;
5898 params.assoc_value = asoc->default_rcv_context;
5899 } else {
5900 params.assoc_value = sp->default_rcv_context;
5901 }
5902
5903 if (put_user(len, optlen))
5904 return -EFAULT;
5905 if (copy_to_user(optval, &params, len))
5906 return -EFAULT;
5907
5908 return 0;
5909 }
5910
5911 /*
5912 * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
5913 * This option will get or set the maximum size to put in any outgoing
5914 * SCTP DATA chunk. If a message is larger than this size it will be
5915 * fragmented by SCTP into the specified size. Note that the underlying
5916 * SCTP implementation may fragment into smaller sized chunks when the
5917 * PMTU of the underlying association is smaller than the value set by
5918 * the user. The default value for this option is '0' which indicates
5919 * the user is NOT limiting fragmentation and only the PMTU will effect
5920 * SCTP's choice of DATA chunk size. Note also that values set larger
5921 * than the maximum size of an IP datagram will effectively let SCTP
5922 * control fragmentation (i.e. the same as setting this option to 0).
5923 *
5924 * The following structure is used to access and modify this parameter:
5925 *
5926 * struct sctp_assoc_value {
5927 * sctp_assoc_t assoc_id;
5928 * uint32_t assoc_value;
5929 * };
5930 *
5931 * assoc_id: This parameter is ignored for one-to-one style sockets.
5932 * For one-to-many style sockets this parameter indicates which
5933 * association the user is performing an action upon. Note that if
5934 * this field's value is zero then the endpoints default value is
5935 * changed (effecting future associations only).
5936 * assoc_value: This parameter specifies the maximum size in bytes.
5937 */
5938 static int sctp_getsockopt_maxseg(struct sock *sk, int len,
5939 char __user *optval, int __user *optlen)
5940 {
5941 struct sctp_assoc_value params;
5942 struct sctp_association *asoc;
5943
5944 if (len == sizeof(int)) {
5945 pr_warn_ratelimited(DEPRECATED
5946 "%s (pid %d) "
5947 "Use of int in maxseg socket option.\n"
5948 "Use struct sctp_assoc_value instead\n",
5949 current->comm, task_pid_nr(current));
5950 params.assoc_id = 0;
5951 } else if (len >= sizeof(struct sctp_assoc_value)) {
5952 len = sizeof(struct sctp_assoc_value);
5953 if (copy_from_user(&params, optval, sizeof(params)))
5954 return -EFAULT;
5955 } else
5956 return -EINVAL;
5957
5958 asoc = sctp_id2assoc(sk, params.assoc_id);
5959 if (!asoc && params.assoc_id && sctp_style(sk, UDP))
5960 return -EINVAL;
5961
5962 if (asoc)
5963 params.assoc_value = asoc->frag_point;
5964 else
5965 params.assoc_value = sctp_sk(sk)->user_frag;
5966
5967 if (put_user(len, optlen))
5968 return -EFAULT;
5969 if (len == sizeof(int)) {
5970 if (copy_to_user(optval, &params.assoc_value, len))
5971 return -EFAULT;
5972 } else {
5973 if (copy_to_user(optval, &params, len))
5974 return -EFAULT;
5975 }
5976
5977 return 0;
5978 }
5979
5980 /*
5981 * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
5982 * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
5983 */
5984 static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
5985 char __user *optval, int __user *optlen)
5986 {
5987 int val;
5988
5989 if (len < sizeof(int))
5990 return -EINVAL;
5991
5992 len = sizeof(int);
5993
5994 val = sctp_sk(sk)->frag_interleave;
5995 if (put_user(len, optlen))
5996 return -EFAULT;
5997 if (copy_to_user(optval, &val, len))
5998 return -EFAULT;
5999
6000 return 0;
6001 }
6002
6003 /*
6004 * 7.1.25. Set or Get the sctp partial delivery point
6005 * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
6006 */
6007 static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
6008 char __user *optval,
6009 int __user *optlen)
6010 {
6011 u32 val;
6012
6013 if (len < sizeof(u32))
6014 return -EINVAL;
6015
6016 len = sizeof(u32);
6017
6018 val = sctp_sk(sk)->pd_point;
6019 if (put_user(len, optlen))
6020 return -EFAULT;
6021 if (copy_to_user(optval, &val, len))
6022 return -EFAULT;
6023
6024 return 0;
6025 }
6026
6027 /*
6028 * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST)
6029 * (chapter and verse is quoted at sctp_setsockopt_maxburst())
6030 */
6031 static int sctp_getsockopt_maxburst(struct sock *sk, int len,
6032 char __user *optval,
6033 int __user *optlen)
6034 {
6035 struct sctp_assoc_value params;
6036 struct sctp_sock *sp;
6037 struct sctp_association *asoc;
6038
6039 if (len == sizeof(int)) {
6040 pr_warn_ratelimited(DEPRECATED
6041 "%s (pid %d) "
6042 "Use of int in max_burst socket option.\n"
6043 "Use struct sctp_assoc_value instead\n",
6044 current->comm, task_pid_nr(current));
6045 params.assoc_id = 0;
6046 } else if (len >= sizeof(struct sctp_assoc_value)) {
6047 len = sizeof(struct sctp_assoc_value);
6048 if (copy_from_user(&params, optval, len))
6049 return -EFAULT;
6050 } else
6051 return -EINVAL;
6052
6053 sp = sctp_sk(sk);
6054
6055 if (params.assoc_id != 0) {
6056 asoc = sctp_id2assoc(sk, params.assoc_id);
6057 if (!asoc)
6058 return -EINVAL;
6059 params.assoc_value = asoc->max_burst;
6060 } else
6061 params.assoc_value = sp->max_burst;
6062
6063 if (len == sizeof(int)) {
6064 if (copy_to_user(optval, &params.assoc_value, len))
6065 return -EFAULT;
6066 } else {
6067 if (copy_to_user(optval, &params, len))
6068 return -EFAULT;
6069 }
6070
6071 return 0;
6072
6073 }
6074
6075 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
6076 char __user *optval, int __user *optlen)
6077 {
6078 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6079 struct sctp_hmacalgo __user *p = (void __user *)optval;
6080 struct sctp_hmac_algo_param *hmacs;
6081 __u16 data_len = 0;
6082 u32 num_idents;
6083 int i;
6084
6085 if (!ep->auth_enable)
6086 return -EACCES;
6087
6088 hmacs = ep->auth_hmacs_list;
6089 data_len = ntohs(hmacs->param_hdr.length) -
6090 sizeof(struct sctp_paramhdr);
6091
6092 if (len < sizeof(struct sctp_hmacalgo) + data_len)
6093 return -EINVAL;
6094
6095 len = sizeof(struct sctp_hmacalgo) + data_len;
6096 num_idents = data_len / sizeof(u16);
6097
6098 if (put_user(len, optlen))
6099 return -EFAULT;
6100 if (put_user(num_idents, &p->shmac_num_idents))
6101 return -EFAULT;
6102 for (i = 0; i < num_idents; i++) {
6103 __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
6104
6105 if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
6106 return -EFAULT;
6107 }
6108 return 0;
6109 }
6110
6111 static int sctp_getsockopt_active_key(struct sock *sk, int len,
6112 char __user *optval, int __user *optlen)
6113 {
6114 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6115 struct sctp_authkeyid val;
6116 struct sctp_association *asoc;
6117
6118 if (!ep->auth_enable)
6119 return -EACCES;
6120
6121 if (len < sizeof(struct sctp_authkeyid))
6122 return -EINVAL;
6123 if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
6124 return -EFAULT;
6125
6126 asoc = sctp_id2assoc(sk, val.scact_assoc_id);
6127 if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
6128 return -EINVAL;
6129
6130 if (asoc)
6131 val.scact_keynumber = asoc->active_key_id;
6132 else
6133 val.scact_keynumber = ep->active_key_id;
6134
6135 len = sizeof(struct sctp_authkeyid);
6136 if (put_user(len, optlen))
6137 return -EFAULT;
6138 if (copy_to_user(optval, &val, len))
6139 return -EFAULT;
6140
6141 return 0;
6142 }
6143
6144 static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
6145 char __user *optval, int __user *optlen)
6146 {
6147 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6148 struct sctp_authchunks __user *p = (void __user *)optval;
6149 struct sctp_authchunks val;
6150 struct sctp_association *asoc;
6151 struct sctp_chunks_param *ch;
6152 u32 num_chunks = 0;
6153 char __user *to;
6154
6155 if (!ep->auth_enable)
6156 return -EACCES;
6157
6158 if (len < sizeof(struct sctp_authchunks))
6159 return -EINVAL;
6160
6161 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
6162 return -EFAULT;
6163
6164 to = p->gauth_chunks;
6165 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
6166 if (!asoc)
6167 return -EINVAL;
6168
6169 ch = asoc->peer.peer_chunks;
6170 if (!ch)
6171 goto num;
6172
6173 /* See if the user provided enough room for all the data */
6174 num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
6175 if (len < num_chunks)
6176 return -EINVAL;
6177
6178 if (copy_to_user(to, ch->chunks, num_chunks))
6179 return -EFAULT;
6180 num:
6181 len = sizeof(struct sctp_authchunks) + num_chunks;
6182 if (put_user(len, optlen))
6183 return -EFAULT;
6184 if (put_user(num_chunks, &p->gauth_number_of_chunks))
6185 return -EFAULT;
6186 return 0;
6187 }
6188
6189 static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
6190 char __user *optval, int __user *optlen)
6191 {
6192 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
6193 struct sctp_authchunks __user *p = (void __user *)optval;
6194 struct sctp_authchunks val;
6195 struct sctp_association *asoc;
6196 struct sctp_chunks_param *ch;
6197 u32 num_chunks = 0;
6198 char __user *to;
6199
6200 if (!ep->auth_enable)
6201 return -EACCES;
6202
6203 if (len < sizeof(struct sctp_authchunks))
6204 return -EINVAL;
6205
6206 if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
6207 return -EFAULT;
6208
6209 to = p->gauth_chunks;
6210 asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
6211 if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
6212 return -EINVAL;
6213
6214 if (asoc)
6215 ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
6216 else
6217 ch = ep->auth_chunk_list;
6218
6219 if (!ch)
6220 goto num;
6221
6222 num_chunks = ntohs(ch->param_hdr.length) - sizeof(struct sctp_paramhdr);
6223 if (len < sizeof(struct sctp_authchunks) + num_chunks)
6224 return -EINVAL;
6225
6226 if (copy_to_user(to, ch->chunks, num_chunks))
6227 return -EFAULT;
6228 num:
6229 len = sizeof(struct sctp_authchunks) + num_chunks;
6230 if (put_user(len, optlen))
6231 return -EFAULT;
6232 if (put_user(num_chunks, &p->gauth_number_of_chunks))
6233 return -EFAULT;
6234
6235 return 0;
6236 }
6237
6238 /*
6239 * 8.2.5. Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
6240 * This option gets the current number of associations that are attached
6241 * to a one-to-many style socket. The option value is an uint32_t.
6242 */
6243 static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
6244 char __user *optval, int __user *optlen)
6245 {
6246 struct sctp_sock *sp = sctp_sk(sk);
6247 struct sctp_association *asoc;
6248 u32 val = 0;
6249
6250 if (sctp_style(sk, TCP))
6251 return -EOPNOTSUPP;
6252
6253 if (len < sizeof(u32))
6254 return -EINVAL;
6255
6256 len = sizeof(u32);
6257
6258 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
6259 val++;
6260 }
6261
6262 if (put_user(len, optlen))
6263 return -EFAULT;
6264 if (copy_to_user(optval, &val, len))
6265 return -EFAULT;
6266
6267 return 0;
6268 }
6269
6270 /*
6271 * 8.1.23 SCTP_AUTO_ASCONF
6272 * See the corresponding setsockopt entry as description
6273 */
6274 static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
6275 char __user *optval, int __user *optlen)
6276 {
6277 int val = 0;
6278
6279 if (len < sizeof(int))
6280 return -EINVAL;
6281
6282 len = sizeof(int);
6283 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
6284 val = 1;
6285 if (put_user(len, optlen))
6286 return -EFAULT;
6287 if (copy_to_user(optval, &val, len))
6288 return -EFAULT;
6289 return 0;
6290 }
6291
6292 /*
6293 * 8.2.6. Get the Current Identifiers of Associations
6294 * (SCTP_GET_ASSOC_ID_LIST)
6295 *
6296 * This option gets the current list of SCTP association identifiers of
6297 * the SCTP associations handled by a one-to-many style socket.
6298 */
6299 static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
6300 char __user *optval, int __user *optlen)
6301 {
6302 struct sctp_sock *sp = sctp_sk(sk);
6303 struct sctp_association *asoc;
6304 struct sctp_assoc_ids *ids;
6305 u32 num = 0;
6306
6307 if (sctp_style(sk, TCP))
6308 return -EOPNOTSUPP;
6309
6310 if (len < sizeof(struct sctp_assoc_ids))
6311 return -EINVAL;
6312
6313 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
6314 num++;
6315 }
6316
6317 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
6318 return -EINVAL;
6319
6320 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
6321
6322 ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
6323 if (unlikely(!ids))
6324 return -ENOMEM;
6325
6326 ids->gaids_number_of_ids = num;
6327 num = 0;
6328 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
6329 ids->gaids_assoc_id[num++] = asoc->assoc_id;
6330 }
6331
6332 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
6333 kfree(ids);
6334 return -EFAULT;
6335 }
6336
6337 kfree(ids);
6338 return 0;
6339 }
6340
6341 /*
6342 * SCTP_PEER_ADDR_THLDS
6343 *
6344 * This option allows us to fetch the partially failed threshold for one or all
6345 * transports in an association. See Section 6.1 of:
6346 * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
6347 */
6348 static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
6349 char __user *optval,
6350 int len,
6351 int __user *optlen)
6352 {
6353 struct sctp_paddrthlds val;
6354 struct sctp_transport *trans;
6355 struct sctp_association *asoc;
6356
6357 if (len < sizeof(struct sctp_paddrthlds))
6358 return -EINVAL;
6359 len = sizeof(struct sctp_paddrthlds);
6360 if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
6361 return -EFAULT;
6362
6363 if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
6364 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
6365 if (!asoc)
6366 return -ENOENT;
6367
6368 val.spt_pathpfthld = asoc->pf_retrans;
6369 val.spt_pathmaxrxt = asoc->pathmaxrxt;
6370 } else {
6371 trans = sctp_addr_id2transport(sk, &val.spt_address,
6372 val.spt_assoc_id);
6373 if (!trans)
6374 return -ENOENT;
6375
6376 val.spt_pathmaxrxt = trans->pathmaxrxt;
6377 val.spt_pathpfthld = trans->pf_retrans;
6378 }
6379
6380 if (put_user(len, optlen) || copy_to_user(optval, &val, len))
6381 return -EFAULT;
6382
6383 return 0;
6384 }
6385
6386 /*
6387 * SCTP_GET_ASSOC_STATS
6388 *
6389 * This option retrieves local per endpoint statistics. It is modeled
6390 * after OpenSolaris' implementation
6391 */
6392 static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
6393 char __user *optval,
6394 int __user *optlen)
6395 {
6396 struct sctp_assoc_stats sas;
6397 struct sctp_association *asoc = NULL;
6398
6399 /* User must provide at least the assoc id */
6400 if (len < sizeof(sctp_assoc_t))
6401 return -EINVAL;
6402
6403 /* Allow the struct to grow and fill in as much as possible */
6404 len = min_t(size_t, len, sizeof(sas));
6405
6406 if (copy_from_user(&sas, optval, len))
6407 return -EFAULT;
6408
6409 asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
6410 if (!asoc)
6411 return -EINVAL;
6412
6413 sas.sas_rtxchunks = asoc->stats.rtxchunks;
6414 sas.sas_gapcnt = asoc->stats.gapcnt;
6415 sas.sas_outofseqtsns = asoc->stats.outofseqtsns;
6416 sas.sas_osacks = asoc->stats.osacks;
6417 sas.sas_isacks = asoc->stats.isacks;
6418 sas.sas_octrlchunks = asoc->stats.octrlchunks;
6419 sas.sas_ictrlchunks = asoc->stats.ictrlchunks;
6420 sas.sas_oodchunks = asoc->stats.oodchunks;
6421 sas.sas_iodchunks = asoc->stats.iodchunks;
6422 sas.sas_ouodchunks = asoc->stats.ouodchunks;
6423 sas.sas_iuodchunks = asoc->stats.iuodchunks;
6424 sas.sas_idupchunks = asoc->stats.idupchunks;
6425 sas.sas_opackets = asoc->stats.opackets;
6426 sas.sas_ipackets = asoc->stats.ipackets;
6427
6428 /* New high max rto observed, will return 0 if not a single
6429 * RTO update took place. obs_rto_ipaddr will be bogus
6430 * in such a case
6431 */
6432 sas.sas_maxrto = asoc->stats.max_obs_rto;
6433 memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr,
6434 sizeof(struct sockaddr_storage));
6435
6436 /* Mark beginning of a new observation period */
6437 asoc->stats.max_obs_rto = asoc->rto_min;
6438
6439 if (put_user(len, optlen))
6440 return -EFAULT;
6441
6442 pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id);
6443
6444 if (copy_to_user(optval, &sas, len))
6445 return -EFAULT;
6446
6447 return 0;
6448 }
6449
6450 static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
6451 char __user *optval,
6452 int __user *optlen)
6453 {
6454 int val = 0;
6455
6456 if (len < sizeof(int))
6457 return -EINVAL;
6458
6459 len = sizeof(int);
6460 if (sctp_sk(sk)->recvrcvinfo)
6461 val = 1;
6462 if (put_user(len, optlen))
6463 return -EFAULT;
6464 if (copy_to_user(optval, &val, len))
6465 return -EFAULT;
6466
6467 return 0;
6468 }
6469
6470 static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len,
6471 char __user *optval,
6472 int __user *optlen)
6473 {
6474 int val = 0;
6475
6476 if (len < sizeof(int))
6477 return -EINVAL;
6478
6479 len = sizeof(int);
6480 if (sctp_sk(sk)->recvnxtinfo)
6481 val = 1;
6482 if (put_user(len, optlen))
6483 return -EFAULT;
6484 if (copy_to_user(optval, &val, len))
6485 return -EFAULT;
6486
6487 return 0;
6488 }
6489
6490 static int sctp_getsockopt_pr_supported(struct sock *sk, int len,
6491 char __user *optval,
6492 int __user *optlen)
6493 {
6494 struct sctp_assoc_value params;
6495 struct sctp_association *asoc;
6496 int retval = -EFAULT;
6497
6498 if (len < sizeof(params)) {
6499 retval = -EINVAL;
6500 goto out;
6501 }
6502
6503 len = sizeof(params);
6504 if (copy_from_user(&params, optval, len))
6505 goto out;
6506
6507 asoc = sctp_id2assoc(sk, params.assoc_id);
6508 if (asoc) {
6509 params.assoc_value = asoc->prsctp_enable;
6510 } else if (!params.assoc_id) {
6511 struct sctp_sock *sp = sctp_sk(sk);
6512
6513 params.assoc_value = sp->ep->prsctp_enable;
6514 } else {
6515 retval = -EINVAL;
6516 goto out;
6517 }
6518
6519 if (put_user(len, optlen))
6520 goto out;
6521
6522 if (copy_to_user(optval, &params, len))
6523 goto out;
6524
6525 retval = 0;
6526
6527 out:
6528 return retval;
6529 }
6530
6531 static int sctp_getsockopt_default_prinfo(struct sock *sk, int len,
6532 char __user *optval,
6533 int __user *optlen)
6534 {
6535 struct sctp_default_prinfo info;
6536 struct sctp_association *asoc;
6537 int retval = -EFAULT;
6538
6539 if (len < sizeof(info)) {
6540 retval = -EINVAL;
6541 goto out;
6542 }
6543
6544 len = sizeof(info);
6545 if (copy_from_user(&info, optval, len))
6546 goto out;
6547
6548 asoc = sctp_id2assoc(sk, info.pr_assoc_id);
6549 if (asoc) {
6550 info.pr_policy = SCTP_PR_POLICY(asoc->default_flags);
6551 info.pr_value = asoc->default_timetolive;
6552 } else if (!info.pr_assoc_id) {
6553 struct sctp_sock *sp = sctp_sk(sk);
6554
6555 info.pr_policy = SCTP_PR_POLICY(sp->default_flags);
6556 info.pr_value = sp->default_timetolive;
6557 } else {
6558 retval = -EINVAL;
6559 goto out;
6560 }
6561
6562 if (put_user(len, optlen))
6563 goto out;
6564
6565 if (copy_to_user(optval, &info, len))
6566 goto out;
6567
6568 retval = 0;
6569
6570 out:
6571 return retval;
6572 }
6573
6574 static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
6575 char __user *optval,
6576 int __user *optlen)
6577 {
6578 struct sctp_prstatus params;
6579 struct sctp_association *asoc;
6580 int policy;
6581 int retval = -EINVAL;
6582
6583 if (len < sizeof(params))
6584 goto out;
6585
6586 len = sizeof(params);
6587 if (copy_from_user(&params, optval, len)) {
6588 retval = -EFAULT;
6589 goto out;
6590 }
6591
6592 policy = params.sprstat_policy;
6593 if (policy & ~SCTP_PR_SCTP_MASK)
6594 goto out;
6595
6596 asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
6597 if (!asoc)
6598 goto out;
6599
6600 if (policy == SCTP_PR_SCTP_NONE) {
6601 params.sprstat_abandoned_unsent = 0;
6602 params.sprstat_abandoned_sent = 0;
6603 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
6604 params.sprstat_abandoned_unsent +=
6605 asoc->abandoned_unsent[policy];
6606 params.sprstat_abandoned_sent +=
6607 asoc->abandoned_sent[policy];
6608 }
6609 } else {
6610 params.sprstat_abandoned_unsent =
6611 asoc->abandoned_unsent[__SCTP_PR_INDEX(policy)];
6612 params.sprstat_abandoned_sent =
6613 asoc->abandoned_sent[__SCTP_PR_INDEX(policy)];
6614 }
6615
6616 if (put_user(len, optlen)) {
6617 retval = -EFAULT;
6618 goto out;
6619 }
6620
6621 if (copy_to_user(optval, &params, len)) {
6622 retval = -EFAULT;
6623 goto out;
6624 }
6625
6626 retval = 0;
6627
6628 out:
6629 return retval;
6630 }
6631
6632 static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
6633 char __user *optval,
6634 int __user *optlen)
6635 {
6636 struct sctp_stream_out *streamout;
6637 struct sctp_association *asoc;
6638 struct sctp_prstatus params;
6639 int retval = -EINVAL;
6640 int policy;
6641
6642 if (len < sizeof(params))
6643 goto out;
6644
6645 len = sizeof(params);
6646 if (copy_from_user(&params, optval, len)) {
6647 retval = -EFAULT;
6648 goto out;
6649 }
6650
6651 policy = params.sprstat_policy;
6652 if (policy & ~SCTP_PR_SCTP_MASK)
6653 goto out;
6654
6655 asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
6656 if (!asoc || params.sprstat_sid >= asoc->stream.outcnt)
6657 goto out;
6658
6659 streamout = &asoc->stream.out[params.sprstat_sid];
6660 if (policy == SCTP_PR_SCTP_NONE) {
6661 params.sprstat_abandoned_unsent = 0;
6662 params.sprstat_abandoned_sent = 0;
6663 for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
6664 params.sprstat_abandoned_unsent +=
6665 streamout->abandoned_unsent[policy];
6666 params.sprstat_abandoned_sent +=
6667 streamout->abandoned_sent[policy];
6668 }
6669 } else {
6670 params.sprstat_abandoned_unsent =
6671 streamout->abandoned_unsent[__SCTP_PR_INDEX(policy)];
6672 params.sprstat_abandoned_sent =
6673 streamout->abandoned_sent[__SCTP_PR_INDEX(policy)];
6674 }
6675
6676 if (put_user(len, optlen) || copy_to_user(optval, &params, len)) {
6677 retval = -EFAULT;
6678 goto out;
6679 }
6680
6681 retval = 0;
6682
6683 out:
6684 return retval;
6685 }
6686
6687 static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
6688 char __user *optval,
6689 int __user *optlen)
6690 {
6691 struct sctp_assoc_value params;
6692 struct sctp_association *asoc;
6693 int retval = -EFAULT;
6694
6695 if (len < sizeof(params)) {
6696 retval = -EINVAL;
6697 goto out;
6698 }
6699
6700 len = sizeof(params);
6701 if (copy_from_user(&params, optval, len))
6702 goto out;
6703
6704 asoc = sctp_id2assoc(sk, params.assoc_id);
6705 if (asoc) {
6706 params.assoc_value = asoc->reconf_enable;
6707 } else if (!params.assoc_id) {
6708 struct sctp_sock *sp = sctp_sk(sk);
6709
6710 params.assoc_value = sp->ep->reconf_enable;
6711 } else {
6712 retval = -EINVAL;
6713 goto out;
6714 }
6715
6716 if (put_user(len, optlen))
6717 goto out;
6718
6719 if (copy_to_user(optval, &params, len))
6720 goto out;
6721
6722 retval = 0;
6723
6724 out:
6725 return retval;
6726 }
6727
6728 static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
6729 char __user *optval,
6730 int __user *optlen)
6731 {
6732 struct sctp_assoc_value params;
6733 struct sctp_association *asoc;
6734 int retval = -EFAULT;
6735
6736 if (len < sizeof(params)) {
6737 retval = -EINVAL;
6738 goto out;
6739 }
6740
6741 len = sizeof(params);
6742 if (copy_from_user(&params, optval, len))
6743 goto out;
6744
6745 asoc = sctp_id2assoc(sk, params.assoc_id);
6746 if (asoc) {
6747 params.assoc_value = asoc->strreset_enable;
6748 } else if (!params.assoc_id) {
6749 struct sctp_sock *sp = sctp_sk(sk);
6750
6751 params.assoc_value = sp->ep->strreset_enable;
6752 } else {
6753 retval = -EINVAL;
6754 goto out;
6755 }
6756
6757 if (put_user(len, optlen))
6758 goto out;
6759
6760 if (copy_to_user(optval, &params, len))
6761 goto out;
6762
6763 retval = 0;
6764
6765 out:
6766 return retval;
6767 }
6768
6769 static int sctp_getsockopt(struct sock *sk, int level, int optname,
6770 char __user *optval, int __user *optlen)
6771 {
6772 int retval = 0;
6773 int len;
6774
6775 pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
6776
6777 /* I can hardly begin to describe how wrong this is. This is
6778 * so broken as to be worse than useless. The API draft
6779 * REALLY is NOT helpful here... I am not convinced that the
6780 * semantics of getsockopt() with a level OTHER THAN SOL_SCTP
6781 * are at all well-founded.
6782 */
6783 if (level != SOL_SCTP) {
6784 struct sctp_af *af = sctp_sk(sk)->pf->af;
6785
6786 retval = af->getsockopt(sk, level, optname, optval, optlen);
6787 return retval;
6788 }
6789
6790 if (get_user(len, optlen))
6791 return -EFAULT;
6792
6793 if (len < 0)
6794 return -EINVAL;
6795
6796 lock_sock(sk);
6797
6798 switch (optname) {
6799 case SCTP_STATUS:
6800 retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
6801 break;
6802 case SCTP_DISABLE_FRAGMENTS:
6803 retval = sctp_getsockopt_disable_fragments(sk, len, optval,
6804 optlen);
6805 break;
6806 case SCTP_EVENTS:
6807 retval = sctp_getsockopt_events(sk, len, optval, optlen);
6808 break;
6809 case SCTP_AUTOCLOSE:
6810 retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
6811 break;
6812 case SCTP_SOCKOPT_PEELOFF:
6813 retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
6814 break;
6815 case SCTP_SOCKOPT_PEELOFF_FLAGS:
6816 retval = sctp_getsockopt_peeloff_flags(sk, len, optval, optlen);
6817 break;
6818 case SCTP_PEER_ADDR_PARAMS:
6819 retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
6820 optlen);
6821 break;
6822 case SCTP_DELAYED_SACK:
6823 retval = sctp_getsockopt_delayed_ack(sk, len, optval,
6824 optlen);
6825 break;
6826 case SCTP_INITMSG:
6827 retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
6828 break;
6829 case SCTP_GET_PEER_ADDRS:
6830 retval = sctp_getsockopt_peer_addrs(sk, len, optval,
6831 optlen);
6832 break;
6833 case SCTP_GET_LOCAL_ADDRS:
6834 retval = sctp_getsockopt_local_addrs(sk, len, optval,
6835 optlen);
6836 break;
6837 case SCTP_SOCKOPT_CONNECTX3:
6838 retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
6839 break;
6840 case SCTP_DEFAULT_SEND_PARAM:
6841 retval = sctp_getsockopt_default_send_param(sk, len,
6842 optval, optlen);
6843 break;
6844 case SCTP_DEFAULT_SNDINFO:
6845 retval = sctp_getsockopt_default_sndinfo(sk, len,
6846 optval, optlen);
6847 break;
6848 case SCTP_PRIMARY_ADDR:
6849 retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
6850 break;
6851 case SCTP_NODELAY:
6852 retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
6853 break;
6854 case SCTP_RTOINFO:
6855 retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
6856 break;
6857 case SCTP_ASSOCINFO:
6858 retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
6859 break;
6860 case SCTP_I_WANT_MAPPED_V4_ADDR:
6861 retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
6862 break;
6863 case SCTP_MAXSEG:
6864 retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
6865 break;
6866 case SCTP_GET_PEER_ADDR_INFO:
6867 retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
6868 optlen);
6869 break;
6870 case SCTP_ADAPTATION_LAYER:
6871 retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
6872 optlen);
6873 break;
6874 case SCTP_CONTEXT:
6875 retval = sctp_getsockopt_context(sk, len, optval, optlen);
6876 break;
6877 case SCTP_FRAGMENT_INTERLEAVE:
6878 retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
6879 optlen);
6880 break;
6881 case SCTP_PARTIAL_DELIVERY_POINT:
6882 retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
6883 optlen);
6884 break;
6885 case SCTP_MAX_BURST:
6886 retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
6887 break;
6888 case SCTP_AUTH_KEY:
6889 case SCTP_AUTH_CHUNK:
6890 case SCTP_AUTH_DELETE_KEY:
6891 retval = -EOPNOTSUPP;
6892 break;
6893 case SCTP_HMAC_IDENT:
6894 retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
6895 break;
6896 case SCTP_AUTH_ACTIVE_KEY:
6897 retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
6898 break;
6899 case SCTP_PEER_AUTH_CHUNKS:
6900 retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
6901 optlen);
6902 break;
6903 case SCTP_LOCAL_AUTH_CHUNKS:
6904 retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
6905 optlen);
6906 break;
6907 case SCTP_GET_ASSOC_NUMBER:
6908 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
6909 break;
6910 case SCTP_GET_ASSOC_ID_LIST:
6911 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
6912 break;
6913 case SCTP_AUTO_ASCONF:
6914 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
6915 break;
6916 case SCTP_PEER_ADDR_THLDS:
6917 retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
6918 break;
6919 case SCTP_GET_ASSOC_STATS:
6920 retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
6921 break;
6922 case SCTP_RECVRCVINFO:
6923 retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
6924 break;
6925 case SCTP_RECVNXTINFO:
6926 retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
6927 break;
6928 case SCTP_PR_SUPPORTED:
6929 retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen);
6930 break;
6931 case SCTP_DEFAULT_PRINFO:
6932 retval = sctp_getsockopt_default_prinfo(sk, len, optval,
6933 optlen);
6934 break;
6935 case SCTP_PR_ASSOC_STATUS:
6936 retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
6937 optlen);
6938 break;
6939 case SCTP_PR_STREAM_STATUS:
6940 retval = sctp_getsockopt_pr_streamstatus(sk, len, optval,
6941 optlen);
6942 break;
6943 case SCTP_RECONFIG_SUPPORTED:
6944 retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
6945 optlen);
6946 break;
6947 case SCTP_ENABLE_STREAM_RESET:
6948 retval = sctp_getsockopt_enable_strreset(sk, len, optval,
6949 optlen);
6950 break;
6951 default:
6952 retval = -ENOPROTOOPT;
6953 break;
6954 }
6955
6956 release_sock(sk);
6957 return retval;
6958 }
6959
6960 static int sctp_hash(struct sock *sk)
6961 {
6962 /* STUB */
6963 return 0;
6964 }
6965
6966 static void sctp_unhash(struct sock *sk)
6967 {
6968 /* STUB */
6969 }
6970
6971 /* Check if port is acceptable. Possibly find first available port.
6972 *
6973 * The port hash table (contained in the 'global' SCTP protocol storage
6974 * returned by struct sctp_protocol *sctp_get_protocol()). The hash
6975 * table is an array of 4096 lists (sctp_bind_hashbucket). Each
6976 * list (the list number is the port number hashed out, so as you
6977 * would expect from a hash function, all the ports in a given list have
6978 * such a number that hashes out to the same list number; you were
6979 * expecting that, right?); so each list has a set of ports, with a
6980 * link to the socket (struct sock) that uses it, the port number and
6981 * a fastreuse flag (FIXME: NPI ipg).
6982 */
6983 static struct sctp_bind_bucket *sctp_bucket_create(
6984 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
6985
6986 static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
6987 {
6988 struct sctp_bind_hashbucket *head; /* hash list */
6989 struct sctp_bind_bucket *pp;
6990 unsigned short snum;
6991 int ret;
6992
6993 snum = ntohs(addr->v4.sin_port);
6994
6995 pr_debug("%s: begins, snum:%d\n", __func__, snum);
6996
6997 local_bh_disable();
6998
6999 if (snum == 0) {
7000 /* Search for an available port. */
7001 int low, high, remaining, index;
7002 unsigned int rover;
7003 struct net *net = sock_net(sk);
7004
7005 inet_get_local_port_range(net, &low, &high);
7006 remaining = (high - low) + 1;
7007 rover = prandom_u32() % remaining + low;
7008
7009 do {
7010 rover++;
7011 if ((rover < low) || (rover > high))
7012 rover = low;
7013 if (inet_is_local_reserved_port(net, rover))
7014 continue;
7015 index = sctp_phashfn(sock_net(sk), rover);
7016 head = &sctp_port_hashtable[index];
7017 spin_lock(&head->lock);
7018 sctp_for_each_hentry(pp, &head->chain)
7019 if ((pp->port == rover) &&
7020 net_eq(sock_net(sk), pp->net))
7021 goto next;
7022 break;
7023 next:
7024 spin_unlock(&head->lock);
7025 } while (--remaining > 0);
7026
7027 /* Exhausted local port range during search? */
7028 ret = 1;
7029 if (remaining <= 0)
7030 goto fail;
7031
7032 /* OK, here is the one we will use. HEAD (the port
7033 * hash table list entry) is non-NULL and we hold it's
7034 * mutex.
7035 */
7036 snum = rover;
7037 } else {
7038 /* We are given an specific port number; we verify
7039 * that it is not being used. If it is used, we will
7040 * exahust the search in the hash list corresponding
7041 * to the port number (snum) - we detect that with the
7042 * port iterator, pp being NULL.
7043 */
7044 head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
7045 spin_lock(&head->lock);
7046 sctp_for_each_hentry(pp, &head->chain) {
7047 if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
7048 goto pp_found;
7049 }
7050 }
7051 pp = NULL;
7052 goto pp_not_found;
7053 pp_found:
7054 if (!hlist_empty(&pp->owner)) {
7055 /* We had a port hash table hit - there is an
7056 * available port (pp != NULL) and it is being
7057 * used by other socket (pp->owner not empty); that other
7058 * socket is going to be sk2.
7059 */
7060 int reuse = sk->sk_reuse;
7061 struct sock *sk2;
7062
7063 pr_debug("%s: found a possible match\n", __func__);
7064
7065 if (pp->fastreuse && sk->sk_reuse &&
7066 sk->sk_state != SCTP_SS_LISTENING)
7067 goto success;
7068
7069 /* Run through the list of sockets bound to the port
7070 * (pp->port) [via the pointers bind_next and
7071 * bind_pprev in the struct sock *sk2 (pp->sk)]. On each one,
7072 * we get the endpoint they describe and run through
7073 * the endpoint's list of IP (v4 or v6) addresses,
7074 * comparing each of the addresses with the address of
7075 * the socket sk. If we find a match, then that means
7076 * that this port/socket (sk) combination are already
7077 * in an endpoint.
7078 */
7079 sk_for_each_bound(sk2, &pp->owner) {
7080 struct sctp_endpoint *ep2;
7081 ep2 = sctp_sk(sk2)->ep;
7082
7083 if (sk == sk2 ||
7084 (reuse && sk2->sk_reuse &&
7085 sk2->sk_state != SCTP_SS_LISTENING))
7086 continue;
7087
7088 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
7089 sctp_sk(sk2), sctp_sk(sk))) {
7090 ret = (long)sk2;
7091 goto fail_unlock;
7092 }
7093 }
7094
7095 pr_debug("%s: found a match\n", __func__);
7096 }
7097 pp_not_found:
7098 /* If there was a hash table miss, create a new port. */
7099 ret = 1;
7100 if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
7101 goto fail_unlock;
7102
7103 /* In either case (hit or miss), make sure fastreuse is 1 only
7104 * if sk->sk_reuse is too (that is, if the caller requested
7105 * SO_REUSEADDR on this socket -sk-).
7106 */
7107 if (hlist_empty(&pp->owner)) {
7108 if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING)
7109 pp->fastreuse = 1;
7110 else
7111 pp->fastreuse = 0;
7112 } else if (pp->fastreuse &&
7113 (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING))
7114 pp->fastreuse = 0;
7115
7116 /* We are set, so fill up all the data in the hash table
7117 * entry, tie the socket list information with the rest of the
7118 * sockets FIXME: Blurry, NPI (ipg).
7119 */
7120 success:
7121 if (!sctp_sk(sk)->bind_hash) {
7122 inet_sk(sk)->inet_num = snum;
7123 sk_add_bind_node(sk, &pp->owner);
7124 sctp_sk(sk)->bind_hash = pp;
7125 }
7126 ret = 0;
7127
7128 fail_unlock:
7129 spin_unlock(&head->lock);
7130
7131 fail:
7132 local_bh_enable();
7133 return ret;
7134 }
7135
7136 /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral
7137 * port is requested.
7138 */
7139 static int sctp_get_port(struct sock *sk, unsigned short snum)
7140 {
7141 union sctp_addr addr;
7142 struct sctp_af *af = sctp_sk(sk)->pf->af;
7143
7144 /* Set up a dummy address struct from the sk. */
7145 af->from_sk(&addr, sk);
7146 addr.v4.sin_port = htons(snum);
7147
7148 /* Note: sk->sk_num gets filled in if ephemeral port request. */
7149 return !!sctp_get_port_local(sk, &addr);
7150 }
7151
7152 /*
7153 * Move a socket to LISTENING state.
7154 */
7155 static int sctp_listen_start(struct sock *sk, int backlog)
7156 {
7157 struct sctp_sock *sp = sctp_sk(sk);
7158 struct sctp_endpoint *ep = sp->ep;
7159 struct crypto_shash *tfm = NULL;
7160 char alg[32];
7161
7162 /* Allocate HMAC for generating cookie. */
7163 if (!sp->hmac && sp->sctp_hmac_alg) {
7164 sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
7165 tfm = crypto_alloc_shash(alg, 0, 0);
7166 if (IS_ERR(tfm)) {
7167 net_info_ratelimited("failed to load transform for %s: %ld\n",
7168 sp->sctp_hmac_alg, PTR_ERR(tfm));
7169 return -ENOSYS;
7170 }
7171 sctp_sk(sk)->hmac = tfm;
7172 }
7173
7174 /*
7175 * If a bind() or sctp_bindx() is not called prior to a listen()
7176 * call that allows new associations to be accepted, the system
7177 * picks an ephemeral port and will choose an address set equivalent
7178 * to binding with a wildcard address.
7179 *
7180 * This is not currently spelled out in the SCTP sockets
7181 * extensions draft, but follows the practice as seen in TCP
7182 * sockets.
7183 *
7184 */
7185 sk->sk_state = SCTP_SS_LISTENING;
7186 if (!ep->base.bind_addr.port) {
7187 if (sctp_autobind(sk))
7188 return -EAGAIN;
7189 } else {
7190 if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
7191 sk->sk_state = SCTP_SS_CLOSED;
7192 return -EADDRINUSE;
7193 }
7194 }
7195
7196 sk->sk_max_ack_backlog = backlog;
7197 sctp_hash_endpoint(ep);
7198 return 0;
7199 }
7200
7201 /*
7202 * 4.1.3 / 5.1.3 listen()
7203 *
7204 * By default, new associations are not accepted for UDP style sockets.
7205 * An application uses listen() to mark a socket as being able to
7206 * accept new associations.
7207 *
7208 * On TCP style sockets, applications use listen() to ready the SCTP
7209 * endpoint for accepting inbound associations.
7210 *
7211 * On both types of endpoints a backlog of '0' disables listening.
7212 *
7213 * Move a socket to LISTENING state.
7214 */
7215 int sctp_inet_listen(struct socket *sock, int backlog)
7216 {
7217 struct sock *sk = sock->sk;
7218 struct sctp_endpoint *ep = sctp_sk(sk)->ep;
7219 int err = -EINVAL;
7220
7221 if (unlikely(backlog < 0))
7222 return err;
7223
7224 lock_sock(sk);
7225
7226 /* Peeled-off sockets are not allowed to listen(). */
7227 if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
7228 goto out;
7229
7230 if (sock->state != SS_UNCONNECTED)
7231 goto out;
7232
7233 if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
7234 goto out;
7235
7236 /* If backlog is zero, disable listening. */
7237 if (!backlog) {
7238 if (sctp_sstate(sk, CLOSED))
7239 goto out;
7240
7241 err = 0;
7242 sctp_unhash_endpoint(ep);
7243 sk->sk_state = SCTP_SS_CLOSED;
7244 if (sk->sk_reuse)
7245 sctp_sk(sk)->bind_hash->fastreuse = 1;
7246 goto out;
7247 }
7248
7249 /* If we are already listening, just update the backlog */
7250 if (sctp_sstate(sk, LISTENING))
7251 sk->sk_max_ack_backlog = backlog;
7252 else {
7253 err = sctp_listen_start(sk, backlog);
7254 if (err)
7255 goto out;
7256 }
7257
7258 err = 0;
7259 out:
7260 release_sock(sk);
7261 return err;
7262 }
7263
7264 /*
7265 * This function is done by modeling the current datagram_poll() and the
7266 * tcp_poll(). Note that, based on these implementations, we don't
7267 * lock the socket in this function, even though it seems that,
7268 * ideally, locking or some other mechanisms can be used to ensure
7269 * the integrity of the counters (sndbuf and wmem_alloc) used
7270 * in this place. We assume that we don't need locks either until proven
7271 * otherwise.
7272 *
7273 * Another thing to note is that we include the Async I/O support
7274 * here, again, by modeling the current TCP/UDP code. We don't have
7275 * a good way to test with it yet.
7276 */
7277 unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
7278 {
7279 struct sock *sk = sock->sk;
7280 struct sctp_sock *sp = sctp_sk(sk);
7281 unsigned int mask;
7282
7283 poll_wait(file, sk_sleep(sk), wait);
7284
7285 sock_rps_record_flow(sk);
7286
7287 /* A TCP-style listening socket becomes readable when the accept queue
7288 * is not empty.
7289 */
7290 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
7291 return (!list_empty(&sp->ep->asocs)) ?
7292 (POLLIN | POLLRDNORM) : 0;
7293
7294 mask = 0;
7295
7296 /* Is there any exceptional events? */
7297 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
7298 mask |= POLLERR |
7299 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
7300 if (sk->sk_shutdown & RCV_SHUTDOWN)
7301 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
7302 if (sk->sk_shutdown == SHUTDOWN_MASK)
7303 mask |= POLLHUP;
7304
7305 /* Is it readable? Reconsider this code with TCP-style support. */
7306 if (!skb_queue_empty(&sk->sk_receive_queue))
7307 mask |= POLLIN | POLLRDNORM;
7308
7309 /* The association is either gone or not ready. */
7310 if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
7311 return mask;
7312
7313 /* Is it writable? */
7314 if (sctp_writeable(sk)) {
7315 mask |= POLLOUT | POLLWRNORM;
7316 } else {
7317 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
7318 /*
7319 * Since the socket is not locked, the buffer
7320 * might be made available after the writeable check and
7321 * before the bit is set. This could cause a lost I/O
7322 * signal. tcp_poll() has a race breaker for this race
7323 * condition. Based on their implementation, we put
7324 * in the following code to cover it as well.
7325 */
7326 if (sctp_writeable(sk))
7327 mask |= POLLOUT | POLLWRNORM;
7328 }
7329 return mask;
7330 }
7331
7332 /********************************************************************
7333 * 2nd Level Abstractions
7334 ********************************************************************/
7335
7336 static struct sctp_bind_bucket *sctp_bucket_create(
7337 struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
7338 {
7339 struct sctp_bind_bucket *pp;
7340
7341 pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
7342 if (pp) {
7343 SCTP_DBG_OBJCNT_INC(bind_bucket);
7344 pp->port = snum;
7345 pp->fastreuse = 0;
7346 INIT_HLIST_HEAD(&pp->owner);
7347 pp->net = net;
7348 hlist_add_head(&pp->node, &head->chain);
7349 }
7350 return pp;
7351 }
7352
7353 /* Caller must hold hashbucket lock for this tb with local BH disabled */
7354 static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
7355 {
7356 if (pp && hlist_empty(&pp->owner)) {
7357 __hlist_del(&pp->node);
7358 kmem_cache_free(sctp_bucket_cachep, pp);
7359 SCTP_DBG_OBJCNT_DEC(bind_bucket);
7360 }
7361 }
7362
7363 /* Release this socket's reference to a local port. */
7364 static inline void __sctp_put_port(struct sock *sk)
7365 {
7366 struct sctp_bind_hashbucket *head =
7367 &sctp_port_hashtable[sctp_phashfn(sock_net(sk),
7368 inet_sk(sk)->inet_num)];
7369 struct sctp_bind_bucket *pp;
7370
7371 spin_lock(&head->lock);
7372 pp = sctp_sk(sk)->bind_hash;
7373 __sk_del_bind_node(sk);
7374 sctp_sk(sk)->bind_hash = NULL;
7375 inet_sk(sk)->inet_num = 0;
7376 sctp_bucket_destroy(pp);
7377 spin_unlock(&head->lock);
7378 }
7379
7380 void sctp_put_port(struct sock *sk)
7381 {
7382 local_bh_disable();
7383 __sctp_put_port(sk);
7384 local_bh_enable();
7385 }
7386
7387 /*
7388 * The system picks an ephemeral port and choose an address set equivalent
7389 * to binding with a wildcard address.
7390 * One of those addresses will be the primary address for the association.
7391 * This automatically enables the multihoming capability of SCTP.
7392 */
7393 static int sctp_autobind(struct sock *sk)
7394 {
7395 union sctp_addr autoaddr;
7396 struct sctp_af *af;
7397 __be16 port;
7398
7399 /* Initialize a local sockaddr structure to INADDR_ANY. */
7400 af = sctp_sk(sk)->pf->af;
7401
7402 port = htons(inet_sk(sk)->inet_num);
7403 af->inaddr_any(&autoaddr, port);
7404
7405 return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
7406 }
7407
7408 /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation.
7409 *
7410 * From RFC 2292
7411 * 4.2 The cmsghdr Structure *
7412 *
7413 * When ancillary data is sent or received, any number of ancillary data
7414 * objects can be specified by the msg_control and msg_controllen members of
7415 * the msghdr structure, because each object is preceded by
7416 * a cmsghdr structure defining the object's length (the cmsg_len member).
7417 * Historically Berkeley-derived implementations have passed only one object
7418 * at a time, but this API allows multiple objects to be
7419 * passed in a single call to sendmsg() or recvmsg(). The following example
7420 * shows two ancillary data objects in a control buffer.
7421 *
7422 * |<--------------------------- msg_controllen -------------------------->|
7423 * | |
7424 *
7425 * |<----- ancillary data object ----->|<----- ancillary data object ----->|
7426 *
7427 * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
7428 * | | |
7429 *
7430 * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| |
7431 *
7432 * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| |
7433 * | | | | |
7434 *
7435 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
7436 * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX|
7437 *
7438 * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX|
7439 *
7440 * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
7441 * ^
7442 * |
7443 *
7444 * msg_control
7445 * points here
7446 */
7447 static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
7448 {
7449 struct cmsghdr *cmsg;
7450 struct msghdr *my_msg = (struct msghdr *)msg;
7451
7452 for_each_cmsghdr(cmsg, my_msg) {
7453 if (!CMSG_OK(my_msg, cmsg))
7454 return -EINVAL;
7455
7456 /* Should we parse this header or ignore? */
7457 if (cmsg->cmsg_level != IPPROTO_SCTP)
7458 continue;
7459
7460 /* Strictly check lengths following example in SCM code. */
7461 switch (cmsg->cmsg_type) {
7462 case SCTP_INIT:
7463 /* SCTP Socket API Extension
7464 * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
7465 *
7466 * This cmsghdr structure provides information for
7467 * initializing new SCTP associations with sendmsg().
7468 * The SCTP_INITMSG socket option uses this same data
7469 * structure. This structure is not used for
7470 * recvmsg().
7471 *
7472 * cmsg_level cmsg_type cmsg_data[]
7473 * ------------ ------------ ----------------------
7474 * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg
7475 */
7476 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
7477 return -EINVAL;
7478
7479 cmsgs->init = CMSG_DATA(cmsg);
7480 break;
7481
7482 case SCTP_SNDRCV:
7483 /* SCTP Socket API Extension
7484 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
7485 *
7486 * This cmsghdr structure specifies SCTP options for
7487 * sendmsg() and describes SCTP header information
7488 * about a received message through recvmsg().
7489 *
7490 * cmsg_level cmsg_type cmsg_data[]
7491 * ------------ ------------ ----------------------
7492 * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo
7493 */
7494 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
7495 return -EINVAL;
7496
7497 cmsgs->srinfo = CMSG_DATA(cmsg);
7498
7499 if (cmsgs->srinfo->sinfo_flags &
7500 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
7501 SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
7502 SCTP_ABORT | SCTP_EOF))
7503 return -EINVAL;
7504 break;
7505
7506 case SCTP_SNDINFO:
7507 /* SCTP Socket API Extension
7508 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
7509 *
7510 * This cmsghdr structure specifies SCTP options for
7511 * sendmsg(). This structure and SCTP_RCVINFO replaces
7512 * SCTP_SNDRCV which has been deprecated.
7513 *
7514 * cmsg_level cmsg_type cmsg_data[]
7515 * ------------ ------------ ---------------------
7516 * IPPROTO_SCTP SCTP_SNDINFO struct sctp_sndinfo
7517 */
7518 if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
7519 return -EINVAL;
7520
7521 cmsgs->sinfo = CMSG_DATA(cmsg);
7522
7523 if (cmsgs->sinfo->snd_flags &
7524 ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
7525 SCTP_SACK_IMMEDIATELY | SCTP_PR_SCTP_MASK |
7526 SCTP_ABORT | SCTP_EOF))
7527 return -EINVAL;
7528 break;
7529 default:
7530 return -EINVAL;
7531 }
7532 }
7533
7534 return 0;
7535 }
7536
7537 /*
7538 * Wait for a packet..
7539 * Note: This function is the same function as in core/datagram.c
7540 * with a few modifications to make lksctp work.
7541 */
7542 static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
7543 {
7544 int error;
7545 DEFINE_WAIT(wait);
7546
7547 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
7548
7549 /* Socket errors? */
7550 error = sock_error(sk);
7551 if (error)
7552 goto out;
7553
7554 if (!skb_queue_empty(&sk->sk_receive_queue))
7555 goto ready;
7556
7557 /* Socket shut down? */
7558 if (sk->sk_shutdown & RCV_SHUTDOWN)
7559 goto out;
7560
7561 /* Sequenced packets can come disconnected. If so we report the
7562 * problem.
7563 */
7564 error = -ENOTCONN;
7565
7566 /* Is there a good reason to think that we may receive some data? */
7567 if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
7568 goto out;
7569
7570 /* Handle signals. */
7571 if (signal_pending(current))
7572 goto interrupted;
7573
7574 /* Let another process have a go. Since we are going to sleep
7575 * anyway. Note: This may cause odd behaviors if the message
7576 * does not fit in the user's buffer, but this seems to be the
7577 * only way to honor MSG_DONTWAIT realistically.
7578 */
7579 release_sock(sk);
7580 *timeo_p = schedule_timeout(*timeo_p);
7581 lock_sock(sk);
7582
7583 ready:
7584 finish_wait(sk_sleep(sk), &wait);
7585 return 0;
7586
7587 interrupted:
7588 error = sock_intr_errno(*timeo_p);
7589
7590 out:
7591 finish_wait(sk_sleep(sk), &wait);
7592 *err = error;
7593 return error;
7594 }
7595
7596 /* Receive a datagram.
7597 * Note: This is pretty much the same routine as in core/datagram.c
7598 * with a few changes to make lksctp work.
7599 */
7600 struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
7601 int noblock, int *err)
7602 {
7603 int error;
7604 struct sk_buff *skb;
7605 long timeo;
7606
7607 timeo = sock_rcvtimeo(sk, noblock);
7608
7609 pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo,
7610 MAX_SCHEDULE_TIMEOUT);
7611
7612 do {
7613 /* Again only user level code calls this function,
7614 * so nothing interrupt level
7615 * will suddenly eat the receive_queue.
7616 *
7617 * Look at current nfs client by the way...
7618 * However, this function was correct in any case. 8)
7619 */
7620 if (flags & MSG_PEEK) {
7621 skb = skb_peek(&sk->sk_receive_queue);
7622 if (skb)
7623 refcount_inc(&skb->users);
7624 } else {
7625 skb = __skb_dequeue(&sk->sk_receive_queue);
7626 }
7627
7628 if (skb)
7629 return skb;
7630
7631 /* Caller is allowed not to check sk->sk_err before calling. */
7632 error = sock_error(sk);
7633 if (error)
7634 goto no_packet;
7635
7636 if (sk->sk_shutdown & RCV_SHUTDOWN)
7637 break;
7638
7639 if (sk_can_busy_loop(sk)) {
7640 sk_busy_loop(sk, noblock);
7641
7642 if (!skb_queue_empty(&sk->sk_receive_queue))
7643 continue;
7644 }
7645
7646 /* User doesn't want to wait. */
7647 error = -EAGAIN;
7648 if (!timeo)
7649 goto no_packet;
7650 } while (sctp_wait_for_packet(sk, err, &timeo) == 0);
7651
7652 return NULL;
7653
7654 no_packet:
7655 *err = error;
7656 return NULL;
7657 }
7658
7659 /* If sndbuf has changed, wake up per association sndbuf waiters. */
7660 static void __sctp_write_space(struct sctp_association *asoc)
7661 {
7662 struct sock *sk = asoc->base.sk;
7663
7664 if (sctp_wspace(asoc) <= 0)
7665 return;
7666
7667 if (waitqueue_active(&asoc->wait))
7668 wake_up_interruptible(&asoc->wait);
7669
7670 if (sctp_writeable(sk)) {
7671 struct socket_wq *wq;
7672
7673 rcu_read_lock();
7674 wq = rcu_dereference(sk->sk_wq);
7675 if (wq) {
7676 if (waitqueue_active(&wq->wait))
7677 wake_up_interruptible(&wq->wait);
7678
7679 /* Note that we try to include the Async I/O support
7680 * here by modeling from the current TCP/UDP code.
7681 * We have not tested with it yet.
7682 */
7683 if (!(sk->sk_shutdown & SEND_SHUTDOWN))
7684 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
7685 }
7686 rcu_read_unlock();
7687 }
7688 }
7689
7690 static void sctp_wake_up_waiters(struct sock *sk,
7691 struct sctp_association *asoc)
7692 {
7693 struct sctp_association *tmp = asoc;
7694
7695 /* We do accounting for the sndbuf space per association,
7696 * so we only need to wake our own association.
7697 */
7698 if (asoc->ep->sndbuf_policy)
7699 return __sctp_write_space(asoc);
7700
7701 /* If association goes down and is just flushing its
7702 * outq, then just normally notify others.
7703 */
7704 if (asoc->base.dead)
7705 return sctp_write_space(sk);
7706
7707 /* Accounting for the sndbuf space is per socket, so we
7708 * need to wake up others, try to be fair and in case of
7709 * other associations, let them have a go first instead
7710 * of just doing a sctp_write_space() call.
7711 *
7712 * Note that we reach sctp_wake_up_waiters() only when
7713 * associations free up queued chunks, thus we are under
7714 * lock and the list of associations on a socket is
7715 * guaranteed not to change.
7716 */
7717 for (tmp = list_next_entry(tmp, asocs); 1;
7718 tmp = list_next_entry(tmp, asocs)) {
7719 /* Manually skip the head element. */
7720 if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
7721 continue;
7722 /* Wake up association. */
7723 __sctp_write_space(tmp);
7724 /* We've reached the end. */
7725 if (tmp == asoc)
7726 break;
7727 }
7728 }
7729
7730 /* Do accounting for the sndbuf space.
7731 * Decrement the used sndbuf space of the corresponding association by the
7732 * data size which was just transmitted(freed).
7733 */
7734 static void sctp_wfree(struct sk_buff *skb)
7735 {
7736 struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg;
7737 struct sctp_association *asoc = chunk->asoc;
7738 struct sock *sk = asoc->base.sk;
7739
7740 asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
7741 sizeof(struct sk_buff) +
7742 sizeof(struct sctp_chunk);
7743
7744 WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc));
7745
7746 /*
7747 * This undoes what is done via sctp_set_owner_w and sk_mem_charge
7748 */
7749 sk->sk_wmem_queued -= skb->truesize;
7750 sk_mem_uncharge(sk, skb->truesize);
7751
7752 sock_wfree(skb);
7753 sctp_wake_up_waiters(sk, asoc);
7754
7755 sctp_association_put(asoc);
7756 }
7757
7758 /* Do accounting for the receive space on the socket.
7759 * Accounting for the association is done in ulpevent.c
7760 * We set this as a destructor for the cloned data skbs so that
7761 * accounting is done at the correct time.
7762 */
7763 void sctp_sock_rfree(struct sk_buff *skb)
7764 {
7765 struct sock *sk = skb->sk;
7766 struct sctp_ulpevent *event = sctp_skb2event(skb);
7767
7768 atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
7769
7770 /*
7771 * Mimic the behavior of sock_rfree
7772 */
7773 sk_mem_uncharge(sk, event->rmem_len);
7774 }
7775
7776
7777 /* Helper function to wait for space in the sndbuf. */
7778 static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
7779 size_t msg_len)
7780 {
7781 struct sock *sk = asoc->base.sk;
7782 int err = 0;
7783 long current_timeo = *timeo_p;
7784 DEFINE_WAIT(wait);
7785
7786 pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
7787 *timeo_p, msg_len);
7788
7789 /* Increment the association's refcnt. */
7790 sctp_association_hold(asoc);
7791
7792 /* Wait on the association specific sndbuf space. */
7793 for (;;) {
7794 prepare_to_wait_exclusive(&asoc->wait, &wait,
7795 TASK_INTERRUPTIBLE);
7796 if (!*timeo_p)
7797 goto do_nonblock;
7798 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
7799 asoc->base.dead)
7800 goto do_error;
7801 if (signal_pending(current))
7802 goto do_interrupted;
7803 if (msg_len <= sctp_wspace(asoc))
7804 break;
7805
7806 /* Let another process have a go. Since we are going
7807 * to sleep anyway.
7808 */
7809 release_sock(sk);
7810 current_timeo = schedule_timeout(current_timeo);
7811 lock_sock(sk);
7812
7813 *timeo_p = current_timeo;
7814 }
7815
7816 out:
7817 finish_wait(&asoc->wait, &wait);
7818
7819 /* Release the association's refcnt. */
7820 sctp_association_put(asoc);
7821
7822 return err;
7823
7824 do_error:
7825 err = -EPIPE;
7826 goto out;
7827
7828 do_interrupted:
7829 err = sock_intr_errno(*timeo_p);
7830 goto out;
7831
7832 do_nonblock:
7833 err = -EAGAIN;
7834 goto out;
7835 }
7836
7837 void sctp_data_ready(struct sock *sk)
7838 {
7839 struct socket_wq *wq;
7840
7841 rcu_read_lock();
7842 wq = rcu_dereference(sk->sk_wq);
7843 if (skwq_has_sleeper(wq))
7844 wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
7845 POLLRDNORM | POLLRDBAND);
7846 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
7847 rcu_read_unlock();
7848 }
7849
7850 /* If socket sndbuf has changed, wake up all per association waiters. */
7851 void sctp_write_space(struct sock *sk)
7852 {
7853 struct sctp_association *asoc;
7854
7855 /* Wake up the tasks in each wait queue. */
7856 list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
7857 __sctp_write_space(asoc);
7858 }
7859 }
7860
7861 /* Is there any sndbuf space available on the socket?
7862 *
7863 * Note that sk_wmem_alloc is the sum of the send buffers on all of the
7864 * associations on the same socket. For a UDP-style socket with
7865 * multiple associations, it is possible for it to be "unwriteable"
7866 * prematurely. I assume that this is acceptable because
7867 * a premature "unwriteable" is better than an accidental "writeable" which
7868 * would cause an unwanted block under certain circumstances. For the 1-1
7869 * UDP-style sockets or TCP-style sockets, this code should work.
7870 * - Daisy
7871 */
7872 static int sctp_writeable(struct sock *sk)
7873 {
7874 int amt = 0;
7875
7876 amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
7877 if (amt < 0)
7878 amt = 0;
7879 return amt;
7880 }
7881
7882 /* Wait for an association to go into ESTABLISHED state. If timeout is 0,
7883 * returns immediately with EINPROGRESS.
7884 */
7885 static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p)
7886 {
7887 struct sock *sk = asoc->base.sk;
7888 int err = 0;
7889 long current_timeo = *timeo_p;
7890 DEFINE_WAIT(wait);
7891
7892 pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p);
7893
7894 /* Increment the association's refcnt. */
7895 sctp_association_hold(asoc);
7896
7897 for (;;) {
7898 prepare_to_wait_exclusive(&asoc->wait, &wait,
7899 TASK_INTERRUPTIBLE);
7900 if (!*timeo_p)
7901 goto do_nonblock;
7902 if (sk->sk_shutdown & RCV_SHUTDOWN)
7903 break;
7904 if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
7905 asoc->base.dead)
7906 goto do_error;
7907 if (signal_pending(current))
7908 goto do_interrupted;
7909
7910 if (sctp_state(asoc, ESTABLISHED))
7911 break;
7912
7913 /* Let another process have a go. Since we are going
7914 * to sleep anyway.
7915 */
7916 release_sock(sk);
7917 current_timeo = schedule_timeout(current_timeo);
7918 lock_sock(sk);
7919
7920 *timeo_p = current_timeo;
7921 }
7922
7923 out:
7924 finish_wait(&asoc->wait, &wait);
7925
7926 /* Release the association's refcnt. */
7927 sctp_association_put(asoc);
7928
7929 return err;
7930
7931 do_error:
7932 if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
7933 err = -ETIMEDOUT;
7934 else
7935 err = -ECONNREFUSED;
7936 goto out;
7937
7938 do_interrupted:
7939 err = sock_intr_errno(*timeo_p);
7940 goto out;
7941
7942 do_nonblock:
7943 err = -EINPROGRESS;
7944 goto out;
7945 }
7946
7947 static int sctp_wait_for_accept(struct sock *sk, long timeo)
7948 {
7949 struct sctp_endpoint *ep;
7950 int err = 0;
7951 DEFINE_WAIT(wait);
7952
7953 ep = sctp_sk(sk)->ep;
7954
7955
7956 for (;;) {
7957 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
7958 TASK_INTERRUPTIBLE);
7959
7960 if (list_empty(&ep->asocs)) {
7961 release_sock(sk);
7962 timeo = schedule_timeout(timeo);
7963 lock_sock(sk);
7964 }
7965
7966 err = -EINVAL;
7967 if (!sctp_sstate(sk, LISTENING))
7968 break;
7969
7970 err = 0;
7971 if (!list_empty(&ep->asocs))
7972 break;
7973
7974 err = sock_intr_errno(timeo);
7975 if (signal_pending(current))
7976 break;
7977
7978 err = -EAGAIN;
7979 if (!timeo)
7980 break;
7981 }
7982
7983 finish_wait(sk_sleep(sk), &wait);
7984
7985 return err;
7986 }
7987
7988 static void sctp_wait_for_close(struct sock *sk, long timeout)
7989 {
7990 DEFINE_WAIT(wait);
7991
7992 do {
7993 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
7994 if (list_empty(&sctp_sk(sk)->ep->asocs))
7995 break;
7996 release_sock(sk);
7997 timeout = schedule_timeout(timeout);
7998 lock_sock(sk);
7999 } while (!signal_pending(current) && timeout);
8000
8001 finish_wait(sk_sleep(sk), &wait);
8002 }
8003
8004 static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
8005 {
8006 struct sk_buff *frag;
8007
8008 if (!skb->data_len)
8009 goto done;
8010
8011 /* Don't forget the fragments. */
8012 skb_walk_frags(skb, frag)
8013 sctp_skb_set_owner_r_frag(frag, sk);
8014
8015 done:
8016 sctp_skb_set_owner_r(skb, sk);
8017 }
8018
8019 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
8020 struct sctp_association *asoc)
8021 {
8022 struct inet_sock *inet = inet_sk(sk);
8023 struct inet_sock *newinet;
8024
8025 newsk->sk_type = sk->sk_type;
8026 newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
8027 newsk->sk_flags = sk->sk_flags;
8028 newsk->sk_tsflags = sk->sk_tsflags;
8029 newsk->sk_no_check_tx = sk->sk_no_check_tx;
8030 newsk->sk_no_check_rx = sk->sk_no_check_rx;
8031 newsk->sk_reuse = sk->sk_reuse;
8032
8033 newsk->sk_shutdown = sk->sk_shutdown;
8034 newsk->sk_destruct = sctp_destruct_sock;
8035 newsk->sk_family = sk->sk_family;
8036 newsk->sk_protocol = IPPROTO_SCTP;
8037 newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
8038 newsk->sk_sndbuf = sk->sk_sndbuf;
8039 newsk->sk_rcvbuf = sk->sk_rcvbuf;
8040 newsk->sk_lingertime = sk->sk_lingertime;
8041 newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
8042 newsk->sk_sndtimeo = sk->sk_sndtimeo;
8043 newsk->sk_rxhash = sk->sk_rxhash;
8044
8045 newinet = inet_sk(newsk);
8046
8047 /* Initialize sk's sport, dport, rcv_saddr and daddr for
8048 * getsockname() and getpeername()
8049 */
8050 newinet->inet_sport = inet->inet_sport;
8051 newinet->inet_saddr = inet->inet_saddr;
8052 newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
8053 newinet->inet_dport = htons(asoc->peer.port);
8054 newinet->pmtudisc = inet->pmtudisc;
8055 newinet->inet_id = asoc->next_tsn ^ jiffies;
8056
8057 newinet->uc_ttl = inet->uc_ttl;
8058 newinet->mc_loop = 1;
8059 newinet->mc_ttl = 1;
8060 newinet->mc_index = 0;
8061 newinet->mc_list = NULL;
8062
8063 if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
8064 net_enable_timestamp();
8065
8066 security_sk_clone(sk, newsk);
8067 }
8068
8069 static inline void sctp_copy_descendant(struct sock *sk_to,
8070 const struct sock *sk_from)
8071 {
8072 int ancestor_size = sizeof(struct inet_sock) +
8073 sizeof(struct sctp_sock) -
8074 offsetof(struct sctp_sock, auto_asconf_list);
8075
8076 if (sk_from->sk_family == PF_INET6)
8077 ancestor_size += sizeof(struct ipv6_pinfo);
8078
8079 __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
8080 }
8081
8082 /* Populate the fields of the newsk from the oldsk and migrate the assoc
8083 * and its messages to the newsk.
8084 */
8085 static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
8086 struct sctp_association *assoc,
8087 sctp_socket_type_t type)
8088 {
8089 struct sctp_sock *oldsp = sctp_sk(oldsk);
8090 struct sctp_sock *newsp = sctp_sk(newsk);
8091 struct sctp_bind_bucket *pp; /* hash list port iterator */
8092 struct sctp_endpoint *newep = newsp->ep;
8093 struct sk_buff *skb, *tmp;
8094 struct sctp_ulpevent *event;
8095 struct sctp_bind_hashbucket *head;
8096
8097 /* Migrate socket buffer sizes and all the socket level options to the
8098 * new socket.
8099 */
8100 newsk->sk_sndbuf = oldsk->sk_sndbuf;
8101 newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
8102 /* Brute force copy old sctp opt. */
8103 sctp_copy_descendant(newsk, oldsk);
8104
8105 /* Restore the ep value that was overwritten with the above structure
8106 * copy.
8107 */
8108 newsp->ep = newep;
8109 newsp->hmac = NULL;
8110
8111 /* Hook this new socket in to the bind_hash list. */
8112 head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
8113 inet_sk(oldsk)->inet_num)];
8114 spin_lock_bh(&head->lock);
8115 pp = sctp_sk(oldsk)->bind_hash;
8116 sk_add_bind_node(newsk, &pp->owner);
8117 sctp_sk(newsk)->bind_hash = pp;
8118 inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
8119 spin_unlock_bh(&head->lock);
8120
8121 /* Copy the bind_addr list from the original endpoint to the new
8122 * endpoint so that we can handle restarts properly
8123 */
8124 sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
8125 &oldsp->ep->base.bind_addr, GFP_KERNEL);
8126
8127 /* Move any messages in the old socket's receive queue that are for the
8128 * peeled off association to the new socket's receive queue.
8129 */
8130 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
8131 event = sctp_skb2event(skb);
8132 if (event->asoc == assoc) {
8133 __skb_unlink(skb, &oldsk->sk_receive_queue);
8134 __skb_queue_tail(&newsk->sk_receive_queue, skb);
8135 sctp_skb_set_owner_r_frag(skb, newsk);
8136 }
8137 }
8138
8139 /* Clean up any messages pending delivery due to partial
8140 * delivery. Three cases:
8141 * 1) No partial deliver; no work.
8142 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
8143 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
8144 */
8145 skb_queue_head_init(&newsp->pd_lobby);
8146 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
8147
8148 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
8149 struct sk_buff_head *queue;
8150
8151 /* Decide which queue to move pd_lobby skbs to. */
8152 if (assoc->ulpq.pd_mode) {
8153 queue = &newsp->pd_lobby;
8154 } else
8155 queue = &newsk->sk_receive_queue;
8156
8157 /* Walk through the pd_lobby, looking for skbs that
8158 * need moved to the new socket.
8159 */
8160 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
8161 event = sctp_skb2event(skb);
8162 if (event->asoc == assoc) {
8163 __skb_unlink(skb, &oldsp->pd_lobby);
8164 __skb_queue_tail(queue, skb);
8165 sctp_skb_set_owner_r_frag(skb, newsk);
8166 }
8167 }
8168
8169 /* Clear up any skbs waiting for the partial
8170 * delivery to finish.
8171 */
8172 if (assoc->ulpq.pd_mode)
8173 sctp_clear_pd(oldsk, NULL);
8174
8175 }
8176
8177 sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
8178 sctp_skb_set_owner_r_frag(skb, newsk);
8179
8180 sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
8181 sctp_skb_set_owner_r_frag(skb, newsk);
8182
8183 /* Set the type of socket to indicate that it is peeled off from the
8184 * original UDP-style socket or created with the accept() call on a
8185 * TCP-style socket..
8186 */
8187 newsp->type = type;
8188
8189 /* Mark the new socket "in-use" by the user so that any packets
8190 * that may arrive on the association after we've moved it are
8191 * queued to the backlog. This prevents a potential race between
8192 * backlog processing on the old socket and new-packet processing
8193 * on the new socket.
8194 *
8195 * The caller has just allocated newsk so we can guarantee that other
8196 * paths won't try to lock it and then oldsk.
8197 */
8198 lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
8199 sctp_assoc_migrate(assoc, newsk);
8200
8201 /* If the association on the newsk is already closed before accept()
8202 * is called, set RCV_SHUTDOWN flag.
8203 */
8204 if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
8205 newsk->sk_state = SCTP_SS_CLOSED;
8206 newsk->sk_shutdown |= RCV_SHUTDOWN;
8207 } else {
8208 newsk->sk_state = SCTP_SS_ESTABLISHED;
8209 }
8210
8211 release_sock(newsk);
8212 }
8213
8214
8215 /* This proto struct describes the ULP interface for SCTP. */
8216 struct proto sctp_prot = {
8217 .name = "SCTP",
8218 .owner = THIS_MODULE,
8219 .close = sctp_close,
8220 .connect = sctp_connect,
8221 .disconnect = sctp_disconnect,
8222 .accept = sctp_accept,
8223 .ioctl = sctp_ioctl,
8224 .init = sctp_init_sock,
8225 .destroy = sctp_destroy_sock,
8226 .shutdown = sctp_shutdown,
8227 .setsockopt = sctp_setsockopt,
8228 .getsockopt = sctp_getsockopt,
8229 .sendmsg = sctp_sendmsg,
8230 .recvmsg = sctp_recvmsg,
8231 .bind = sctp_bind,
8232 .backlog_rcv = sctp_backlog_rcv,
8233 .hash = sctp_hash,
8234 .unhash = sctp_unhash,
8235 .get_port = sctp_get_port,
8236 .obj_size = sizeof(struct sctp_sock),
8237 .sysctl_mem = sysctl_sctp_mem,
8238 .sysctl_rmem = sysctl_sctp_rmem,
8239 .sysctl_wmem = sysctl_sctp_wmem,
8240 .memory_pressure = &sctp_memory_pressure,
8241 .enter_memory_pressure = sctp_enter_memory_pressure,
8242 .memory_allocated = &sctp_memory_allocated,
8243 .sockets_allocated = &sctp_sockets_allocated,
8244 };
8245
8246 #if IS_ENABLED(CONFIG_IPV6)
8247
8248 #include <net/transp_v6.h>
8249 static void sctp_v6_destroy_sock(struct sock *sk)
8250 {
8251 sctp_destroy_sock(sk);
8252 inet6_destroy_sock(sk);
8253 }
8254
8255 struct proto sctpv6_prot = {
8256 .name = "SCTPv6",
8257 .owner = THIS_MODULE,
8258 .close = sctp_close,
8259 .connect = sctp_connect,
8260 .disconnect = sctp_disconnect,
8261 .accept = sctp_accept,
8262 .ioctl = sctp_ioctl,
8263 .init = sctp_init_sock,
8264 .destroy = sctp_v6_destroy_sock,
8265 .shutdown = sctp_shutdown,
8266 .setsockopt = sctp_setsockopt,
8267 .getsockopt = sctp_getsockopt,
8268 .sendmsg = sctp_sendmsg,
8269 .recvmsg = sctp_recvmsg,
8270 .bind = sctp_bind,
8271 .backlog_rcv = sctp_backlog_rcv,
8272 .hash = sctp_hash,
8273 .unhash = sctp_unhash,
8274 .get_port = sctp_get_port,
8275 .obj_size = sizeof(struct sctp6_sock),
8276 .sysctl_mem = sysctl_sctp_mem,
8277 .sysctl_rmem = sysctl_sctp_rmem,
8278 .sysctl_wmem = sysctl_sctp_wmem,
8279 .memory_pressure = &sctp_memory_pressure,
8280 .enter_memory_pressure = sctp_enter_memory_pressure,
8281 .memory_allocated = &sctp_memory_allocated,
8282 .sockets_allocated = &sctp_sockets_allocated,
8283 };
8284 #endif /* IS_ENABLED(CONFIG_IPV6) */