/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

/* Keep track of the new idr low so that we don't re-use association id
 * numbers too fast.  It is protected by the idr spin lock and is in the
 * range of 1 - INT_MAX.
 */
static u32 idr_low = 1;


/* 1st Level Abstractions.  */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
					  const struct sctp_endpoint *ep,
					  const struct sock *sk,
					  sctp_scope_t scope,
					  gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	int i;
	sctp_paramhdr_t *p;
	int err;

	/* Retrieve the SCTP per socket area.  */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here.  */
	asoc->ep = (struct sctp_endpoint *)ep;
	sctp_endpoint_hold(asoc->ep);

	/* Hold the sock.  */
	asoc->base.sk = (struct sock *)sk;
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure.  */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields.  */
	atomic_set(&asoc->base.refcnt, 1);
	asoc->base.dead = 0;
	asoc->base.malloced = 0;

	/* Initialize the bind addr area.  */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;

	/* Set these values from the socket values, a conversion between
	 * milliseconds to seconds/microseconds must also be done.
	 */
	asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000;
	asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
					* 1000;
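	/* e.g. a sasoc_cookie_life of 60500 ms yields tv_sec = 60 and
	 * tv_usec = 500000.
	 */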
	asoc->frag_point = 0;
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = net->sctp.pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	asoc->overall_error_count = 0;

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;
176 asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
177 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
178 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
179 min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
180
181 /* Initializes the timers */
182 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
183 setup_timer(&asoc->timers[i], sctp_timer_events[i],
184 (unsigned long)asoc);
185
186 /* Pull default initialization values from the sock options.
187 * Note: This assumes that the values have already been
188 * validated in the sock.
189 */
190 asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
191 asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
192 asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
193
194 asoc->max_init_timeo =
195 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
196
197 /* Allocate storage for the ssnmap after the inbound and outbound
198 * streams have been negotiated during Init.
199 */
200 asoc->ssnmap = NULL;
201
202 /* Set the local window size for receive.
203 * This is also the rcvbuf space per association.
204 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
205 * 1500 bytes in one SCTP packet.
206 */
207 if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
208 asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
209 else
210 asoc->rwnd = sk->sk_rcvbuf/2;
211
212 asoc->a_rwnd = asoc->rwnd;
213
214 asoc->rwnd_over = 0;
215 asoc->rwnd_press = 0;
216
217 /* Use my own max window until I learn something better. */
218 asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
219
220 /* Set the sndbuf size for transmit. */
221 asoc->sndbuf_used = 0;
222
223 /* Initialize the receive memory counter */
224 atomic_set(&asoc->rmem_alloc, 0);
225
226 init_waitqueue_head(&asoc->wait);
227
228 asoc->c.my_vtag = sctp_generate_tag(ep);
229 asoc->peer.i.init_tag = 0; /* INIT needs a vtag of 0. */
230 asoc->c.peer_vtag = 0;
231 asoc->c.my_ttag = 0;
232 asoc->c.peer_ttag = 0;
233 asoc->c.my_port = ep->base.bind_addr.port;
234
235 asoc->c.initial_tsn = sctp_generate_tsn(ep);
236
237 asoc->next_tsn = asoc->c.initial_tsn;
238
239 asoc->ctsn_ack_point = asoc->next_tsn - 1;
240 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
241 asoc->highest_sacked = asoc->ctsn_ack_point;
242 asoc->last_cwr_tsn = asoc->ctsn_ack_point;
243 asoc->unack_data = 0;
244
245 /* ADDIP Section 4.1 Asconf Chunk Procedures
246 *
247 * When an endpoint has an ASCONF signaled change to be sent to the
248 * remote endpoint it should do the following:
249 * ...
250 * A2) a serial number should be assigned to the chunk. The serial
251 * number SHOULD be a monotonically increasing number. The serial
252 * numbers SHOULD be initialized at the start of the
253 * association to the same value as the initial TSN.
254 */
255 asoc->addip_serial = asoc->c.initial_tsn;
256
257 INIT_LIST_HEAD(&asoc->addip_chunk_list);
258 INIT_LIST_HEAD(&asoc->asconf_ack_list);
259
260 /* Make an empty list of remote transport addresses. */
261 INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
262 asoc->peer.transport_count = 0;
263
264 /* RFC 2960 5.1 Normal Establishment of an Association
265 *
266 * After the reception of the first data chunk in an
267 * association the endpoint must immediately respond with a
268 * sack to acknowledge the data chunk. Subsequent
269 * acknowledgements should be done as described in Section
270 * 6.2.
271 *
272 * [We implement this by telling a new association that it
273 * already received one packet.]
274 */
275 asoc->peer.sack_needed = 1;
276 asoc->peer.sack_cnt = 0;
277 asoc->peer.sack_generation = 1;
278
279 /* Assume that the peer will tell us if he recognizes ASCONF
280 * as part of INIT exchange.
281 * The sctp_addip_noauth option is there for backward compatibilty
282 * and will revert old behavior.
283 */
284 asoc->peer.asconf_capable = 0;
285 if (net->sctp.addip_noauth)
286 asoc->peer.asconf_capable = 1;
287 asoc->asconf_addr_del_pending = NULL;
288 asoc->src_out_of_asoc_ok = 0;
289 asoc->new_transport = NULL;
290
291 /* Create an input queue. */
292 sctp_inq_init(&asoc->base.inqueue);
293 sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
294
295 /* Create an output queue. */
296 sctp_outq_init(asoc, &asoc->outqueue);
297
298 if (!sctp_ulpq_init(&asoc->ulpq, asoc))
299 goto fail_init;
300
301 memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));
302
303 asoc->need_ecne = 0;
304
305 asoc->assoc_id = 0;
306
307 /* Assume that peer would support both address types unless we are
308 * told otherwise.
309 */
310 asoc->peer.ipv4_address = 1;
311 if (asoc->base.sk->sk_family == PF_INET6)
312 asoc->peer.ipv6_address = 1;
313 INIT_LIST_HEAD(&asoc->asocs);
314
315 asoc->autoclose = sp->autoclose;
316
317 asoc->default_stream = sp->default_stream;
318 asoc->default_ppid = sp->default_ppid;
319 asoc->default_flags = sp->default_flags;
320 asoc->default_context = sp->default_context;
321 asoc->default_timetolive = sp->default_timetolive;
322 asoc->default_rcv_context = sp->default_rcv_context;
323
324 /* AUTH related initializations */
325 INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
326 err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
327 if (err)
328 goto fail_init;
329
330 asoc->active_key_id = ep->active_key_id;
331 asoc->asoc_shared_key = NULL;
332
333 asoc->default_hmac_id = 0;
334 /* Save the hmacs and chunks list into this association */
335 if (ep->auth_hmacs_list)
336 memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
337 ntohs(ep->auth_hmacs_list->param_hdr.length));
338 if (ep->auth_chunk_list)
339 memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
340 ntohs(ep->auth_chunk_list->param_hdr.length));
341
342 /* Get the AUTH random number for this association */
343 p = (sctp_paramhdr_t *)asoc->c.auth_random;
344 p->type = SCTP_PARAM_RANDOM;
345 p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
346 get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
347
348 return asoc;
349
350 fail_init:
351 sctp_endpoint_put(asoc->ep);
352 sock_put(asoc->base.sk);
353 return NULL;
354 }

/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
					 const struct sock *sk,
					 sctp_scope_t scope,
					 gfp_t gfp)
{
	struct sctp_association *asoc;

	asoc = t_new(struct sctp_association, gfp);
	if (!asoc)
		goto fail;

	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	asoc->base.malloced = 1;
	SCTP_DBG_OBJCNT_INC(assoc);
	SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc);

	return asoc;

fail_init:
	kfree(asoc);
fail:
	return NULL;
}
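
/* Illustrative caller sketch (not from this file): the socket layer
 * typically builds an association along the lines of
 *
 *	asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
 *	if (!asoc)
 *		return -ENOMEM;
 *
 * and later tears it down with sctp_association_free().
 */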

/* Free this association if possible.  There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!asoc->temp) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = 1;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free ssnmap storage. */
	sctp_ssnmap_free(asoc->ssnmap);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them?  To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (timer_pending(&asoc->timers[i]) &&
		    del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del(pos);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted */
	if (asoc->asconf_addr_del_pending != NULL)
		kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}

/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	WARN_ON(atomic_read(&asoc->rmem_alloc));

	if (asoc->base.malloced) {
		kfree(asoc);
		SCTP_DBG_OBJCNT_DEC(assoc);
	}
}

/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* it's a changeover only if we already have a primary path
	 * that we are changing
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1;

	asoc->peer.primary_path = transport;

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother if we have data queued or outstanding on
	 * the association.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

/* Remove a transport from an association.  */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct list_head	*pos;
	struct sctp_transport	*transport;

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ",
				 " port: %d\n",
				 asoc,
				 (&peer->ipaddr),
				 ntohs(peer->ipaddr.v4.sin_port));

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del(&peer->transports);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport an SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off.  The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;
		struct sctp_chunk *ch;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
					transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
					&active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
					jiffies + active->rto))
				sctp_transport_hold(active);
	}

	asoc->peer.transport_count--;

	sctp_transport_free(peer);
}

/* Add a transport address to an association.  */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
				 " port: %d state:%d\n",
				 asoc,
				 addr,
				 port,
				 peer_state);

	/* Set the port if it has not been set yet.  */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call.  Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN) {
			peer->state = SCTP_ACTIVE;
		}
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans.  */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	sctp_transport_route(peer, NULL, sp);

	/* Initialize the pmtu of the transport. */
	if (peer->param_flags & SPP_PMTUD_DISABLE) {
		if (asoc->pathmtu)
			peer->pathmtu = asoc->pathmtu;
		else
			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
	}

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	if (asoc->pathmtu)
		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
	else
		asoc->pathmtu = peer->pathmtu;
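	/* The association PMTU thus tracks the smallest path: e.g. peers
	 * with PMTUs of 9000 and 1500 leave asoc->pathmtu at 1500.
	 */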

	SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
			  "%d\n", asoc, asoc->pathmtu);
	peer->pmtu_pending = 0;

	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *	min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
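	/* For a typical Ethernet PMTU of 1500 this is
	 * min(6000, max(3000, 4380)) = 4380 bytes.
	 */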

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Attach the remote transport to our asoc.  */
	list_add_tail(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	/* If we do not yet have a primary path, set one.  */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}

/* Delete a transport address from an association.  */
void sctp_assoc_del_peer(struct sctp_association *asoc,
			 const union sctp_addr *addr)
{
	struct list_head	*pos;
	struct list_head	*temp;
	struct sctp_transport	*transport;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
			/* Do bookkeeping for removing the peer and free it. */
			sctp_assoc_rm_peer(asoc, transport);
			break;
		}
	}
}

/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
					const struct sctp_association *asoc,
					const union sctp_addr *address)
{
	struct sctp_transport *t;

	/* Cycle through all transports searching for a peer address. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {
		if (sctp_cmp_addr_exact(address, &t->ipaddr))
			return t;
	}

	return NULL;
}

/* Remove all transports except a given one */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
				     struct sctp_transport *primary)
{
	struct sctp_transport	*temp;
	struct sctp_transport	*t;

	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
				 transports) {
		/* if the current transport is not the primary one, delete it */
		if (t != primary)
			sctp_assoc_rm_peer(asoc, t);
	}
}

/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  sctp_transport_cmd_t command,
				  sctp_sn_error_t error)
{
	struct sctp_transport *t = NULL;
	struct sctp_transport *first;
	struct sctp_transport *second;
	struct sctp_ulpevent *event;
	struct sockaddr_storage addr;
	int spc_state = 0;
	bool ulp_notify = true;

	/* Record the transition on the transport.  */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (SCTP_UNCONFIRMED == transport->state &&
		    SCTP_HEARTBEAT_SUCCESS == error)
			spc_state = SCTP_ADDR_CONFIRMED;
		else
			spc_state = SCTP_ADDR_AVAILABLE;
		/* Don't inform ULP about transition from PF to
		 * active state and set cwnd to 1, see SCTP
		 * Quick failover draft section 5.1, point 5
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
			transport->cwnd = 1;
		}
		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state.  Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED)
			transport->state = SCTP_INACTIVE;
		else {
			dst_release(transport->dst);
			transport->dst = NULL;
		}

		spc_state = SCTP_ADDR_UNREACHABLE;
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		ulp_notify = false;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
	 * user.
	 */
	if (ulp_notify) {
		memset(&addr, 0, sizeof(struct sockaddr_storage));
		memcpy(&addr, &transport->ipaddr,
		       transport->af_specific->sockaddr_len);
		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
					0, spc_state, error, GFP_ATOMIC);
		if (event)
			sctp_ulpq_tail_event(&asoc->ulpq, event);
	}

	/* Select new active and retran paths. */

	/* Look for the two most recently used active transports.
	 *
	 * This code produces the wrong ordering whenever jiffies
	 * rolls over, but we still get usable transports, so we don't
	 * worry about it.
	 */
	first = NULL; second = NULL;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			transports) {

		if ((t->state == SCTP_INACTIVE) ||
		    (t->state == SCTP_UNCONFIRMED) ||
		    (t->state == SCTP_PF))
			continue;
		if (!first || t->last_time_heard > first->last_time_heard) {
			second = first;
			first = t;
		}
		if (!second || t->last_time_heard > second->last_time_heard)
			second = t;
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the
	 * primary path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source
	 * transport address) to use.
	 *
	 * [If the primary is active but not most recent, bump the most
	 * recently used transport.]
	 */
	if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
	     (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
	    first != asoc->peer.primary_path) {
		second = first;
		first = asoc->peer.primary_path;
	}

	/* If we failed to find a usable transport, just camp on the
	 * primary, even if it is inactive.
	 */
	if (!first) {
		first = asoc->peer.primary_path;
		second = asoc->peer.primary_path;
	}

	/* Set the active and retran transports.  */
	asoc->peer.active_path = first;
	asoc->peer.retran_path = second;
}

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	atomic_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (atomic_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}

/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
	 */
	__u32 retval = asoc->next_tsn;
	asoc->next_tsn++;
	asoc->unack_data++;

	return retval;
}

/* Compare two addresses to see if they match.  Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af;

	af = sctp_get_af_specific(ss1->sa.sa_family);
	if (unlikely(!af))
		return 0;

	return af->cmp_addr(ss1, ss2);
}

/* Return an ecne chunk to get prepended to a packet.
 * Note: We are sly and return a shared, prealloced chunk.  FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
	struct sctp_chunk *chunk;

	/* Send ECNE if needed.
	 * Not being able to allocate a chunk here is not deadly.
	 */
	if (asoc->need_ecne)
		chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
	else
		chunk = NULL;

	return chunk;
}

/*
 * Find which transport this TSN was sent on.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/*
	 * FIXME: In general, find a more efficient data structure for
	 * searching.
	 */

	/*
	 * The general strategy is to search each transport's transmitted
	 * list.   Return which transport this TSN lives on.
	 *
	 * Let's be hopeful and check the active_path first.
	 * Another optimization would be to know if there is only one
	 * outbound path and not have to look for the TSN at all.
	 *
	 */

	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			transmitted_list) {

		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}

	/* If not found, go search all the other transports. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			transports) {

		if (transport == active)
			break;
		list_for_each_entry(chunk, &transport->transmitted,
				transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}

/* Is this the association we are looking for? */
struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
					   struct net *net,
					   const union sctp_addr *laddr,
					   const union sctp_addr *paddr)
{
	struct sctp_transport *transport;

	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
	    net_eq(sock_net(asoc->base.sk), net)) {
		transport = sctp_assoc_lookup_paddr(asoc, paddr);
		if (!transport)
			goto out;

		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
					 sctp_sk(asoc->base.sk)))
			goto out;
	}
	transport = NULL;

out:
	return transport;
}

/* Do delayed input processing.  This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int state;
	sctp_subtype_t subtype;
	int error = 0;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk.  This list has
		 *    been sent to the peer during the association setup.  It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);

		if (chunk->transport)
			chunk->transport->last_time_heard = jiffies;

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;
	}
	sctp_association_put(asoc);
}

/* This routine moves an association from its old sk to a new sk.  */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock.  */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint.  */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock.  */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations.  */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}

/* Update an association (possibly from unexpected COOKIE-ECHO processing).  */
void sctp_assoc_update(struct sctp_association *asoc,
		       struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.i = new->peer.i;
	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			 asoc->peer.i.initial_tsn, GFP_ATOMIC);

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_ssnmap_clear(asoc->ssnmap);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down on the next retransmission timer.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				transports) {
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
				sctp_assoc_add_peer(asoc, &trans->ipaddr,
						    GFP_ATOMIC, trans->state);
		}

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
		if (!asoc->ssnmap) {
			/* Move the ssnmap. */
			asoc->ssnmap = new->ssnmap;
			new->ssnmap = NULL;
		}

		if (!asoc->assoc_id) {
			/* get a new association id since we don't have one
			 * yet.
			 */
			sctp_assoc_set_id(asoc, GFP_ATOMIC);
		}
	}

	/* SCTP-AUTH: Save the peer parameters from the new association
	 * and also move the association shared keys over.
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	sctp_auth_key_put(asoc->asoc_shared_key);
	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

/* Update the retran path for sending a retransmitted packet.
 * Round-robin through the active transports, else round-robin
 * through the inactive transports as this is the next best thing
 * we can try.
 */
void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *t, *next;
	struct list_head *head = &asoc->peer.transport_addr_list;
	struct list_head *pos;

	if (asoc->peer.transport_count == 1)
		return;

	/* Find the next transport in a round-robin fashion. */
	t = asoc->peer.retran_path;
	pos = &t->transports;
	next = NULL;

	while (1) {
		/* Skip the head. */
		if (pos->next == head)
			pos = head->next;
		else
			pos = pos->next;

		t = list_entry(pos, struct sctp_transport, transports);

		/* We have wrapped around back to where we started without
		 * finding any other active transports; fall back to the
		 * candidate saved in 'next'.
		 */
		if (t == asoc->peer.retran_path) {
			t = next;
			break;
		}

		/* Try to find an active transport. */

		if ((t->state == SCTP_ACTIVE) ||
		    (t->state == SCTP_UNKNOWN)) {
			break;
		} else {
			/* Keep track of the next transport in case
			 * we don't find any active transport.
			 */
			if (t->state != SCTP_UNCONFIRMED && !next)
				next = t;
		}
	}

	if (t)
		asoc->peer.retran_path = t;
	else
		t = asoc->peer.retran_path;

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
				 " %p addr: ",
				 " port: %d\n",
				 asoc,
				 (&t->ipaddr),
				 ntohs(t->ipaddr.v4.sin_port));
}
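
/* Example walk (illustrative): with transports [A, B, C] on the list and
 * retran_path == B, the loop above considers C, then wraps around to A,
 * picking the first one in ACTIVE or UNKNOWN state; if neither qualifies,
 * it falls back to the first confirmed-but-inactive transport it saw.
 */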

/* Choose the transport for sending a retransmitted packet. */
struct sctp_transport *sctp_assoc_choose_alter_transport(
	struct sctp_association *asoc, struct sctp_transport *last_sent_to)
{
	/* If this is the first time packet is sent, use the active path,
	 * else use the retran path. If the last packet was sent over the
	 * retran path, update the retran path and use it.
	 */
	if (!last_sent_to)
		return asoc->peer.active_path;
	else {
		if (last_sent_to == asoc->peer.retran_path)
			sctp_assoc_update_retran_path(asoc);
		return asoc->peer.retran_path;
	}
}

/* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_transport *t;
	__u32 pmtu = 0;

	if (!asoc)
		return;

	/* Get the lowest pmtu of all the transports. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list,
				transports) {
		if (t->pmtu_pending && t->dst) {
			sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
			t->pmtu_pending = 0;
		}
		if (!pmtu || (t->pathmtu < pmtu))
			pmtu = t->pathmtu;
	}

	if (pmtu) {
		asoc->pathmtu = pmtu;
		asoc->frag_point = sctp_frag_point(asoc, pmtu);
	}

	SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
			  __func__, asoc, asoc->pathmtu, asoc->frag_point);
}

/* Should we send a SACK to update our peer? */
static inline int sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);
	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
			   asoc->pathmtu)))
			return 1;
		break;
	default:
		break;
	}
	return 0;
}
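
/* For example (illustrative numbers): with a 64 KiB sk_rcvbuf, a PMTU of
 * 1500 and the default rwnd_upd_shift of 4, an update SACK is warranted
 * once rwnd has grown at least max(65536 >> 4, 1500) = 4096 bytes past
 * the last advertised a_rwnd.
 */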

/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold.  The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);
		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}
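	/* e.g. with 3000 bytes of accumulated pressure and a PMTU of 1500,
	 * each pass here returns 1500 bytes to the window until the
	 * pressure is fully drained.
	 */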

	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
			  "- %u\n", __func__, asoc, len, asoc->rwnd,
			  asoc->rwnd_over, asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least
	 * the maximum of the association's PMTU and a fraction of the
	 * receive buffer (sk_rcvbuf >> rwnd_upd_shift).  The algorithm used
	 * is similar to the one described in Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;
		SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
				  "rwnd: %u a_rwnd: %u\n", __func__,
				  asoc, asoc->rwnd, asoc->a_rwnd);
		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack);

		/* Stop the SACK timer.  */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (timer_pending(timer) && del_timer(timer))
			sctp_association_put(asoc);
	}
}

/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
	SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive.  Store the
	 * potential pressure overflow so that the window can be restored
	 * back to the original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over = len - asoc->rwnd;
		asoc->rwnd = 0;
	}
	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
			  __func__, asoc, len, asoc->rwnd,
			  asoc->rwnd_over, asoc->rwnd_press);
}
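
/* Example: if rwnd is 1000 and a 1500-byte chunk arrives, rwnd drops to 0
 * and rwnd_over records the 500-byte deficit, which is repaid first before
 * sctp_assoc_rwnd_increase() grows the window again.
 */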

/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     sctp_scope_t scope, gfp_t gfp)
{
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 struct sctp_cookie *cookie,
					 gfp_t gfp)
{
	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
	int var_size3 = cookie->raw_addr_list_len;
	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;

	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
			    const union sctp_addr *laddr)
{
	int found = 0;

	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
				 sctp_sk(asoc->base.sk)))
		found = 1;

	return found;
}

/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	int assoc_id;
	int error = 0;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return error;
retry:
	if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
		return -ENOMEM;

	spin_lock_bh(&sctp_assocs_id_lock);
	error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
				  idr_low, &assoc_id);
	if (!error) {
		idr_low = assoc_id + 1;
		if (idr_low == INT_MAX)
			idr_low = 1;
	}
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return error;

	asoc->assoc_id = (sctp_assoc_t) assoc_id;
	return error;
}
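
/* The id stored here is what userspace later passes back as the
 * sctp_assoc_t in socket options on a one-to-many style socket;
 * sctp_association_destroy() removes it from the idr on teardown.
 */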

/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}

/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number".
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
		    htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
					const struct sctp_association *asoc,
					__be32 serial)
{
	struct sctp_chunk *ack;

	/* Walk through the list of cached ASCONF-ACKs and find the
	 * ack chunk whose serial number matches that of the request.
	 */
	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}

	return NULL;
}

void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}