/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP association.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Ryan Layer            <rmlayer@us.ibm.com>
 *    Kevin Gao             <kevin.gao@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>

#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal functions. */
static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);

/* 1st Level Abstractions. */

/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
						      const struct sctp_endpoint *ep,
						      const struct sock *sk,
						      sctp_scope_t scope,
						      gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	int i;
	sctp_paramhdr_t *p;
	int err;

	/* Retrieve the SCTP per socket area. */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here. */
	asoc->ep = (struct sctp_endpoint *)ep;
	asoc->base.sk = (struct sock *)sk;

	sctp_endpoint_hold(asoc->ep);
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure. */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields. */
	atomic_set(&asoc->base.refcnt, 1);

	/* Initialize the bind addr area. */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = net->sctp.pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;
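
	/* Worked example (defaults assumed, not taken from this file): with
	 * the RFC 4960 default RTO.Max of 60 seconds, the T5-shutdown-guard
	 * timer is armed for 5 * 60 = 300 seconds, bounding how long a
	 * graceful shutdown may stall before the association is aborted.
	 */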

	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

	/* Initializes the timers */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		setup_timer(&asoc->timers[i], sctp_timer_events[i],
			    (unsigned long)asoc);

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 2960, Section 6.1 - An SCTP receiver MUST be able to receive
	 * a minimum of 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;
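
	/* Worked example, assuming a common sk_rcvbuf default of 212992
	 * bytes: rwnd = 212992 / 2 = 106496, far above
	 * SCTP_DEFAULT_MINWINDOW (1500), so the clamp does not trigger;
	 * only a receive buffer below 3000 bytes would fall back to the
	 * 1500-byte minimum.
	 */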

	/* Use my own max window until I learn something better. */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;
	asoc->strreset_outseq = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses. */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk. Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_generation = 1;

	/* Assume that the peer will tell us if he recognizes ASCONF
	 * as part of INIT exchange.
	 * The sctp_addip_noauth option is there for backward compatibility
	 * and will revert old behavior.
	 */
	if (net->sctp.addip_noauth)
		asoc->peer.asconf_capable = 1;

	/* Create an input queue. */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue. */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
	if (err)
		goto fail_init;

	asoc->active_key_id = ep->active_key_id;
	asoc->prsctp_enable = ep->prsctp_enable;
	asoc->reconf_enable = ep->reconf_enable;
	asoc->strreset_enable = ep->strreset_enable;

	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
		       ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
		       ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (sctp_paramhdr_t *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
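
	/* Resulting layout, assuming a 4-byte sctp_paramhdr_t and an
	 * SCTP_AUTH_RANDOM_LENGTH of 32: the RANDOM parameter occupies
	 * 4 + 32 = 36 bytes, p->length holds htons(36), and the 32 random
	 * bytes start right after the header at p + 1.
	 */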

	return asoc;

fail_init:
	sock_put(asoc->base.sk);
	sctp_endpoint_put(asoc->ep);
	return NULL;
}

/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
					      const struct sock *sk,
					      sctp_scope_t scope,
					      gfp_t gfp)
{
	struct sctp_association *asoc;

	asoc = kzalloc(sizeof(*asoc), gfp);
	if (!asoc)
		goto fail;

	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(assoc);

	pr_debug("Created asoc %p\n", asoc);

	return asoc;

fail_init:
	kfree(asoc);
fail:
	return NULL;
}

/* Free this association if possible.  There may still be users, so
 * the actual deallocation may be delayed.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!list_empty(&asoc->asocs)) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = true;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free stream information. */
	sctp_stream_free(asoc->stream);

	if (asoc->strreset_chunk)
		sctp_chunk_free(asoc->strreset_chunk);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them?  To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_unhash_transport(transport);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted */
	kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}

/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	if (unlikely(!asoc->base.dead)) {
		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
		return;
	}

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	WARN_ON(atomic_read(&asoc->rmem_alloc));

	kfree(asoc);
	SCTP_DBG_OBJCNT_DEC(assoc);
}

/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* it's a changeover only if we already have a primary path
	 * that we are changing
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1;

	asoc->peer.primary_path = transport;

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother if we have data queued or outstanding on
	 * the association.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}

/* Remove a transport from an association. */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct list_head *pos;
	struct sctp_transport *transport;

	pr_debug("%s: association:%p addr:%pISpc\n",
		 __func__, asoc, &peer->ipaddr.sa);

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);
	/* Remove this peer from the transport hashtable */
	sctp_unhash_transport(peer);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	if (asoc->strreset_chunk &&
	    asoc->strreset_chunk->transport == peer) {
		asoc->strreset_chunk->transport = transport;
		sctp_transport_reset_reconf_timer(transport);
	}

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport an SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off.  The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;
		struct sctp_chunk *ch;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
				    transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
				      &active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
				       jiffies + active->rto))
				sctp_transport_hold(active);
	}

	asoc->peer.transport_count--;

	sctp_transport_free(peer);
}

/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
		 asoc, &addr->sa, peer_state);

	/* Set the port if it has not been set yet. */
	if (!asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call.  Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN)
			peer->state = SCTP_ACTIVE;
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans. */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	sctp_transport_route(peer, NULL, sp);

	/* Initialize the pmtu of the transport. */
	if (peer->param_flags & SPP_PMTUD_DISABLE) {
		if (asoc->pathmtu)
			peer->pathmtu = asoc->pathmtu;
		else
			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
	}

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	if (asoc->pathmtu)
		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
	else
		asoc->pathmtu = peer->pathmtu;
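
	/* Example of this clamping rule (values assumed): if the
	 * association PMTU is currently 1500 and the new peer reports a
	 * path MTU of 1280 (the IPv6 minimum), the association PMTU drops
	 * to min(1280, 1500) = 1280; a larger peer PMTU leaves it unchanged.
	 */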

	pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
		 asoc->pathmtu);

	peer->pmtu_pending = 0;

	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *      min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
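
	/* Worked example of the RFC formula, assuming a 1500-byte PMTU:
	 * cwnd = min(4 * 1500, max(2 * 1500, 4380)) = min(6000, 4380)
	 * = 4380 bytes, i.e. roughly three full-sized packets in the
	 * initial burst.
	 */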

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Add this peer into the transport hashtable */
	if (sctp_hash_transport(peer)) {
		sctp_transport_free(peer);
		return NULL;
	}

	/* Attach the remote transport to our asoc. */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	/* If we do not yet have a primary path, set one. */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}

/* Delete a transport address from an association. */
void sctp_assoc_del_peer(struct sctp_association *asoc,
			 const union sctp_addr *addr)
{
	struct list_head *pos;
	struct list_head *temp;
	struct sctp_transport *transport;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
			/* Do book keeping for removing the peer and free it. */
			sctp_assoc_rm_peer(asoc, transport);
			break;
		}
	}
}

/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
					const struct sctp_association *asoc,
					const union sctp_addr *address)
{
	struct sctp_transport *t;

	/* Cycle through all transports searching for a peer address. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (sctp_cmp_addr_exact(address, &t->ipaddr))
			return t;
	}

	return NULL;
}

/* Remove all transports except a given one */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
				     struct sctp_transport *primary)
{
	struct sctp_transport *temp;
	struct sctp_transport *t;

	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
				 transports) {
		/* if the current transport is not the primary one, delete it */
		if (t != primary)
			sctp_assoc_rm_peer(asoc, t);
	}
}

/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  sctp_transport_cmd_t command,
				  sctp_sn_error_t error)
{
	struct sctp_ulpevent *event;
	struct sockaddr_storage addr;
	int spc_state = 0;
	bool ulp_notify = true;

	/* Record the transition on the transport. */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (SCTP_UNCONFIRMED == transport->state &&
		    SCTP_HEARTBEAT_SUCCESS == error)
			spc_state = SCTP_ADDR_CONFIRMED;
		else
			spc_state = SCTP_ADDR_AVAILABLE;
		/* Don't inform ULP about transition from PF to
		 * active state and set cwnd to 1 MTU, see SCTP
		 * Quick failover draft section 5.1, point 5
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
			transport->cwnd = asoc->pathmtu;
		}
		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state.  Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED)
			transport->state = SCTP_INACTIVE;
		else {
			sctp_transport_dst_release(transport);
			ulp_notify = false;
		}

		spc_state = SCTP_ADDR_UNREACHABLE;
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		ulp_notify = false;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
	 * to the user.
	 */
	if (ulp_notify) {
		memset(&addr, 0, sizeof(struct sockaddr_storage));
		memcpy(&addr, &transport->ipaddr,
		       transport->af_specific->sockaddr_len);

		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
					0, spc_state, error, GFP_ATOMIC);
		if (event)
			sctp_ulpq_tail_event(&asoc->ulpq, event);
	}

	/* Select new active and retran paths. */
	sctp_select_active_and_retran_path(asoc);
}

/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	atomic_inc(&asoc->base.refcnt);
}

/* Release a reference to an association and cleanup
 * if there are no more references.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (atomic_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}

/* Allocate the next TSN, Transmission Sequence Number, for the given
 * association.
 */
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
	/* From Section 1.6 Serial Number Arithmetic:
	 * Transmission Sequence Numbers wrap around when they reach
	 * 2**32 - 1. That is, the next TSN a DATA chunk MUST use
	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
	 */
	__u32 retval = asoc->next_tsn;
	asoc->next_tsn++;
	asoc->unack_data++;

	return retval;
}
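
/* Serial number arithmetic in practice: if next_tsn is 0xffffffff, the
 * function above returns 0xffffffff and the increment wraps next_tsn to 0,
 * exactly the wrap-around the comment requires; unsigned __u32 overflow
 * provides the modulo-2**32 arithmetic for free.
 */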

/* Compare two addresses to see if they match.  Wildcard addresses
 * only match themselves.
 */
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
			const union sctp_addr *ss2)
{
	struct sctp_af *af;

	af = sctp_get_af_specific(ss1->sa.sa_family);
	if (unlikely(!af))
		return 0;

	return af->cmp_addr(ss1, ss2);
}

/* Return an ecne chunk to get prepended to a packet.
 * Note:  We are sly and return a shared, prealloced chunk.  FIXME:
 * No we don't, but we could/should.
 */
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
	if (!asoc->need_ecne)
		return NULL;

	/* Send ECNE if needed.
	 * Not being able to allocate a chunk here is not deadly.
	 */
	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
}

/*
 * Find which transport this TSN was sent on.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/*
	 * FIXME: In general, find a more efficient data structure for
	 * searching.
	 */

	/*
	 * The general strategy is to search each transport's transmitted
	 * list.  Return which transport this TSN lives on.
	 *
	 * Let's be hopeful and check the active_path first.
	 * Another optimization would be to know if there is only one
	 * outbound path and not have to look for the TSN at all.
	 *
	 */

	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			    transmitted_list) {

		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}

	/* If not found, go search all the other transports. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			    transports) {

		if (transport == active)
			continue;
		list_for_each_entry(chunk, &transport->transmitted,
				    transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}

/* Is this the association we are looking for? */
struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
					   struct net *net,
					   const union sctp_addr *laddr,
					   const union sctp_addr *paddr)
{
	struct sctp_transport *transport;

	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
	    net_eq(sock_net(asoc->base.sk), net)) {
		transport = sctp_assoc_lookup_paddr(asoc, paddr);
		if (!transport)
			goto out;

		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
					 sctp_sk(asoc->base.sk)))
			goto out;
	}
	transport = NULL;

out:
	return transport;
}

/* Do delayed input processing.  This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int state;
	sctp_subtype_t subtype;
	int error = 0;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while ((chunk = sctp_inq_pop(inqueue)) != NULL) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk.  This list has
		 *    been sent to the peer during the association setup.  It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;
	}
	sctp_association_put(asoc);
}

/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock. */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint. */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock. */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations. */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}

/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
void sctp_assoc_update(struct sctp_association *asoc,
		       struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.auth_capable = new->peer.auth_capable;
	asoc->peer.i = new->peer.i;
	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			 asoc->peer.i.initial_tsn, GFP_ATOMIC);

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_stream_clear(asoc->stream);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down on the next retransmission timer.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				    transports) {
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
				sctp_assoc_add_peer(asoc, &trans->ipaddr,
						    GFP_ATOMIC, trans->state);
		}

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
		if (!asoc->stream) {
			asoc->stream = new->stream;
			new->stream = NULL;
		}

		if (!asoc->assoc_id) {
			/* get a new association id since we don't have one
			 * yet.
			 */
			sctp_assoc_set_id(asoc, GFP_ATOMIC);
		}
	}

	/* SCTP-AUTH: Save the peer parameters from the new associations
	 * and also move the association shared keys over
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

/* Update the retran path for sending a retransmitted packet.
 * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
 *
 *   When there is outbound data to send and the primary path
 *   becomes inactive (e.g., due to failures), or where the
 *   SCTP user explicitly requests to send data to an
 *   inactive destination transport address, before reporting
 *   an error to its ULP, the SCTP endpoint should try to send
 *   the data to an alternate active destination transport
 *   address if one exists.
 *
 *   When retransmitting data that timed out, if the endpoint
 *   is multihomed, it should consider each source-destination
 *   address pair in its retransmission selection policy.
 *   When retransmitting timed-out data, the endpoint should
 *   attempt to pick the most divergent source-destination
 *   pair from the original source-destination pair to which
 *   the packet was transmitted.
 *
 *   Note: Rules for picking the most divergent source-destination
 *   pair are an implementation decision and are not specified
 *   within this document.
 *
 * Our basic strategy is to round-robin transports in priorities
 * according to sctp_trans_score() e.g., if no such
 * transport with state SCTP_ACTIVE exists, round-robin through
 * SCTP_UNKNOWN, etc. You get the picture.
 */
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
	switch (trans->state) {
	case SCTP_ACTIVE:
		return 3;	/* best case */
	case SCTP_UNKNOWN:
		return 2;
	case SCTP_PF:
		return 1;
	default: /* case SCTP_INACTIVE */
		return 0;	/* worst case */
	}
}
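
/* The resulting preference order when electing a path is therefore
 * ACTIVE (3) > UNKNOWN (2) > PF (1) > INACTIVE (0); e.g. a freshly added,
 * not-yet-probed UNKNOWN transport still beats one sitting in the
 * potentially-failed (PF) state.
 */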

static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
						   struct sctp_transport *trans2)
{
	if (trans1->error_count > trans2->error_count) {
		return trans2;
	} else if (trans1->error_count == trans2->error_count &&
		   ktime_after(trans2->last_time_heard,
			       trans1->last_time_heard)) {
		return trans2;
	} else {
		return trans1;
	}
}

static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
						    struct sctp_transport *best)
{
	u8 score_curr, score_best;

	if (best == NULL || curr == best)
		return curr;

	score_curr = sctp_trans_score(curr);
	score_best = sctp_trans_score(best);

	/* First, try a score-based selection if both transport states
	 * differ. If we're in a tie, let's try to make a more clever
	 * decision here based on error counts and last time heard.
	 */
	if (score_curr > score_best)
		return curr;
	else if (score_curr == score_best)
		return sctp_trans_elect_tie(best, curr);
	else
		return best;
}

void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans = asoc->peer.retran_path;
	struct sctp_transport *trans_next = NULL;

	/* We're done as we only have the one and only path. */
	if (asoc->peer.transport_count == 1)
		return;
	/* If active_path and retran_path are the same and active,
	 * then this is the only active path. Use it.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    asoc->peer.active_path->state == SCTP_ACTIVE)
		return;

	/* Iterate from retran_path's successor back to retran_path. */
	for (trans = list_next_entry(trans, transports); 1;
	     trans = list_next_entry(trans, transports)) {
		/* Manually skip the head element. */
		if (&trans->transports == &asoc->peer.transport_addr_list)
			continue;
		if (trans->state == SCTP_UNCONFIRMED)
			continue;
		trans_next = sctp_trans_elect_best(trans, trans_next);
		/* Active is good enough for immediate return. */
		if (trans_next->state == SCTP_ACTIVE)
			break;
		/* We've reached the end, time to update path. */
		if (trans == asoc->peer.retran_path)
			break;
	}

	asoc->peer.retran_path = trans_next;

	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}
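
/* Example walk (topology assumed): with transports A -> B -> C on the list
 * and retran_path == A, the loop starts at B; if B is ACTIVE it is chosen
 * immediately, otherwise C is scored against it, and the search stops once
 * it wraps back around to A, preserving the round-robin property.
 */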

static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
	struct sctp_transport *trans_pf = NULL;

	/* Look for the two most recently used active transports. */
	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
			    transports) {
		/* Skip uninteresting transports. */
		if (trans->state == SCTP_INACTIVE ||
		    trans->state == SCTP_UNCONFIRMED)
			continue;
		/* Keep track of the best PF transport from our
		 * list in case we don't find an active one.
		 */
		if (trans->state == SCTP_PF) {
			trans_pf = sctp_trans_elect_best(trans, trans_pf);
			continue;
		}
		/* For active transports, pick the most recent ones. */
		if (trans_pri == NULL ||
		    ktime_after(trans->last_time_heard,
				trans_pri->last_time_heard)) {
			trans_sec = trans_pri;
			trans_pri = trans;
		} else if (trans_sec == NULL ||
			   ktime_after(trans->last_time_heard,
				       trans_sec->last_time_heard)) {
			trans_sec = trans;
		}
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the primary
	 * path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source transport
	 * address) to use. [If the primary is active but not most recent,
	 * bump the most recently used transport.]
	 */
	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
	    asoc->peer.primary_path != trans_pri) {
		trans_sec = trans_pri;
		trans_pri = asoc->peer.primary_path;
	}

	/* We did not find anything useful for a possible retransmission
	 * path; either the primary path that we found is the same as
	 * the current one, or we didn't generally find an active one.
	 */
	if (trans_sec == NULL)
		trans_sec = trans_pri;

	/* If we failed to find a usable transport, just camp on the
	 * active or pick a PF iff it's the better choice.
	 */
	if (trans_pri == NULL) {
		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
		trans_sec = trans_pri;
	}

	/* Set the active and retran transports. */
	asoc->peer.active_path = trans_pri;
	asoc->peer.retran_path = trans_sec;
}

struct sctp_transport *
sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
				  struct sctp_transport *last_sent_to)
{
	/* If this is the first time packet is sent, use the active path,
	 * else use the retran path. If the last packet was sent over the
	 * retran path, update the retran path and use it.
	 */
	if (last_sent_to == NULL) {
		return asoc->peer.active_path;
	} else {
		if (last_sent_to == asoc->peer.retran_path)
			sctp_assoc_update_retran_path(asoc);

		return asoc->peer.retran_path;
	}
}

/* Update the association's pmtu and frag_point by going through all the
 * transports. This routine is called when a transport's PMTU has changed.
 */
void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_transport *t;
	__u32 pmtu = 0;

	if (!asoc)
		return;

	/* Get the lowest pmtu of all the transports. */
	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (t->pmtu_pending && t->dst) {
			sctp_transport_update_pmtu(sk, t,
						   SCTP_TRUNC4(dst_mtu(t->dst)));
			t->pmtu_pending = 0;
		}
		if (!pmtu || (t->pathmtu < pmtu))
			pmtu = t->pathmtu;
	}

	if (pmtu) {
		asoc->pathmtu = pmtu;
		asoc->frag_point = sctp_frag_point(asoc, pmtu);
	}

	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
		 asoc->pathmtu, asoc->frag_point);
}

/* Should we send a SACK to update our peer? */
static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);
	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
		     (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
		     asoc->pathmtu)))
			return true;
		break;
	default:
		break;
	}
	return false;
}
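
/* Worked threshold example (defaults assumed: rwnd_upd_shift of 4,
 * sk_rcvbuf of 212992, pathmtu of 1500): an update SACK is warranted once
 * rwnd exceeds a_rwnd by at least max(212992 >> 4, 1500) = 13312 bytes,
 * mirroring the silly-window avoidance of RFC 1122, Section 4.2.3.3.
 */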

/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold.  The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);
		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}
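
	/* Recovery example (values assumed): with a pathmtu of 1500 and an
	 * accumulated rwnd_press of 4000, each call pays back
	 * min(1500, 4000) = 1500 bytes, so the pressured window is restored
	 * over roughly three increases rather than all at once.
	 */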

	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least the
	 * minimum of the association's PMTU and half of the receive buffer.
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;

		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
			 asoc->a_rwnd);

		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);

		/* Stop the SACK timer. */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}

/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
		pr_debug("%s: association:%p has asoc->rwnd:%u, "
			 "asoc->rwnd_over:%u!\n", __func__, asoc,
			 asoc->rwnd, asoc->rwnd_over);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive.  Store the
	 * potential pressure overflow so that the window can be restored
	 * back to its original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over += len - asoc->rwnd;
		asoc->rwnd = 0;
	}
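
	/* Decrease examples (values assumed): with rwnd = 1000 and
	 * len = 1500, the else branch leaves rwnd = 0 and records
	 * rwnd_over = 500 so the excess is credited back later; with
	 * rwnd = 4000, len = 1500 and the buffer overflowed (over == 1),
	 * rwnd collapses to 0 and the remaining 2500 bytes are parked in
	 * rwnd_press instead.
	 */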

	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->rwnd_press);
}

/* Build the bind address list for the association based on info from the
 * local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     sctp_scope_t scope, gfp_t gfp)
{
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}

/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
					 struct sctp_cookie *cookie,
					 gfp_t gfp)
{
	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
	int var_size3 = cookie->raw_addr_list_len;
	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;

	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
}

/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
			    const union sctp_addr *laddr)
{
	int found = 0;

	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
				 sctp_sk(asoc->base.sk)))
		found = 1;

	return found;
}

/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	bool preload = gfpflags_allow_blocking(gfp);
	int ret;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return 0;

	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0 is not a valid assoc_id, must be >= 1 */
	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;
	return 0;
}
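
/* Usage sketch (illustrative, callers assumed): a caller that may sleep
 * can pass GFP_KERNEL, so gfpflags_allow_blocking() is true and
 * idr_preload() pre-charges the allocator; the idr_alloc_cyclic() call
 * itself then runs under the spinlock with GFP_NOWAIT, handing out ids
 * cyclically from 1 upward so a just-freed id is not reused immediately.
 */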

/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}

/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number".
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
		    htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}

/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
					const struct sctp_association *asoc,
					__be32 serial)
{
	struct sctp_chunk *ack;

	/* Walk through the list of cached ASCONF-ACKs and find the
	 * ack chunk whose serial number matches that of the request.
	 */
	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
		if (sctp_chunk_pending(ack))
			continue;
		if (ack->subh.addip_hdr->serial == serial) {
			sctp_chunk_hold(ack);
			return ack;
		}
	}

	return NULL;
}

void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}