/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/slab.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}
/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch = NULL;

	if (!list_empty(&q->out_chunk_list)) {
		struct list_head *entry = q->out_chunk_list.next;

		ch = list_entry(entry, struct sctp_chunk, list);
		list_del_init(entry);
		q->out_qlen -= ch->skb->len;
	}
	return ch;
}
/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add_tail(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}
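/* The SFR-CACC helpers below implement the per-step checks quoted in the
 * comments that precede each one.  Each helper returns nonzero when the
 * missing report count for the TSN in question must NOT be incremented,
 * and zero otherwise.
 */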
/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}
/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 &&
	    (transport && !transport->cacc.cacc_saw_newack))
		return 1;
	return 0;
}
/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
		return 0;
	}
	return 0;
}
/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}
/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}
/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	memset(q, 0, sizeof(struct sctp_outq));

	q->asoc = asoc;
	INIT_LIST_HEAD(&q->out_chunk_list);
	INIT_LIST_HEAD(&q->control_chunk_list);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);

	q->empty = 1;
}
/* Free the outqueue structure and any related pending chunks.
 */
static void __sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}
void sctp_outq_teardown(struct sctp_outq *q)
{
	__sctp_outq_teardown(q);
	sctp_outq_init(q->asoc, q);
}
/* Free the outqueue structure and any related pending chunks. */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	__sctp_outq_teardown(q);
}
/* Put a new chunk in an sctp_outq.  */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct net *net = sock_net(q->asoc->base.sk);
	int error = 0;

	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
		 chunk && chunk->chunk_hdr ?
		 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
		 "illegal chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (sctp_chunk_is_data(chunk)) {
		/* Is it OK to queue data chunks?  */
		/* From 9. Termination of Association
		 *
		 * When either endpoint performs a shutdown, the
		 * association on each peer will stop accepting new
		 * data from its user and only deliver data in queue
		 * at the time of sending or receiving the SHUTDOWN
		 * chunk.
		 */
		switch (q->asoc->state) {
		case SCTP_STATE_CLOSED:
		case SCTP_STATE_SHUTDOWN_PENDING:
		case SCTP_STATE_SHUTDOWN_SENT:
		case SCTP_STATE_SHUTDOWN_RECEIVED:
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			/* Cannot send after transport endpoint shutdown */
			error = -ESHUTDOWN;
			break;

		default:
			pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
				 "illegal chunk");

			sctp_outq_tail_data(q, chunk);
			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
			else
				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
			q->empty = 0;
			break;
		}
	} else {
		list_add_tail(&chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
	}

	if (error < 0)
		return error;

	if (!q->cork)
		error = sctp_outq_flush(q, 0);

	return error;
}
/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
static void sctp_insert_list(struct list_head *head, struct list_head *new)
{
	struct list_head *pos;
	struct sctp_chunk *nchunk, *lchunk;
	__u32 ntsn, ltsn;
	int done = 0;

	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
	ntsn = ntohl(nchunk->subh.data_hdr->tsn);

	list_for_each(pos, head) {
		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
		if (TSN_lt(ntsn, ltsn)) {
			list_add(new, pos->prev);
			done = 1;
			break;
		}
	}
	if (!done)
		list_add_tail(new, head);
}
/* Mark all the eligible packets on a transport for retransmission.  */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 reason)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been previously acked,
			 * stop considering it 'outstanding'.  Our peer
			 * will most likely never see it since it will
			 * not be retransmitted
			 */
			if (!chunk->tsn_gap_acked) {
				if (chunk->transport)
					chunk->transport->flight_size -=
							sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
				q->asoc->peer.rwnd += sctp_data_size(chunk);
			}
			continue;
		}

		/* If we are doing retransmission due to a timeout or pmtu
		 * discovery, only the chunks that are not yet acked should
		 * be added to the retransmit queue.
		 */
		if ((reason == SCTP_RTXR_FAST_RTX &&
		     (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
		    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			if (chunk->transport)
				transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements. Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue. The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
		 "flight_size:%d, pba:%d\n", __func__, transport, reason,
		 transport->cwnd, transport->ssthresh, transport->flight_size,
		 transport->partial_bytes_acked);
}
/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     sctp_retransmit_reason_t reason)
{
	struct net *net = sock_net(q->asoc->base.sk);
	int error = 0;

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		transport->asoc->rtx_data_chunks +=
			transport->asoc->unack_data;
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		q->fast_rtx = 1;
		break;
	case SCTP_RTXR_PMTUD:
		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
		transport->asoc->init_retries++;
		break;
	}

	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	if (reason == SCTP_RTXR_T3_RTX)
		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

	/* Flush the queues only on timeout, since fast_rtx is only
	 * triggered during sack processing and the queue
	 * will be flushed at the end.
	 */
	if (reason != SCTP_RTXR_FAST_RTX)
		error = sctp_outq_flush(q, /* rtx_timeout */ 1);

	if (error)
		q->asoc->base.sk->sk_err = -error;
}
/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct list_head *lqueue;
	struct sctp_transport *transport = pkt->transport;
	sctp_xmit_t status;
	struct sctp_chunk *chunk, *chunk1;
	int fast_rtx;
	int error = 0;
	int timer = 0;
	int done = 0;

	lqueue = &q->retransmit;
	fast_rtx = q->fast_rtx;

	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to opening of window.
	 *
	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]). Call this value
	 * K. Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 *
	 * For fast retransmissions we also send only ONE packet.  However,
	 * if we are just flushing the queue due to open window, we'll
	 * try to send as much as possible.
	 */
	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(&chunk->transmitted_list);
			sctp_insert_list(&q->abandoned,
					 &chunk->transmitted_list);
			continue;
		}

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);
			continue;
		}

		/* If we are doing fast retransmit, ignore non-fast_retransmit
		 * chunks
		 */
		if (fast_rtx && !chunk->fast_retransmit)
			continue;

redo:
		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			if (!pkt->has_data && !pkt->has_cookie_echo) {
				/* If this packet did not contain DATA then
				 * retransmission did not happen, so do it
				 * again.  We'll ignore the error here since
				 * control chunks are already freed so there
				 * is nothing we can do.
				 */
				sctp_packet_transmit(pkt);
				goto redo;
			}

			/* Send this packet.  */
			error = sctp_packet_transmit(pkt);

			/* If we are retransmitting, we should only
			 * send a single packet.
			 * Otherwise, try appending this chunk again.
			 */
			if (rtx_timeout || fast_rtx)
				done = 1;
			else
				goto redo;

			/* Bundle next chunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			done = 1;
			break;

		case SCTP_XMIT_NAGLE_DELAY:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA because of nagle delay. */
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->asoc->stats.rtxchunks++;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	q->fast_rtx = 0;

	return error;
}
/* Uncork the outqueue: flush any chunks that were queued while corked. */
int sctp_outq_uncork(struct sctp_outq *q)
{
	if (q->cork)
		q->cork = 0;

	return sctp_outq_flush(q, 0);
}
/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk, *tmp;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;
	int one_packet = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;
	/*
	 * When bundling control chunks with DATA chunks, an
	 * endpoint MUST place control chunks first in the outbound
	 * SCTP packet.  The transmitter MUST transmit DATA chunks
	 * within a SCTP packet in increasing order of TSN.
	 */

	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		/*
		 * F1) This means that until such time as the ASCONF
		 * containing the add is acknowledged, the sender MUST
		 * NOT use the new IP address as a source for ANY SCTP
		 * packet except on carrying an ASCONF Chunk.
		 */
		if (asoc->src_out_of_asoc_ok &&
		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
			continue;

		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			/*
			 * If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (transport &&
			    sctp_cmp_addr_exact(&chunk->dest,
						&transport->ipaddr))
				new_transport = transport;
			else
				new_transport = sctp_assoc_lookup_paddr(asoc,
								&chunk->dest);

			/* if we still don't have a new transport, then
			 * use the current active path.
			 */
			if (!new_transport)
				new_transport = asoc->peer.active_path;
		} else if ((new_transport->state == SCTP_INACTIVE) ||
			   (new_transport->state == SCTP_UNCONFIRMED) ||
			   (new_transport->state == SCTP_PF)) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 *
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 * COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk)) {
				packet->vtag = asoc->c.my_vtag;
			}
		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* Fall through */

		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
			status = sctp_packet_transmit_chunk(packet, chunk,
							    one_packet);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &q->control_chunk_list);
			} else {
				asoc->stats.octrlchunks++;
				/* PR-SCTP C5) If a FORWARD TSN is sent, the
				 * sender MUST assure that at least one T3-rtx
				 * timer is running.
				 */
				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
					sctp_transport_reset_timers(transport);
			}
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}
	if (q->asoc->src_out_of_asoc_ok)
		goto sctp_flush_out;

	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
				goto sctp_flush_out;
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */
			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Apply Max.Burst limitation to the current transport in
		 * case it will be used for new data.  We are going to
		 * reset it before we return, but we want to apply the limit
		 * to the currently queued data.
		 */
		if (transport)
			sctp_transport_burst_limited(transport);
		/* Finally, transmit new packets.  */
		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport ||
			    ((new_transport->state == SCTP_INACTIVE) ||
			     (new_transport->state == SCTP_UNCONFIRMED) ||
			     (new_transport->state == SCTP_PF)))
				new_transport = asoc->peer.active_path;
			if (new_transport->state == SCTP_UNCONFIRMED)
				continue;

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
				/* We've switched transports, so apply the
				 * Burst limit to the new transport.
				 */
				sctp_transport_burst_limited(transport);
			}

			pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
				 "skb->users:%d\n",
				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
				 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
				 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
				 atomic_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet.  */
			status = sctp_packet_transmit_chunk(packet, chunk, 0);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_NAGLE_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
					 __func__, ntohl(chunk->subh.data_hdr->tsn),
					 status);

				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;

			case SCTP_XMIT_OK:
				/* The sender is in the SHUTDOWN-PENDING state,
				 * The sender MAY set the I-bit in the DATA
				 * chunk header.
				 */
				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
					asoc->stats.ouodchunks++;
				else
					asoc->stats.oodchunks++;

				break;

			default:
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_timers(transport);

			q->empty = 0;

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing.  */
		break;
	}
sctp_flush_out:

	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet))
			error = sctp_packet_transmit(packet);

		/* Clear the burst limited state, if any */
		sctp_transport_burst_reset(t);
	}

	return error;
}
/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	sctp_sack_variable_t *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}
/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned int outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	asoc->stats.gapcnt += gap_ack_blocks;
	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, &chunk->source, sack,
				       &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {

		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */

	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	sctp_generate_fwdtsn(q, sack_ctsn);

	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
		 asoc->adv_peer_ack_point);

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = (list_empty(&q->out_chunk_list) &&
		    list_empty(&q->retransmit));
	if (!q->empty)
		goto finish;

	list_for_each_entry(transport, transport_list, transports) {
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	pr_debug("%s: sack queue is empty\n", __func__);
finish:
	return q->empty;
}
/* Is the outqueue empty?  */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/
/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   union sctp_addr *saddr,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;
	int migrate_bytes = 0;
	bool forward_progress = false;

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);

	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been acked, stop
			 * considering it as 'outstanding'.
			 */
			if (!tchunk->tsn_gap_acked) {
				if (tchunk->transport)
					tchunk->transport->flight_size -=
							sctp_data_size(tchunk);
				q->outstanding_bytes -= sctp_data_size(tchunk);
			}
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    tchunk->rtt_in_progress) {
					tchunk->rtt_in_progress = 0;
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}

			/* If the chunk hasn't been marked as ACKED,
			 * mark it and account bytes_acked if the
			 * chunk had a valid transport (it will not
			 * have a transport if ASCONF had deleted it
			 * while DATA was outstanding).
			 */
			if (!tchunk->tsn_gap_acked) {
				tchunk->tsn_gap_acked = 1;
				if (TSN_lt(*highest_new_tsn_in_sack, tsn))
					*highest_new_tsn_in_sack = tsn;
				bytes_acked += sctp_data_size(tchunk);
				if (!tchunk->transport)
					migrate_bytes += sctp_data_size(tchunk);
				forward_progress = true;
			}

			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960  6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;
				forward_progress = true;

				if (!tchunk->tsn_gap_acked) {
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (transport &&
					    sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'. A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				list_add_tail(lchunk, &tlist);
			}
		} else {
			if (tchunk->tsn_gap_acked) {
				pr_debug("%s: receiver reneged on data TSN:0x%x\n",
					 __func__, tsn);

				tchunk->tsn_gap_acked = 0;

				if (tchunk->transport)
					bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);
		}
	}

	if (transport) {
		if (bytes_acked) {
			struct sctp_association *asoc = transport->asoc;

			/* We may have counted DATA that was migrated
			 * to this transport due to DEL-IP operation.
			 * Subtract those bytes, since they were never
			 * sent on this transport and shouldn't be
			 * credited to this transport.
			 */
			bytes_acked -= migrate_bytes;

			/* 8.2. When an outstanding TSN is acknowledged,
			 * the endpoint shall clear the error counter of
			 * the destination transport address to which the
			 * DATA chunk was last sent.
			 * The association's overall error counter is
			 * also cleared.
			 */
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;
			forward_progress = true;

			/*
			 * While in SHUTDOWN PENDING, we may have started
			 * the T5 shutdown guard timer after reaching the
			 * retransmission limit. Stop that timer as soon
			 * as the receiver acknowledged any data.
			 */
			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
			    del_timer(&asoc->timers
				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
					sctp_association_put(asoc);

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if ((transport->state == SCTP_INACTIVE ||
			     transport->state == SCTP_UNCONFIRMED) &&
			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			if (transport->flight_size == 0)
				transport->partial_bytes_acked = 0;
			q->outstanding_bytes -= bytes_acked + migrate_bytes;
		} else {
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver. The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 *
			 * Allow the association to timeout while in SHUTDOWN
			 * PENDING or SHUTDOWN RECEIVED in case the receiver
			 * stays in zero window mode forever.
			 */
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn+2 == q->asoc->next_tsn) &&
			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
				pr_debug("%s: sack received for zero window "
					 "probe:%u\n", __func__, sack_ctsn);

				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (del_timer(&transport->T3_rtx_timer))
				sctp_transport_put(transport);
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}

		if (forward_progress) {
			if (transport->dst)
				dst_confirm(transport->dst);
		}
	}

	list_splice(&tlist, transmitted_queue);
}
/* Mark chunks as missing and consequently may get retransmitted. */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *primary = asoc->peer.primary_path;

	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {

		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary,
						chunk->transport,
						count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				pr_debug("%s: tsn:0x%x missing counter:%d\n",
					 __func__, tsn, chunk->tsn_missing_report);
			}
		}
		/*
		 * M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report'
		 * value larger than or equal to 3, mark that chunk for
		 * retransmission and start the fast retransmit procedure.
		 */

		if (chunk->tsn_missing_report >= 3) {
			chunk->fast_retransmit = SCTP_NEED_FRTX;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
			 "flight_size:%d, pba:%d\n",  __func__, transport,
			 transport->cwnd, transport->ssthresh,
			 transport->flight_size, transport->partial_bytes_acked);
	}
}
/* Is the given TSN acked by this packet?  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	int i;
	sctp_sack_variable_t *frags;
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * These fields contain the Gap Ack Blocks. They are repeated
	 * for each Gap Ack Block up to the number of Gap Ack Blocks
	 * defined in the Number of Gap Ack Blocks field. All DATA
	 * chunks with TSNs greater than or equal to (Cumulative TSN
	 * Ack + Gap Ack Block Start) and less than or equal to
	 * (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 * Block are assumed to have been received correctly.
	 */

	frags = sack->variable;
	gap = tsn - ctsn;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
		    TSN_lte(gap, ntohs(frags[i].gab.end)))
			goto pass;
	}

	return 0;
pass:
	return 1;
}
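/* Find the position of 'stream' in the skip list.  Returns the index of the
 * matching entry, or 'nskips' when the stream has no entry yet.
 */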
static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __be16 stream)
{
	int i;

	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}
/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	if (!asoc->peer.prsctp_capable)
		return;

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing           local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *                ...                           ...
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
					 SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					 chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN. This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues. Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported. If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}