// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>

/* Forward declarations for private helpers. */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk);
static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len);

static void sctp_packet_reset(struct sctp_packet *packet)
{
	/* sctp_packet_transmit() relies on this to reset size to the
	 * current overhead after sending packets.
	 */
	packet->size = packet->overhead;

	packet->has_cookie_echo = 0;
	packet->has_sack = 0;
	packet->has_data = 0;
	packet->has_auth = 0;
	packet->ipfragok = 0;
	packet->auth = NULL;
}
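
/* Note on sctp_packet_reset(): only the per-flush state (size and the
 * has_cookie_echo/has_sack/has_data/has_auth/ipfragok flags plus the auth
 * pointer) is cleared here; transport, ports, vtag and overhead are left
 * intact so the packet can be reused for the next flush without another
 * sctp_packet_init()/sctp_packet_config() cycle.
 */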

/* Config a packet.
 * This appears to be a follow-up set of initializations.
 */
void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
			int ecn_capable)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_sock *sp = NULL;
	struct sock *sk;

	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
	packet->vtag = vtag;

	/* do the following jobs only once for a flush schedule */
	if (!sctp_packet_empty(packet))
		return;

	/* set packet max_size with pathmtu, then calculate overhead */
	packet->max_size = tp->pathmtu;

	if (asoc) {
		sk = asoc->base.sk;
		sp = sctp_sk(sk);
	}
	packet->overhead = sctp_mtu_payload(sp, 0, 0);
	packet->size = packet->overhead;

	if (!asoc)
		return;

	/* update dst or transport pathmtu if in need */
	if (!sctp_transport_dst_check(tp)) {
		sctp_transport_route(tp, NULL, sp);
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
	} else if (!sctp_transport_pl_enabled(tp) &&
		   asoc->param_flags & SPP_PMTUD_ENABLE) {
		if (!sctp_transport_pmtu_check(tp))
			sctp_assoc_sync_pmtu(asoc);
	}

	if (asoc->pmtu_pending) {
		if (asoc->param_flags & SPP_PMTUD_ENABLE)
			sctp_assoc_sync_pmtu(asoc);
		asoc->pmtu_pending = 0;
	}

	/* If there is a prepend chunk, stick it on the list before
	 * any other chunks get appended.
	 */
	if (ecn_capable) {
		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);

		if (chunk)
			sctp_packet_append_chunk(packet, chunk);
	}

	if (!tp->dst)
		return;

	/* set packet max_size with gso_max_size if gso is enabled */
	rcu_read_lock();
	if (__sk_dst_get(sk) != tp->dst) {
		dst_hold(tp->dst);
		sk_setup_caps(sk, tp->dst);
	}
	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
					  : asoc->pathmtu;
	rcu_read_unlock();
}
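
/* Note on sctp_packet_config(): when the socket can do GSO, max_size is
 * raised to the device gso_max_size, so a "packet" may grow well beyond
 * the path MTU; sctp_packet_pack() later splits it into MTU-sized
 * segments on the GSO frag_list.  Without GSO, max_size stays at the
 * association pathmtu.
 */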

/* Initialize the packet structure. */
void sctp_packet_init(struct sctp_packet *packet,
		      struct sctp_transport *transport,
		      __u16 sport, __u16 dport)
{
	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);

	packet->transport = transport;
	packet->source_port = sport;
	packet->destination_port = dport;
	INIT_LIST_HEAD(&packet->chunk_list);
	/* The overhead will be calculated by sctp_packet_config() */
	packet->overhead = 0;
	sctp_packet_reset(packet);
	packet->vtag = 0;
}

/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
	struct sctp_chunk *chunk, *tmp;

	pr_debug("%s: packet:%p\n", __func__, packet);

	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}

/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
					  struct sctp_chunk *chunk,
					  int one_packet, gfp_t gfp)
{
	enum sctp_xmit retval;

	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);

	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
	case SCTP_XMIT_PMTU_FULL:
		if (!packet->has_cookie_echo) {
			int error = 0;

			error = sctp_packet_transmit(packet, gfp);
			if (error < 0)
				chunk->skb->sk->sk_err = -error;

			/* If we have an empty packet, then we can NOT ever
			 * return PMTU_FULL.
			 */
			if (!one_packet)
				retval = sctp_packet_append_chunk(packet,
								  chunk);
		}
		break;

	case SCTP_XMIT_RWND_FULL:
	case SCTP_XMIT_OK:
	case SCTP_XMIT_DELAY:
		break;
	}

	return retval;
}
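
/* Note on sctp_packet_transmit_chunk(): a PMTU_FULL result flushes the
 * current packet onto the wire and, unless the caller asked for a single
 * packet (one_packet), retries the append on the now-empty packet.  A
 * transmit error is reported asynchronously through sk->sk_err.
 */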

/* Try to bundle a pad chunk into a packet with a heartbeat chunk for a
 * PLPMTUD probe.
 */
static enum sctp_xmit sctp_packet_bundle_pad(struct sctp_packet *pkt,
					     struct sctp_chunk *chunk)
{
	struct sctp_transport *t = pkt->transport;
	struct sctp_chunk *pad;
	int overhead = 0;

	if (!chunk->pmtu_probe)
		return SCTP_XMIT_OK;

	/* calculate the Padding Data size for the pad chunk */
	overhead += sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
	overhead += sizeof(struct sctp_sender_hb_info) + sizeof(struct sctp_pad_chunk);
	pad = sctp_make_pad(t->asoc, t->pl.probe_size - overhead);
	if (!pad)
		return SCTP_XMIT_DELAY;

	list_add_tail(&pad->list, &pkt->chunk_list);
	pkt->size += SCTP_PAD4(ntohs(pad->chunk_hdr->length));
	chunk->transport = t;

	return SCTP_XMIT_OK;
}
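
/* Note on sctp_packet_bundle_pad(): a PLPMTUD probe is a HEARTBEAT chunk
 * padded with a PAD chunk; the padding length is taken from
 * t->pl.probe_size minus the fixed header overhead, so the probe packet
 * comes out at the size currently being probed.
 */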

/* Try to bundle an auth chunk into the packet. */
static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = pkt->transport->asoc;
	enum sctp_xmit retval = SCTP_XMIT_OK;
	struct sctp_chunk *auth;

	/* if we don't have an association, we can't do authentication */
	if (!asoc)
		return retval;

	/* See if this is an auth chunk we are bundling or if
	 * auth is already bundled.
	 */
	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
		return retval;

	/* if the peer did not request this chunk to be authenticated,
	 * don't do it
	 */
	if (!chunk->auth)
		return retval;

	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
	if (!auth)
		return retval;

	auth->shkey = chunk->shkey;
	sctp_auth_shkey_hold(auth->shkey);

	retval = __sctp_packet_append_chunk(pkt, auth);

	if (retval != SCTP_XMIT_OK)
		sctp_chunk_free(auth);

	return retval;
}

/* Try to bundle a SACK with the packet. */
static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
					      struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* If sending DATA and haven't already bundled a SACK, try to
	 * bundle one in to the packet.
	 */
	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
	    !pkt->has_cookie_echo) {
		struct sctp_association *asoc;
		struct timer_list *timer;

		asoc = pkt->transport->asoc;
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

		/* If the SACK timer is running, we have a pending SACK */
		if (timer_pending(timer)) {
			struct sctp_chunk *sack;

			if (pkt->transport->sack_generation !=
			    pkt->transport->asoc->peer.sack_generation)
				return retval;

			asoc->a_rwnd = asoc->rwnd;
			sack = sctp_make_sack(asoc);
			if (sack) {
				retval = __sctp_packet_append_chunk(pkt, sack);
				if (retval != SCTP_XMIT_OK) {
					sctp_chunk_free(sack);
					goto out;
				}
				SCTP_INC_STATS(asoc->base.net,
					       SCTP_MIB_OUTCTRLCHUNKS);
				asoc->stats.octrlchunks++;
				asoc->peer.sack_needed = 0;
				if (del_timer(timer))
					sctp_association_put(asoc);
			}
		}
	}
out:
	return retval;
}
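
/* Note on sctp_packet_bundle_sack(): a SACK is only piggybacked on
 * outgoing DATA while the SACK timer is pending.  The sack_generation
 * comparison appears to keep one flush from bundling the same SACK state
 * onto more than one transport.  Successfully bundling the SACK cancels
 * the pending SACK timer.
 */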

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
						 struct sctp_chunk *chunk)
{
	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
	enum sctp_xmit retval = SCTP_XMIT_OK;

	/* Check to see if this chunk will fit into the packet */
	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* We believe that this chunk is OK to add to the packet */
	switch (chunk->chunk_hdr->type) {
	case SCTP_CID_DATA:
	case SCTP_CID_I_DATA:
		/* Account for the data being in the packet */
		sctp_packet_append_data(packet, chunk);
		/* Disallow SACK bundling after DATA. */
		packet->has_sack = 1;
		/* Disallow AUTH bundling after DATA */
		packet->has_auth = 1;
		/* Let it be known that the packet has DATA in it */
		packet->has_data = 1;
		/* timestamp the chunk for rtx purposes */
		chunk->sent_at = jiffies;
		/* Mainly used for prsctp RTX policy */
		chunk->sent_count++;
		break;

	case SCTP_CID_COOKIE_ECHO:
		packet->has_cookie_echo = 1;
		break;

	case SCTP_CID_SACK:
		packet->has_sack = 1;
		if (chunk->asoc)
			chunk->asoc->stats.osacks++;
		break;

	case SCTP_CID_AUTH:
		packet->has_auth = 1;
		packet->auth = chunk;
		break;
	}

	/* It is OK to send this chunk. */
	list_add_tail(&chunk->list, &packet->chunk_list);
	packet->size += chunk_len;
	chunk->transport = packet->transport;

finish:
	return retval;
}
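
/* Note on __sctp_packet_append_chunk(): once a DATA or I-DATA chunk has
 * been accounted for, has_sack and has_auth are forced on so that no
 * further SACK or AUTH chunk can be bundled behind the data in the same
 * packet; control chunks are conventionally placed before DATA.
 */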

/* Append a chunk to the offered packet reporting back any inability to do
 * so.
 */
enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
					struct sctp_chunk *chunk)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;

	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);

	/* Data chunks are special.  Before seeing what else we can
	 * bundle into this packet, check to see if we are allowed to
	 * send this DATA.
	 */
	if (sctp_chunk_is_data(chunk)) {
		retval = sctp_packet_can_append_data(packet, chunk);
		if (retval != SCTP_XMIT_OK)
			goto finish;
	}

	/* Try to bundle AUTH chunk */
	retval = sctp_packet_bundle_auth(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	/* Try to bundle SACK chunk */
	retval = sctp_packet_bundle_sack(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = __sctp_packet_append_chunk(packet, chunk);
	if (retval != SCTP_XMIT_OK)
		goto finish;

	retval = sctp_packet_bundle_pad(packet, chunk);

finish:
	return retval;
}

static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
{
	if (SCTP_OUTPUT_CB(head)->last == head)
		skb_shinfo(head)->frag_list = skb;
	else
		SCTP_OUTPUT_CB(head)->last->next = skb;
	SCTP_OUTPUT_CB(head)->last = skb;

	head->truesize += skb->truesize;
	head->data_len += skb->len;
	head->len += skb->len;
	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);

	__skb_header_release(skb);
}
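
/* Note on sctp_packet_gso_append(): segments after the first are chained
 * on the head skb's frag_list, with SCTP_OUTPUT_CB(head)->last tracking
 * the tail of that chain, and their truesize is charged to the owning
 * socket's write allocation.
 */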

static int sctp_packet_pack(struct sctp_packet *packet,
			    struct sk_buff *head, int gso, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_auth_chunk *auth = NULL;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count = 0, pkt_size;
	struct sock *sk = head->sk;
	struct sk_buff *nskb;
	int auth_len = 0;

	if (gso) {
		skb_shinfo(head)->gso_type = sk->sk_gso_type;
		SCTP_OUTPUT_CB(head)->last = head;
	} else {
		nskb = head;
		pkt_size = packet->size;
		goto merge;
	}

	do {
		/* calculate the pkt_size and alloc nskb */
		pkt_size = packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
					 list) {
			int padded = SCTP_PAD4(chunk->skb->len);

			if (chunk == packet->auth)
				auth_len = padded;
			else if (auth_len + padded + packet->overhead >
				 tp->pathmtu)
				return 0;
			else if (pkt_size + padded > tp->pathmtu)
				break;
			pkt_size += padded;
		}

		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
		if (!nskb)
			return 0;
		skb_reserve(nskb, packet->overhead + MAX_HEADER);

merge:
		/* merge chunks into nskb and append nskb into head list */
		pkt_size -= packet->overhead;
		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
			int padding;

			list_del_init(&chunk->list);
			if (sctp_chunk_is_data(chunk)) {
				if (!sctp_chunk_retransmitted(chunk) &&
				    !tp->rto_pending) {
					chunk->rtt_in_progress = 1;
					tp->rto_pending = 1;
				}
			}

			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
			if (padding)
				skb_put_zero(chunk->skb, padding);

			if (chunk == packet->auth)
				auth = (struct sctp_auth_chunk *)
						skb_tail_pointer(nskb);

			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);

			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
				 chunk,
				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
				 chunk->has_tsn ? "TSN" : "No TSN",
				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
				 chunk->rtt_in_progress);

			pkt_size -= SCTP_PAD4(chunk->skb->len);

			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
				sctp_chunk_free(chunk);

			if (!pkt_size)
				break;
		}

		if (auth) {
			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
						 packet->auth->shkey, gfp);
			/* free auth if no more chunks, or add it back */
			if (list_empty(&packet->chunk_list))
				sctp_chunk_free(packet->auth);
			else
				list_add(&packet->auth->list,
					 &packet->chunk_list);
		}

		if (gso)
			sctp_packet_gso_append(head, nskb);

		pkt_count++;
	} while (!list_empty(&packet->chunk_list));

	if (gso) {
		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
					sizeof(struct inet6_skb_parm)));
		skb_shinfo(head)->gso_segs = pkt_count;
		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
		goto chksum;
	}

	if (sctp_checksum_disable)
		return 1;

	if (!(tp->dst->dev->features & NETIF_F_SCTP_CRC) ||
	    dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port) {
		struct sctphdr *sh =
			(struct sctphdr *)skb_transport_header(head);

		sh->checksum = sctp_compute_cksum(head, 0);
	} else {
chksum:
		head->ip_summed = CHECKSUM_PARTIAL;
		head->csum_not_inet = 1;
		head->csum_start = skb_transport_header(head) - head->head;
		head->csum_offset = offsetof(struct sctphdr, checksum);
	}

	return pkt_count;
}
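
/* Note on sctp_packet_pack(): the CRC32c checksum is left for the device
 * (CHECKSUM_PARTIAL) when it advertises NETIF_F_SCTP_CRC and the packet
 * is not xfrm-transformed, IP-fragmented or UDP-encapsulated; otherwise
 * it is computed in software via sctp_compute_cksum().  GSO packets
 * always take the CHECKSUM_PARTIAL path here.
 */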

/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is always 0 for now.
 */
int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
{
	struct sctp_transport *tp = packet->transport;
	struct sctp_association *asoc = tp->asoc;
	struct sctp_chunk *chunk, *tmp;
	int pkt_count, gso = 0;
	struct sk_buff *head;
	struct sctphdr *sh;
	struct sock *sk;

	pr_debug("%s: packet:%p\n", __func__, packet);
	if (list_empty(&packet->chunk_list))
		return 0;
	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
	sk = chunk->skb->sk;

	if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) {
		if (tp->pl.state == SCTP_PL_ERROR) { /* do IP fragmentation if in Error state */
			packet->ipfragok = 1;
		} else {
			if (!sk_can_gso(sk)) { /* check gso */
				pr_err_once("Trying to GSO but underlying device doesn't support it.");
				goto out;
			}
			gso = 1;
		}
	}

	/* alloc head skb */
	head = alloc_skb((gso ? packet->overhead : packet->size) +
			 MAX_HEADER, gfp);
	if (!head)
		goto out;
	skb_reserve(head, packet->overhead + MAX_HEADER);
	skb_set_owner_w(head, sk);

	/* set sctp header */
	sh = skb_push(head, sizeof(struct sctphdr));
	skb_reset_transport_header(head);
	sh->source = htons(packet->source_port);
	sh->dest = htons(packet->destination_port);
	sh->vtag = htonl(packet->vtag);
	sh->checksum = 0;

	/* drop packet if no dst */
	if (!tp->dst) {
		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		kfree_skb(head);
		goto out;
	}

	/* pack up chunks */
	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
	if (!pkt_count) {
		kfree_skb(head);
		goto out;
	}
	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);

	/* start autoclose timer */
	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
		struct timer_list *timer =
			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
		unsigned long timeout =
			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

		if (!mod_timer(timer, jiffies + timeout))
			sctp_association_hold(asoc);
	}

	/* sctp xmit */
	tp->af_specific->ecn_capable(sk);
	if (asoc) {
		asoc->stats.opackets += pkt_count;
		if (asoc->peer.last_sent_to != tp)
			asoc->peer.last_sent_to = tp;
	}
	head->ignore_df = packet->ipfragok;
	if (tp->dst_pending_confirm)
		skb_set_dst_pending_confirm(head, 1);
	/* neighbour should be confirmed on successful transmission or
	 * positive error
	 */
	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
	    tp->dst_pending_confirm)
		tp->dst_pending_confirm = 0;

out:
	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
		list_del_init(&chunk->list);
		if (!sctp_chunk_is_data(chunk))
			sctp_chunk_free(chunk);
	}
	sctp_packet_reset(packet);
	return 0;
}
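
/* Note on sctp_packet_transmit(): on the way out (including error paths)
 * only non-DATA chunks are freed; DATA chunks stay referenced by the
 * outqueue so they can be retransmitted, and the packet itself is reset
 * for reuse.  The function intentionally always returns 0 for now.
 */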

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function checks to see if a chunk can be added */
static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
						  struct sctp_chunk *chunk)
{
	size_t datasize, rwnd, inflight, flight_size;
	struct sctp_transport *transport = packet->transport;
	struct sctp_association *asoc = transport->asoc;
	struct sctp_outq *q = &asoc->outqueue;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * A) At any given time, the data sender MUST NOT transmit new data to
	 * any destination transport address if its peer's rwnd indicates
	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
	 * 6.2.1).  However, regardless of the value of rwnd (including if it
	 * is 0), the data sender can always have one DATA chunk in flight to
	 * the receiver if allowed by cwnd (see rule B below).  This rule
	 * allows the sender to probe for a change in rwnd that the sender
	 * missed due to the SACK having been lost in transit from the data
	 * receiver to the data sender.
	 */

	rwnd = asoc->peer.rwnd;
	inflight = q->outstanding_bytes;
	flight_size = transport->flight_size;

	datasize = sctp_data_size(chunk);

	if (datasize > rwnd && inflight > 0)
		/* We have (at least) one data chunk in flight,
		 * so we can't fall back to rule 6.1 B).
		 */
		return SCTP_XMIT_RWND_FULL;

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * B) At any given time, the sender MUST NOT transmit new data
	 * to a given transport address if it has cwnd or more bytes
	 * of data outstanding to that transport address.
	 */
	/* RFC 2960 7.2.4 & the Implementers Guide 2.8.
	 *
	 * 3) ...
	 *    When a Fast Retransmit is being performed the sender SHOULD
	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
	 */
	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
	    flight_size >= transport->cwnd)
		return SCTP_XMIT_RWND_FULL;

	/* Nagle's algorithm to solve small-packet problem:
	 * Inhibit the sending of new chunks when new outgoing data arrives
	 * if any previously transmitted data on the connection remains
	 * unacknowledged.
	 */

	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
	    !asoc->force_delay)
		/* Nothing unacked */
		return SCTP_XMIT_OK;

	if (!sctp_packet_empty(packet))
		/* Append to packet */
		return SCTP_XMIT_OK;

	if (!sctp_state(asoc, ESTABLISHED))
		return SCTP_XMIT_OK;

	/* Check whether this chunk and all the rest of pending data will fit
	 * or delay in hopes of bundling a full sized packet.
	 */
	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
		/* Enough data queued to fill a packet */
		return SCTP_XMIT_OK;

	/* Don't delay large message writes that may have been fragmented */
	if (!chunk->msg->can_delay)
		return SCTP_XMIT_OK;

	/* Defer until all data acked or packet full */
	return SCTP_XMIT_DELAY;
}
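
/* Note on sctp_packet_can_append_data(): the checks run in the order of
 * RFC 2960 rule A (peer rwnd), rule B (per-transport cwnd, relaxed for
 * fast retransmit), then a Nagle-style test that only delays small
 * writes when there is unacked data, nothing already in this packet, not
 * enough queued data to fill a packet, and the message said it can be
 * delayed.
 */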

/* This private function does management things when adding DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
				    struct sctp_chunk *chunk)
{
	struct sctp_transport *transport = packet->transport;
	size_t datasize = sctp_data_size(chunk);
	struct sctp_association *asoc = transport->asoc;
	u32 rwnd = asoc->peer.rwnd;

	/* Keep track of how many bytes are in flight over this transport. */
	transport->flight_size += datasize;

	/* Keep track of how many bytes are in flight to the receiver. */
	asoc->outqueue.outstanding_bytes += datasize;

	/* Update our view of the receiver's rwnd. */
	if (datasize < rwnd)
		rwnd -= datasize;
	else
		rwnd = 0;

	asoc->peer.rwnd = rwnd;
	sctp_chunk_assign_tsn(chunk);
	asoc->stream.si->assign_number(chunk);
}
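
/* Note on sctp_packet_append_data(): flight-size and outstanding-byte
 * counters grow by the chunk's payload size, the local view of the peer
 * rwnd is decremented (clamped at zero), and the chunk receives its TSN
 * and stream sequence number here, i.e. at the moment it is actually
 * placed in a packet.
 */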

static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
					   struct sctp_chunk *chunk,
					   u16 chunk_len)
{
	enum sctp_xmit retval = SCTP_XMIT_OK;
	size_t psize, pmtu, maxsize;

	/* Don't bundle in this packet if this chunk's auth key doesn't
	 * match other chunks already enqueued on this packet. Also,
	 * don't bundle the chunk with auth key if other chunks in this
	 * packet don't have auth key.
	 */
	if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
	    (!packet->auth && chunk->shkey &&
	     chunk->chunk_hdr->type != SCTP_CID_AUTH))
		return SCTP_XMIT_PMTU_FULL;

	psize = packet->size;
	if (packet->transport->asoc)
		pmtu = packet->transport->asoc->pathmtu;
	else
		pmtu = packet->transport->pathmtu;

	/* Decide if we need to fragment or resubmit later. */
	if (psize + chunk_len > pmtu) {
		/* It's OK to fragment at IP level if any one of the following
		 * is true:
		 *	1. The packet is empty (meaning this chunk is greater
		 *	   than the MTU)
		 *	2. The packet doesn't have any data in it yet and data
		 *	   requires authentication.
		 */
		if (sctp_packet_empty(packet) ||
		    (!packet->has_data && chunk->auth)) {
			/* We no longer do re-fragmentation.
			 * Just fragment at the IP layer, if we
			 * actually hit this condition
			 */
			packet->ipfragok = 1;
			goto out;
		}

		/* Similarly, if this chunk was built before a PMTU
		 * reduction, we have to fragment it at IP level now. So
		 * if the packet already contains something, we need to
		 * flush.
		 */
		maxsize = pmtu - packet->overhead;
		if (packet->auth)
			maxsize -= SCTP_PAD4(packet->auth->skb->len);
		if (chunk_len > maxsize)
			retval = SCTP_XMIT_PMTU_FULL;

		/* It is also okay to fragment if the chunk we are
		 * adding is a control chunk, but only if current packet
		 * is not a GSO one otherwise it causes fragmentation of
		 * a large frame. So in this case we allow the
		 * fragmentation by forcing it to be in a new packet.
		 */
		if (!sctp_chunk_is_data(chunk) && packet->has_data)
			retval = SCTP_XMIT_PMTU_FULL;

		if (psize + chunk_len > packet->max_size)
			/* Hit GSO/PMTU limit, gotta flush */
			retval = SCTP_XMIT_PMTU_FULL;

		if (!packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->cwnd >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;

		if (packet->transport->burst_limited &&
		    psize + chunk_len > (packet->transport->burst_limited >> 1))
			/* Do not allow a single GSO packet to use more
			 * than half of original cwnd.
			 */
			retval = SCTP_XMIT_PMTU_FULL;
		/* Otherwise it will fit in the GSO packet */
	}

out:
	return retval;
}