net/sctp/output.c
1 /* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 *
6 * This file is part of the SCTP kernel implementation
7 *
8 * These functions handle output processing.
9 *
10 * This SCTP implementation is free software;
11 * you can redistribute it and/or modify it under the terms of
12 * the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This SCTP implementation is distributed in the hope that it
17 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
18 * ************************
19 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
20 * See the GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with GNU CC; see the file COPYING. If not, see
24 * <http://www.gnu.org/licenses/>.
25 *
26 * Please send any bug reports or fixes you make to the
27 * email address(es):
28 * lksctp developers <linux-sctp@vger.kernel.org>
29 *
30 * Written or modified by:
31 * La Monte H.P. Yarroll <piggy@acm.org>
32 * Karl Knutson <karl@athena.chicago.il.us>
33 * Jon Grimm <jgrimm@austin.ibm.com>
34 * Sridhar Samudrala <sri@us.ibm.com>
35 */
36
37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
39 #include <linux/types.h>
40 #include <linux/kernel.h>
41 #include <linux/wait.h>
42 #include <linux/time.h>
43 #include <linux/ip.h>
44 #include <linux/ipv6.h>
45 #include <linux/init.h>
46 #include <linux/slab.h>
47 #include <net/inet_ecn.h>
48 #include <net/ip.h>
49 #include <net/icmp.h>
50 #include <net/net_namespace.h>
51
52 #include <linux/socket.h> /* for sa_family_t */
53 #include <net/sock.h>
54
55 #include <net/sctp/sctp.h>
56 #include <net/sctp/sm.h>
57 #include <net/sctp/checksum.h>
58
59 /* Forward declarations for private helpers. */
60 static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
61 struct sctp_chunk *chunk);
62 static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
63 struct sctp_chunk *chunk);
64 static void sctp_packet_append_data(struct sctp_packet *packet,
65 struct sctp_chunk *chunk);
66 static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
67 struct sctp_chunk *chunk,
68 u16 chunk_len);
69
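/* Reset a packet back to the empty state: only the header overhead is
 * counted and all bundling flags are cleared.
 */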
70 static void sctp_packet_reset(struct sctp_packet *packet)
71 {
72 packet->size = packet->overhead;
73 packet->has_cookie_echo = 0;
74 packet->has_sack = 0;
75 packet->has_data = 0;
76 packet->has_auth = 0;
77 packet->ipfragok = 0;
78 packet->auth = NULL;
79 }
80
81 /* Configure a packet.
82  * This appears to be a follow-up set of initializations.
83 */
84 void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
85 int ecn_capable)
86 {
87 struct sctp_transport *tp = packet->transport;
88 struct sctp_association *asoc = tp->asoc;
89
90 pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
91
92 packet->vtag = vtag;
93
94 if (asoc && tp->dst) {
95 struct sock *sk = asoc->base.sk;
96
97 rcu_read_lock();
98 if (__sk_dst_get(sk) != tp->dst) {
99 dst_hold(tp->dst);
100 sk_setup_caps(sk, tp->dst);
101 }
102
103 if (sk_can_gso(sk)) {
104 struct net_device *dev = tp->dst->dev;
105
106 packet->max_size = dev->gso_max_size;
107 } else {
108 packet->max_size = asoc->pathmtu;
109 }
110 rcu_read_unlock();
111
112 } else {
113 packet->max_size = tp->pathmtu;
114 }
115
116 if (ecn_capable && sctp_packet_empty(packet)) {
117 struct sctp_chunk *chunk;
118
119                 /* If there is a prepend chunk, stick it on the list before
120 * any other chunks get appended.
121 */
122 chunk = sctp_get_ecne_prepend(asoc);
123 if (chunk)
124 sctp_packet_append_chunk(packet, chunk);
125 }
126 }
127
128 /* Initialize the packet structure. */
129 void sctp_packet_init(struct sctp_packet *packet,
130 struct sctp_transport *transport,
131 __u16 sport, __u16 dport)
132 {
133 struct sctp_association *asoc = transport->asoc;
134 size_t overhead;
135
136 pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);
137
138 packet->transport = transport;
139 packet->source_port = sport;
140 packet->destination_port = dport;
141 INIT_LIST_HEAD(&packet->chunk_list);
142 if (asoc) {
143 struct sctp_sock *sp = sctp_sk(asoc->base.sk);
144 overhead = sp->pf->af->net_header_len;
145 } else {
146 overhead = sizeof(struct ipv6hdr);
147 }
148 overhead += sizeof(struct sctphdr);
149 packet->overhead = overhead;
150 sctp_packet_reset(packet);
151 packet->vtag = 0;
152 }
153
154 /* Free a packet. */
155 void sctp_packet_free(struct sctp_packet *packet)
156 {
157 struct sctp_chunk *chunk, *tmp;
158
159 pr_debug("%s: packet:%p\n", __func__, packet);
160
161 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
162 list_del_init(&chunk->list);
163 sctp_chunk_free(chunk);
164 }
165 }
166
167 /* This routine tries to append the chunk to the offered packet. If adding
168  * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO chunk
169 * is not present in the packet, it transmits the input packet.
170 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
171 * as it can fit in the packet, but any more data that does not fit in this
172 * packet can be sent only after receiving the COOKIE_ACK.
173 */
174 sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
175 struct sctp_chunk *chunk,
176 int one_packet, gfp_t gfp)
177 {
178 sctp_xmit_t retval;
179
180         pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
181 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
182
183 switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
184 case SCTP_XMIT_PMTU_FULL:
185 if (!packet->has_cookie_echo) {
186 int error = 0;
187
188 error = sctp_packet_transmit(packet, gfp);
189 if (error < 0)
190 chunk->skb->sk->sk_err = -error;
191
192 /* If we have an empty packet, then we can NOT ever
193 * return PMTU_FULL.
194 */
195 if (!one_packet)
196 retval = sctp_packet_append_chunk(packet,
197 chunk);
198 }
199 break;
200
201 case SCTP_XMIT_RWND_FULL:
202 case SCTP_XMIT_OK:
203 case SCTP_XMIT_DELAY:
204 break;
205 }
206
207 return retval;
208 }
209
210 /* Try to bundle an auth chunk into the packet. */
211 static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
212 struct sctp_chunk *chunk)
213 {
214 struct sctp_association *asoc = pkt->transport->asoc;
215 struct sctp_chunk *auth;
216 sctp_xmit_t retval = SCTP_XMIT_OK;
217
218 /* if we don't have an association, we can't do authentication */
219 if (!asoc)
220 return retval;
221
222 /* See if this is an auth chunk we are bundling or if
223 * auth is already bundled.
224 */
225 if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
226 return retval;
227
228 /* if the peer did not request this chunk to be authenticated,
229 * don't do it
230 */
231 if (!chunk->auth)
232 return retval;
233
234 auth = sctp_make_auth(asoc);
235 if (!auth)
236 return retval;
237
238 retval = __sctp_packet_append_chunk(pkt, auth);
239
240 if (retval != SCTP_XMIT_OK)
241 sctp_chunk_free(auth);
242
243 return retval;
244 }
245
246 /* Try to bundle a SACK with the packet. */
247 static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
248 struct sctp_chunk *chunk)
249 {
250 sctp_xmit_t retval = SCTP_XMIT_OK;
251
252         /* If sending DATA and haven't already bundled a SACK, try to
253 * bundle one in to the packet.
254 */
255 if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
256 !pkt->has_cookie_echo) {
257 struct sctp_association *asoc;
258 struct timer_list *timer;
259 asoc = pkt->transport->asoc;
260 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
261
262 /* If the SACK timer is running, we have a pending SACK */
263 if (timer_pending(timer)) {
264 struct sctp_chunk *sack;
265
266 if (pkt->transport->sack_generation !=
267 pkt->transport->asoc->peer.sack_generation)
268 return retval;
269
270 asoc->a_rwnd = asoc->rwnd;
271 sack = sctp_make_sack(asoc);
272 if (sack) {
273 retval = __sctp_packet_append_chunk(pkt, sack);
274 if (retval != SCTP_XMIT_OK) {
275 sctp_chunk_free(sack);
276 goto out;
277 }
278 asoc->peer.sack_needed = 0;
279 if (del_timer(timer))
280 sctp_association_put(asoc);
281 }
282 }
283 }
284 out:
285 return retval;
286 }
287
288
289 /* Append a chunk to the offered packet reporting back any inability to do
290 * so.
291 */
292 static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
293 struct sctp_chunk *chunk)
294 {
295 sctp_xmit_t retval = SCTP_XMIT_OK;
296 __u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
297
298 /* Check to see if this chunk will fit into the packet */
299 retval = sctp_packet_will_fit(packet, chunk, chunk_len);
300 if (retval != SCTP_XMIT_OK)
301 goto finish;
302
303 /* We believe that this chunk is OK to add to the packet */
304 switch (chunk->chunk_hdr->type) {
305 case SCTP_CID_DATA:
306 /* Account for the data being in the packet */
307 sctp_packet_append_data(packet, chunk);
308 /* Disallow SACK bundling after DATA. */
309 packet->has_sack = 1;
310 /* Disallow AUTH bundling after DATA */
311 packet->has_auth = 1;
312                 /* Let it be known that the packet has DATA in it */
313 packet->has_data = 1;
314 /* timestamp the chunk for rtx purposes */
315 chunk->sent_at = jiffies;
316 /* Mainly used for prsctp RTX policy */
317 chunk->sent_count++;
318 break;
319 case SCTP_CID_COOKIE_ECHO:
320 packet->has_cookie_echo = 1;
321 break;
322
323 case SCTP_CID_SACK:
324 packet->has_sack = 1;
325 if (chunk->asoc)
326 chunk->asoc->stats.osacks++;
327 break;
328
329 case SCTP_CID_AUTH:
330 packet->has_auth = 1;
331 packet->auth = chunk;
332 break;
333 }
334
335 /* It is OK to send this chunk. */
336 list_add_tail(&chunk->list, &packet->chunk_list);
337 packet->size += chunk_len;
338 chunk->transport = packet->transport;
339 finish:
340 return retval;
341 }
342
343 /* Append a chunk to the offered packet reporting back any inability to do
344 * so.
345 */
346 sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
347 struct sctp_chunk *chunk)
348 {
349 sctp_xmit_t retval = SCTP_XMIT_OK;
350
351 pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);
352
353 /* Data chunks are special. Before seeing what else we can
354 * bundle into this packet, check to see if we are allowed to
355 * send this DATA.
356 */
357 if (sctp_chunk_is_data(chunk)) {
358 retval = sctp_packet_can_append_data(packet, chunk);
359 if (retval != SCTP_XMIT_OK)
360 goto finish;
361 }
362
363 /* Try to bundle AUTH chunk */
364 retval = sctp_packet_bundle_auth(packet, chunk);
365 if (retval != SCTP_XMIT_OK)
366 goto finish;
367
368 /* Try to bundle SACK chunk */
369 retval = sctp_packet_bundle_sack(packet, chunk);
370 if (retval != SCTP_XMIT_OK)
371 goto finish;
372
373 retval = __sctp_packet_append_chunk(packet, chunk);
374
375 finish:
376 return retval;
377 }
378
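/* skb destructor: drop the reference taken in sctp_packet_set_owner_w()
 * so the socket is kept around only until the packet has been transmitted.
 */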
379 static void sctp_packet_release_owner(struct sk_buff *skb)
380 {
381 sk_free(skb->sk);
382 }
383
384 static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
385 {
386 skb_orphan(skb);
387 skb->sk = sk;
388 skb->destructor = sctp_packet_release_owner;
389
390 /*
391 * The data chunks have already been accounted for in sctp_sendmsg(),
392 * therefore only reserve a single byte to keep socket around until
393 * the packet has been transmitted.
394 */
395 atomic_inc(&sk->sk_wmem_alloc);
396 }
397
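/* Copy the queued chunks into the head skb (or, for GSO, into a chain of
 * segment-sized skbs merged into head), padding each chunk to a 4-byte
 * boundary, computing the AUTH HMAC when one is bundled and setting up
 * the checksum.  Returns the number of packets built, or 0 on failure.
 */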
398 static int sctp_packet_pack(struct sctp_packet *packet,
399 struct sk_buff *head, int gso, gfp_t gfp)
400 {
401 struct sctp_transport *tp = packet->transport;
402 struct sctp_auth_chunk *auth = NULL;
403 struct sctp_chunk *chunk, *tmp;
404 int pkt_count = 0, pkt_size;
405 struct sock *sk = head->sk;
406 struct sk_buff *nskb;
407 int auth_len = 0;
408
409 if (gso) {
410 skb_shinfo(head)->gso_type = sk->sk_gso_type;
411 NAPI_GRO_CB(head)->last = head;
412 } else {
413 nskb = head;
414 pkt_size = packet->size;
415 goto merge;
416 }
417
418 do {
419 /* calculate the pkt_size and alloc nskb */
420 pkt_size = packet->overhead;
421 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
422 list) {
423 int padded = SCTP_PAD4(chunk->skb->len);
424
425 if (chunk == packet->auth)
426 auth_len = padded;
427 else if (auth_len + padded + packet->overhead >
428 tp->pathmtu)
429 return 0;
430 else if (pkt_size + padded > tp->pathmtu)
431 break;
432 pkt_size += padded;
433 }
434 nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
435 if (!nskb)
436 return 0;
437 skb_reserve(nskb, packet->overhead + MAX_HEADER);
438
439 merge:
440 /* merge chunks into nskb and append nskb into head list */
441 pkt_size -= packet->overhead;
442 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
443 int padding;
444
445 list_del_init(&chunk->list);
446 if (sctp_chunk_is_data(chunk)) {
447 if (!sctp_chunk_retransmitted(chunk) &&
448 !tp->rto_pending) {
449 chunk->rtt_in_progress = 1;
450 tp->rto_pending = 1;
451 }
452 }
453
454 padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
455 if (padding)
456 memset(skb_put(chunk->skb, padding), 0, padding);
457
458 if (chunk == packet->auth)
459 auth = (struct sctp_auth_chunk *)
460 skb_tail_pointer(nskb);
461
462 memcpy(skb_put(nskb, chunk->skb->len), chunk->skb->data,
463 chunk->skb->len);
464
465 pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
466 chunk,
467 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
468 chunk->has_tsn ? "TSN" : "No TSN",
469 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
470 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
471 chunk->rtt_in_progress);
472
473 pkt_size -= SCTP_PAD4(chunk->skb->len);
474
475 if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
476 sctp_chunk_free(chunk);
477
478 if (!pkt_size)
479 break;
480 }
481
482 if (auth) {
483 sctp_auth_calculate_hmac(tp->asoc, nskb, auth, gfp);
484 /* free auth if no more chunks, or add it back */
485 if (list_empty(&packet->chunk_list))
486 sctp_chunk_free(packet->auth);
487 else
488 list_add(&packet->auth->list,
489 &packet->chunk_list);
490 }
491
492 if (gso) {
493 if (skb_gro_receive(&head, nskb)) {
494 kfree_skb(nskb);
495 return 0;
496 }
497 if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
498 sk->sk_gso_max_segs))
499 return 0;
500 }
501
502 pkt_count++;
503 } while (!list_empty(&packet->chunk_list));
504
505 if (gso) {
506 memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
507 sizeof(struct inet6_skb_parm)));
508 skb_shinfo(head)->gso_segs = pkt_count;
509 skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
510 rcu_read_lock();
511 if (skb_dst(head) != tp->dst) {
512 dst_hold(tp->dst);
513 sk_setup_caps(sk, tp->dst);
514 }
515 rcu_read_unlock();
516 goto chksum;
517 }
518
519 if (sctp_checksum_disable)
520 return 1;
521
522 if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
523 dst_xfrm(skb_dst(head)) || packet->ipfragok) {
524 struct sctphdr *sh =
525 (struct sctphdr *)skb_transport_header(head);
526
527 sh->checksum = sctp_compute_cksum(head, 0);
528 } else {
529 chksum:
530 head->ip_summed = CHECKSUM_PARTIAL;
531 head->csum_start = skb_transport_header(head) - head->head;
532 head->csum_offset = offsetof(struct sctphdr, checksum);
533 }
534
535 return pkt_count;
536 }
537
538 /* All packets are sent to the network through this function from
539 * sctp_outq_tail().
540 *
541 * The return value is always 0 for now.
542 */
543 int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
544 {
545 struct sctp_transport *tp = packet->transport;
546 struct sctp_association *asoc = tp->asoc;
547 struct sctp_chunk *chunk, *tmp;
548 int pkt_count, gso = 0;
549 int confirm;
550 struct dst_entry *dst;
551 struct sk_buff *head;
552 struct sctphdr *sh;
553 struct sock *sk;
554
555 pr_debug("%s: packet:%p\n", __func__, packet);
556 if (list_empty(&packet->chunk_list))
557 return 0;
558 chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
559 sk = chunk->skb->sk;
560
561 /* check gso */
562 if (packet->size > tp->pathmtu && !packet->ipfragok) {
563 if (!sk_can_gso(sk)) {
564 pr_err_once("Trying to GSO but underlying device doesn't support it.");
565 goto out;
566 }
567 gso = 1;
568 }
569
570 /* alloc head skb */
571 head = alloc_skb((gso ? packet->overhead : packet->size) +
572 MAX_HEADER, gfp);
573 if (!head)
574 goto out;
575 skb_reserve(head, packet->overhead + MAX_HEADER);
576 sctp_packet_set_owner_w(head, sk);
577
578 /* set sctp header */
579 sh = (struct sctphdr *)skb_push(head, sizeof(struct sctphdr));
580 skb_reset_transport_header(head);
581 sh->source = htons(packet->source_port);
582 sh->dest = htons(packet->destination_port);
583 sh->vtag = htonl(packet->vtag);
584 sh->checksum = 0;
585
586         /* update dst if needed */
587 if (!sctp_transport_dst_check(tp)) {
588 sctp_transport_route(tp, NULL, sctp_sk(sk));
589 if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE)
590 sctp_assoc_sync_pmtu(sk, asoc);
591 }
592 dst = dst_clone(tp->dst);
593 if (!dst) {
594 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
595 kfree_skb(head);
596 goto out;
597 }
598 skb_dst_set(head, dst);
599
600 /* pack up chunks */
601 pkt_count = sctp_packet_pack(packet, head, gso, gfp);
602 if (!pkt_count) {
603 kfree_skb(head);
604 goto out;
605 }
606 pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);
607
608 /* start autoclose timer */
609 if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
610 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
611 struct timer_list *timer =
612 &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
613 unsigned long timeout =
614 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
615
616 if (!mod_timer(timer, jiffies + timeout))
617 sctp_association_hold(asoc);
618 }
619
620 /* sctp xmit */
621 tp->af_specific->ecn_capable(sk);
622 if (asoc) {
623 asoc->stats.opackets += pkt_count;
624 if (asoc->peer.last_sent_to != tp)
625 asoc->peer.last_sent_to = tp;
626 }
627 head->ignore_df = packet->ipfragok;
628 confirm = tp->dst_pending_confirm;
629 if (confirm)
630 skb_set_dst_pending_confirm(head, 1);
631 /* neighbour should be confirmed on successful transmission or
632 * positive error
633 */
634 if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm)
635 tp->dst_pending_confirm = 0;
636
637 out:
638 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
639 list_del_init(&chunk->list);
640 if (!sctp_chunk_is_data(chunk))
641 sctp_chunk_free(chunk);
642 }
643 sctp_packet_reset(packet);
644 return 0;
645 }
646
647 /********************************************************************
648 * 2nd Level Abstractions
649 ********************************************************************/
650
651 /* This private function checks whether a chunk can be added */
652 static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
653 struct sctp_chunk *chunk)
654 {
655 size_t datasize, rwnd, inflight, flight_size;
656 struct sctp_transport *transport = packet->transport;
657 struct sctp_association *asoc = transport->asoc;
658 struct sctp_outq *q = &asoc->outqueue;
659
660 /* RFC 2960 6.1 Transmission of DATA Chunks
661 *
662 * A) At any given time, the data sender MUST NOT transmit new data to
663 * any destination transport address if its peer's rwnd indicates
664 * that the peer has no buffer space (i.e. rwnd is 0, see Section
665 * 6.2.1). However, regardless of the value of rwnd (including if it
666 * is 0), the data sender can always have one DATA chunk in flight to
667 * the receiver if allowed by cwnd (see rule B below). This rule
668 * allows the sender to probe for a change in rwnd that the sender
669 * missed due to the SACK having been lost in transit from the data
670 * receiver to the data sender.
671 */
672
673 rwnd = asoc->peer.rwnd;
674 inflight = q->outstanding_bytes;
675 flight_size = transport->flight_size;
676
677 datasize = sctp_data_size(chunk);
678
679 if (datasize > rwnd && inflight > 0)
680 /* We have (at least) one data chunk in flight,
681 * so we can't fall back to rule 6.1 B).
682 */
683 return SCTP_XMIT_RWND_FULL;
684
685 /* RFC 2960 6.1 Transmission of DATA Chunks
686 *
687 * B) At any given time, the sender MUST NOT transmit new data
688 * to a given transport address if it has cwnd or more bytes
689 * of data outstanding to that transport address.
690 */
691         /* RFC 2960 7.2.4 & the Implementers Guide 2.8.
692 *
693 * 3) ...
694 * When a Fast Retransmit is being performed the sender SHOULD
695 * ignore the value of cwnd and SHOULD NOT delay retransmission.
696 */
697 if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
698 flight_size >= transport->cwnd)
699 return SCTP_XMIT_RWND_FULL;
700
701 /* Nagle's algorithm to solve small-packet problem:
702 * Inhibit the sending of new chunks when new outgoing data arrives
703 * if any previously transmitted data on the connection remains
704 * unacknowledged.
705 */
706
707 if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
708 !chunk->msg->force_delay)
709 /* Nothing unacked */
710 return SCTP_XMIT_OK;
711
712 if (!sctp_packet_empty(packet))
713 /* Append to packet */
714 return SCTP_XMIT_OK;
715
716 if (!sctp_state(asoc, ESTABLISHED))
717 return SCTP_XMIT_OK;
718
719         /* Check whether this chunk and all the rest of the pending data will
720          * fill a packet; if not, delay in hopes of bundling a full-sized packet.
721 */
722 if (chunk->skb->len + q->out_qlen >
723 transport->pathmtu - packet->overhead - sizeof(sctp_data_chunk_t) - 4)
724 /* Enough data queued to fill a packet */
725 return SCTP_XMIT_OK;
726
727 /* Don't delay large message writes that may have been fragmented */
728 if (!chunk->msg->can_delay)
729 return SCTP_XMIT_OK;
730
731 /* Defer until all data acked or packet full */
732 return SCTP_XMIT_DELAY;
733 }
734
735 /* This private function does the bookkeeping when adding a DATA chunk */
736 static void sctp_packet_append_data(struct sctp_packet *packet,
737 struct sctp_chunk *chunk)
738 {
739 struct sctp_transport *transport = packet->transport;
740 size_t datasize = sctp_data_size(chunk);
741 struct sctp_association *asoc = transport->asoc;
742 u32 rwnd = asoc->peer.rwnd;
743
744 /* Keep track of how many bytes are in flight over this transport. */
745 transport->flight_size += datasize;
746
747 /* Keep track of how many bytes are in flight to the receiver. */
748 asoc->outqueue.outstanding_bytes += datasize;
749
750 /* Update our view of the receiver's rwnd. */
751 if (datasize < rwnd)
752 rwnd -= datasize;
753 else
754 rwnd = 0;
755
756 asoc->peer.rwnd = rwnd;
757 sctp_chunk_assign_tsn(chunk);
758 sctp_chunk_assign_ssn(chunk);
759 }
760
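/* Decide whether a chunk of chunk_len bytes still fits in this packet,
 * taking the path MTU, any bundled AUTH chunk, the GSO size limit and
 * the cwnd/burst based caps on a single GSO packet into account.
 */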
761 static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
762 struct sctp_chunk *chunk,
763 u16 chunk_len)
764 {
765 size_t psize, pmtu, maxsize;
766 sctp_xmit_t retval = SCTP_XMIT_OK;
767
768 psize = packet->size;
769 if (packet->transport->asoc)
770 pmtu = packet->transport->asoc->pathmtu;
771 else
772 pmtu = packet->transport->pathmtu;
773
774 /* Decide if we need to fragment or resubmit later. */
775 if (psize + chunk_len > pmtu) {
776 /* It's OK to fragment at IP level if any one of the following
777 * is true:
778 * 1. The packet is empty (meaning this chunk is greater
779                  *        than the MTU)
780 * 2. The packet doesn't have any data in it yet and data
781 * requires authentication.
782 */
783 if (sctp_packet_empty(packet) ||
784 (!packet->has_data && chunk->auth)) {
785 /* We no longer do re-fragmentation.
786 * Just fragment at the IP layer, if we
787 * actually hit this condition
788 */
789 packet->ipfragok = 1;
790 goto out;
791 }
792
793 /* Similarly, if this chunk was built before a PMTU
794 * reduction, we have to fragment it at IP level now. So
795 * if the packet already contains something, we need to
796 * flush.
797 */
798 maxsize = pmtu - packet->overhead;
799 if (packet->auth)
800 maxsize -= SCTP_PAD4(packet->auth->skb->len);
801 if (chunk_len > maxsize)
802 retval = SCTP_XMIT_PMTU_FULL;
803
804 /* It is also okay to fragment if the chunk we are
805                  * adding is a control chunk, but only if the current packet
806                  * is not a GSO one, as otherwise it causes fragmentation of
807 * a large frame. So in this case we allow the
808 * fragmentation by forcing it to be in a new packet.
809 */
810 if (!sctp_chunk_is_data(chunk) && packet->has_data)
811 retval = SCTP_XMIT_PMTU_FULL;
812
813 if (psize + chunk_len > packet->max_size)
814 /* Hit GSO/PMTU limit, gotta flush */
815 retval = SCTP_XMIT_PMTU_FULL;
816
817 if (!packet->transport->burst_limited &&
818 psize + chunk_len > (packet->transport->cwnd >> 1))
819 /* Do not allow a single GSO packet to use more
820 * than half of cwnd.
821 */
822 retval = SCTP_XMIT_PMTU_FULL;
823
824 if (packet->transport->burst_limited &&
825 psize + chunk_len > (packet->transport->burst_limited >> 1))
826 /* Do not allow a single GSO packet to use more
827 * than half of original cwnd.
828 */
829 retval = SCTP_XMIT_PMTU_FULL;
830 /* Otherwise it will fit in the GSO packet */
831 }
832
833 out:
834 return retval;
835 }