/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of two TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 131072;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of cwnd validation mechanism. */
static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set initial window to a value enough for senders starting with
	 * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
	 * a limit on the initial window when mss is larger than 1460.
	 */
	if (mss > (1 << *rcv_wscale)) {
		int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
		if (mss > 1460)
			init_cwnd =
			max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
		/* when initializing use the value from init_rcv_wnd
		 * rather than the default from above
		 */
		if (init_rcv_wnd)
			*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
		else
			*rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);

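/* Illustrative sketch (not part of the original file): the window-scale loop
 * above, shown standalone so the arithmetic is easy to check. With a 4 MB
 * receive buffer it yields a window scale of 7, since 4194304 >> 7 = 32768 is
 * the first value that fits in the 16-bit window field. The function name is
 * made up for this example.
 */
static unsigned int tcp_example_rcv_wscale(unsigned int space)
{
	unsigned int wscale = 0;

	while (space > 65535 && wscale < 14) {
		space >>= 1;
		wscale++;
	}
	return wscale;
}
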
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied. The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN. */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on a ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options, we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance but from
 * inter-operability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;

		*ptr++ = htonl((TCPOPT_EXP << 24) |
			       ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
			       TCPOPT_FASTOPEN_MAGIC);

		memcpy(ptr, foc->val, foc->len);
		if ((foc->len & 3) == 2) {
			u8 *align = ((u8 *)ptr) + foc->len;
			align[0] = align[1] = TCPOPT_NOP;
		}
		ptr += (foc->len + 3) >> 2;
	}
}

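/* Worked example of the packing above (not part of the original file),
 * assuming the standard option values TCPOPT_NOP = 1, TCPOPT_TIMESTAMP = 8
 * and TCPOLEN_TIMESTAMP = 10:
 *
 *	(1 << 24) | (1 << 16) | (8 << 8) | 10 == 0x0101080a
 *
 * i.e. the familiar "NOP, NOP, TS(kind 8, len 10)" prefix that precedes the
 * two 32-bit timestamp values on established connections.
 */
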
/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.  */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

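/* Rough SYN option budget for reference (not part of the original file),
 * assuming the usual aligned sizes: MAX_TCP_OPTION_SPACE = 40, MSS = 4,
 * timestamps = 12 and window scale = 4 bytes. With timestamps present the
 * SACK-permitted option rides in the same block, so a typical SYN consumes
 * 4 + 12 + 4 = 20 bytes and leaves about 20 bytes of option space for a
 * Fast Open cookie.
 */
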
/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct sock *sk,
				   struct request_sock *req,
				   unsigned int mss, struct sk_buff *skb,
				   struct tcp_out_options *opts,
				   struct tcp_md5sig_key **md5,
				   struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL) {
		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}

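/* Worked example of the SACK budget above (not part of the original file),
 * assuming the usual option sizes: 40 bytes of option space in total, 12 for
 * aligned timestamps, 4 for the SACK base and 8 per block. Then
 * (40 - 12 - 4) / 8 = 3, i.e. at most three SACK blocks fit in a segment
 * that also carries timestamps.
 */
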

/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ goal is to keep a small amount of skbs per tcp flow in tx queues (qdisc+dev)
 * to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important tcp_wfree() can be replaced by sock_wfree() in the event skb
 * needs to be reallocated in a driver.
 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		bh_lock_sock(sk);

		if (!sock_owned_by_user(sk)) {
			tcp_tsq_handler(sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
		}
		bh_unlock_sock(sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		sk->sk_prot->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* Keep a ref on socket.
		 * This last ref will be released in tcp_tasklet_func()
		 */
		atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = &__get_cpu_var(tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
	} else {
		sock_wfree(skb);
	}
}

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		const struct sk_buff *fclone = skb + 1;

		if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
			     fclone->fclone == SKB_FCLONE_CLONE))
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);

		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0) {
		tcp_ca_event(sk, CA_EVENT_TX_START);
		skb->ooo_okay = 1;
	} else
		skb->ooo_okay = 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
			  tcp_wfree : sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}

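/* Worked example of the 16-bit word written at offset 6 above (not part of
 * the original file), assuming a 32-byte header (20 bytes of TCP header plus
 * 12 bytes of timestamp option) and the PSH|ACK flags (0x08 | 0x10): the data
 * offset is 32 >> 2 = 8, so the field becomes (8 << 12) | 0x18 = 0x8018, a
 * value commonly seen in packet dumps for data segments on established
 * connections.
 */
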
/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	if (skb->len <= mss_now || !sk_can_gso(sk) ||
	    skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}

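/* Worked example of the segment count above (not part of the original file):
 * with skb->len = 10000 and mss_now = 1448 (timestamps enabled),
 * DIV_ROUND_UP(10000, 1448) = 7, so the skb is accounted as seven segments
 * against the congestion window.
 */
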
/* When a modification to fackets out becomes necessary, we need to check
 * skb is counted to fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

/* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix counters
 */
static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	tcp_adjust_fackets_out(sk, skb, decr);

	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
		tp->lost_cnt_hint -= decr;

	tcp_verify_left_out(tp);
}

/* Function to create two new TCP segments. Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list. This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u8 flags;

	if (WARN_ON(len > skb->len))
		return -EINVAL;

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses when of
	 * skbs, which it never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			   tcp_skb_pcount(buff);

		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = min_t(int, len, skb_headlen(skb));
	if (eat) {
		__skb_pull(skb, eat);
		len -= eat;
		if (!len)
			return;
	}
	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb_reset_tail_pointer(skb);
	skb->data_len -= len;
	skb->len = skb->data_len;
}

/* Remove acked data from a packet in the transmit queue. */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_unclone(skb, GFP_ATOMIC))
		return -ENOMEM;

	__pskb_trim_head(skb, len);

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->truesize -= len;
	sk->sk_wmem_queued -= len;
	sk_mem_uncharge(sk, len);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso factor. */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));

	return 0;
}

/* Calculate MSS not accounting any TCP options. */
static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	   It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
	if (icsk->icsk_af_ops->net_frag_header_len) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
	}

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;
	return mss_now;
}

/* Calculate MSS. Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	/* Subtract TCP options size, not including SACKs */
	return __tcp_mtu_to_mss(sk, pmtu) -
	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
}

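/* Worked example (not part of the original file), assuming IPv4 without IP
 * options and timestamps enabled: for a path MTU of 1500, __tcp_mtu_to_mss()
 * yields 1500 - 20 - 20 = 1460, and tcp_mtu_to_mss() then subtracts the 12
 * option bytes accounted in tcp_header_len, giving the familiar 1448 bytes of
 * payload per segment.
 */
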
/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
	if (icsk->icsk_af_ops->net_frag_header_len) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
			mtu += icsk->icsk_af_ops->net_frag_header_len;
	}
	return mtu;
}

/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}
EXPORT_SYMBOL(tcp_mtup_init);

/* This function synchronizes snd mss to current pmtu/exthdr set.

   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT count
   for TCP options, but includes only bare TCP header.

   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
   It is minimum of user_mss and mss received with SYN.
   It also does not include TCP options.

   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.

   tp->mss_cache is current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
   are READ ONLY outside this function. --ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);
	mss_now = tcp_bound_to_half_wnd(tp, mss_now);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}
EXPORT_SYMBOL(tcp_sync_mss);

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
unsigned int tcp_current_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	unsigned int header_len;
	struct tcp_out_options opts;
	struct tcp_md5sig_key *md5;

	mss_now = tp->mss_cache;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
		     sizeof(struct tcphdr);
	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
	 * some common options. If this is an odd packet (because we have SACK
	 * blocks etc) then our calculated header_len will be different, and
	 * we have to adjust mss_now correspondingly */
	if (header_len != tp->tcp_header_len) {
		int delta = (int) header_len - tp->tcp_header_len;
		mss_now -= delta;
	}

	return mss_now;
}

/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if (sysctl_tcp_slow_start_after_idle &&
		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

/* Returns the portion of skb which can be sent right away without
 * introducing MSS oddities to segment boundaries. In rare cases where
 * mss_now != mss_cache, we will request caller to create a small skb
 * per input skb which could be mostly avoided here (if desired).
 *
 * We explicitly want to create a request for splitting write queue tail
 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
 * thus all the complexity (cwnd_len is always MSS multiple which we
 * return whenever allowed by the other factors). Basically we need the
 * modulo only when the receiver window alone is the limiting factor or
 * when we would be allowed to send the split-due-to-Nagle skb fully.
 */
static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
					unsigned int mss_now, unsigned int max_segs)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 needed, window, max_len;

	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
	max_len = mss_now * max_segs;

	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
		return max_len;

	needed = min(skb->len, window);

	if (max_len <= needed)
		return max_len;

	return needed - needed % mss_now;
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules? If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
					 const struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN. */
	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
	    tcp_skb_pcount(skb) == 1)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* Initialize TSO state of a skb.
 * This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
			     unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

/* Minshall's variant of the Nagle send check. */
static inline bool tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return false, if packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
static inline bool tcp_nagle_check(const struct tcp_sock *tp,
				   const struct sk_buff *skb,
				   unsigned int mss_now, int nonagle)
{
	return skb->len < mss_now &&
	       ((nonagle & TCP_NAGLE_CORK) ||
		(!nonagle && tp->packets_out && tcp_minshall_check(tp)));
}

/* Return true if the Nagle test allows this packet to be
 * sent now.
 */
static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
				  unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames, which sit in the middle of the
	 * write_queue (they have no chances to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return true;

	/* Don't use the nagle rule for urgent data (or for the final FIN). */
	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
		return true;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return true;

	return false;
}

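/* Illustrative userspace sketch (not part of the original file): how an
 * application opts out of the Nagle test above. Setting TCP_NODELAY makes
 * the socket's nonagle state carry TCP_NAGLE_PUSH, so small segments are no
 * longer held back behind unacknowledged data. The helper name is made up
 * for this example.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int example_disable_nagle(int fd)
{
	int one = 1;

	return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
}
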
/* Does at least the first segment of SKB fit into the send window? */
static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
			     const struct sk_buff *skb,
			     unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tcp_wnd_end(tp));
}

/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now. If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

/* Test if sending is allowed right now. */
bool tcp_may_send_now(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);

	return skb &&
	       tcp_snd_test(sk, skb, tcp_current_mss(sk),
			    (tcp_skb_is_last(sk, skb) ?
			     tp->nonagle : TCP_NAGLE_PUSH));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list. It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation. In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
			unsigned int mss_now, gfp_t gfp)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u8 flags;

	/* All of a TSO frame must be composed of paged data. */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_skb(sk, 0, gfp);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

1588/* Try to defer sending, if possible, in order to minimize the amount
1589 * of TSO splitting we do. View it as a kind of TSO Nagle test.
1590 *
1591 * This algorithm is from John Heffner.
1592 */
a2a385d6 1593static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
c1b4a7e6 1594{
9e412ba7 1595 struct tcp_sock *tp = tcp_sk(sk);
6687e988 1596 const struct inet_connection_sock *icsk = inet_csk(sk);
c1b4a7e6 1597 u32 send_win, cong_win, limit, in_flight;
ad9f4f50 1598 int win_divisor;
c1b4a7e6 1599
4de075e0 1600 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
ae8064ac 1601 goto send_now;
c1b4a7e6 1602
6687e988 1603 if (icsk->icsk_ca_state != TCP_CA_Open)
ae8064ac
JH
1604 goto send_now;
1605
1606 /* Defer for less than two clock ticks. */
bd515c3e 1607 if (tp->tso_deferred &&
a2acde07 1608 (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
ae8064ac 1609 goto send_now;
908a75c1 1610
c1b4a7e6
DM
1611 in_flight = tcp_packets_in_flight(tp);
1612
056834d9 1613 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
c1b4a7e6 1614
90840def 1615 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
c1b4a7e6
DM
1616
1617 /* From in_flight test above, we know that cwnd > in_flight. */
1618 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1619
1620 limit = min(send_win, cong_win);
1621
ba244fe9 1622 /* If a full-sized TSO skb can be sent, do it. */
1485348d
BH
1623 if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
1624 sk->sk_gso_max_segs * tp->mss_cache))
ae8064ac 1625 goto send_now;
ba244fe9 1626
62ad2761
IJ
 1627 /* Middle of the queue won't get any more data; is it fully sendable already? */
1628 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
1629 goto send_now;
1630
ad9f4f50
ED
1631 win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1632 if (win_divisor) {
c1b4a7e6
DM
1633 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1634
1635 /* If at least some fraction of a window is available,
1636 * just use it.
1637 */
ad9f4f50 1638 chunk /= win_divisor;
c1b4a7e6 1639 if (limit >= chunk)
ae8064ac 1640 goto send_now;
c1b4a7e6
DM
1641 } else {
1642 /* Different approach, try not to defer past a single
1643 * ACK. Receiver should ACK every other full sized
1644 * frame, so if we have space for more than 3 frames
1645 * then send now.
1646 */
6b5a5c0d 1647 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
ae8064ac 1648 goto send_now;
c1b4a7e6
DM
1649 }
1650
f4541d60
ED
1651 /* Ok, it looks like it is advisable to defer.
 1652 * Do not rearm the timer if it is already set, so as not to break TCP ACK clocking.
1653 */
1654 if (!tp->tso_deferred)
1655 tp->tso_deferred = 1 | (jiffies << 1);
ae8064ac 1656
a2a385d6 1657 return true;
ae8064ac
JH
1658
1659send_now:
1660 tp->tso_deferred = 0;
a2a385d6 1661 return false;
c1b4a7e6
DM
1662}
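/* Illustrative sketch (not part of tcp_output.c): the win_divisor branch
 * of tcp_tso_should_defer() above, with invented names.  With the default
 * tcp_tso_win_divisor of 3, sending proceeds as soon as the sendable
 * amount (the smaller of the send window and the free congestion window,
 * in bytes) covers at least one third of min(snd_wnd, snd_cwnd * mss).
 */
static int sketch_tso_defer(unsigned int send_win, unsigned int cong_win,
			    unsigned int wnd_bytes, unsigned int win_divisor)
{
	unsigned int limit = send_win < cong_win ? send_win : cong_win;

	if (win_divisor && limit >= wnd_bytes / win_divisor)
		return 0;	/* enough room: send the TSO frame now	  */
	return 1;		/* otherwise defer and wait for more ACKs */
}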
1663
5d424d5a 1664/* Create a new MTU probe if we are ready.
67edfef7
AK
 1665 * MTU probing regularly attempts to increase the path MTU by
1666 * deliberately sending larger packets. This discovers routing
1667 * changes resulting in larger path MTUs.
1668 *
5d424d5a
JH
1669 * Returns 0 if we should wait to probe (no cwnd available),
1670 * 1 if a probe was sent,
056834d9
IJ
1671 * -1 otherwise
1672 */
5d424d5a
JH
1673static int tcp_mtu_probe(struct sock *sk)
1674{
1675 struct tcp_sock *tp = tcp_sk(sk);
1676 struct inet_connection_sock *icsk = inet_csk(sk);
1677 struct sk_buff *skb, *nskb, *next;
1678 int len;
1679 int probe_size;
91cc17c0 1680 int size_needed;
5d424d5a
JH
1681 int copy;
1682 int mss_now;
1683
1684 /* Not currently probing/verifying,
1685 * not in recovery,
1686 * have enough cwnd, and
1687 * not SACKing (the variable headers throw things off) */
1688 if (!icsk->icsk_mtup.enabled ||
1689 icsk->icsk_mtup.probe_size ||
1690 inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1691 tp->snd_cwnd < 11 ||
cabeccbd 1692 tp->rx_opt.num_sacks || tp->rx_opt.dsack)
5d424d5a
JH
1693 return -1;
1694
1695 /* Very simple search strategy: just double the MSS. */
0c54b85f 1696 mss_now = tcp_current_mss(sk);
056834d9 1697 probe_size = 2 * tp->mss_cache;
91cc17c0 1698 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
5d424d5a
JH
1699 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1700 /* TODO: set timer for probe_converge_event */
1701 return -1;
1702 }
1703
1704 /* Have enough data in the send queue to probe? */
7f9c33e5 1705 if (tp->write_seq - tp->snd_nxt < size_needed)
5d424d5a
JH
1706 return -1;
1707
91cc17c0
IJ
1708 if (tp->snd_wnd < size_needed)
1709 return -1;
90840def 1710 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
91cc17c0 1711 return 0;
5d424d5a 1712
d67c58e9
IJ
1713 /* Do we need to wait to drain cwnd? With none in flight, don't stall */
1714 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1715 if (!tcp_packets_in_flight(tp))
5d424d5a
JH
1716 return -1;
1717 else
1718 return 0;
1719 }
1720
1721 /* We're allowed to probe. Build it now. */
1722 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1723 return -1;
3ab224be
HA
1724 sk->sk_wmem_queued += nskb->truesize;
1725 sk_mem_charge(sk, nskb->truesize);
5d424d5a 1726
fe067e8a 1727 skb = tcp_send_head(sk);
5d424d5a
JH
1728
1729 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1730 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
4de075e0 1731 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
5d424d5a
JH
1732 TCP_SKB_CB(nskb)->sacked = 0;
1733 nskb->csum = 0;
84fa7933 1734 nskb->ip_summed = skb->ip_summed;
5d424d5a 1735
50c4817e
IJ
1736 tcp_insert_write_queue_before(nskb, skb, sk);
1737
5d424d5a 1738 len = 0;
234b6860 1739 tcp_for_write_queue_from_safe(skb, next, sk) {
5d424d5a
JH
1740 copy = min_t(int, skb->len, probe_size - len);
1741 if (nskb->ip_summed)
1742 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1743 else
1744 nskb->csum = skb_copy_and_csum_bits(skb, 0,
056834d9
IJ
1745 skb_put(nskb, copy),
1746 copy, nskb->csum);
5d424d5a
JH
1747
1748 if (skb->len <= copy) {
1749 /* We've eaten all the data from this skb.
1750 * Throw it away. */
4de075e0 1751 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
fe067e8a 1752 tcp_unlink_write_queue(skb, sk);
3ab224be 1753 sk_wmem_free_skb(sk, skb);
5d424d5a 1754 } else {
4de075e0 1755 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
a3433f35 1756 ~(TCPHDR_FIN|TCPHDR_PSH);
5d424d5a
JH
1757 if (!skb_shinfo(skb)->nr_frags) {
1758 skb_pull(skb, copy);
84fa7933 1759 if (skb->ip_summed != CHECKSUM_PARTIAL)
056834d9
IJ
1760 skb->csum = csum_partial(skb->data,
1761 skb->len, 0);
5d424d5a
JH
1762 } else {
1763 __pskb_trim_head(skb, copy);
1764 tcp_set_skb_tso_segs(sk, skb, mss_now);
1765 }
1766 TCP_SKB_CB(skb)->seq += copy;
1767 }
1768
1769 len += copy;
234b6860
IJ
1770
1771 if (len >= probe_size)
1772 break;
5d424d5a
JH
1773 }
1774 tcp_init_tso_segs(sk, nskb, nskb->len);
1775
1776 /* We're ready to send. If this fails, the probe will
1777 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
1778 TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1779 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1780 /* Decrement cwnd here because we are sending
056834d9 1781 * effectively two packets. */
5d424d5a 1782 tp->snd_cwnd--;
66f5fe62 1783 tcp_event_new_data_sent(sk, nskb);
5d424d5a
JH
1784
1785 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
0e7b1368
JH
1786 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1787 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
5d424d5a
JH
1788
1789 return 1;
1790 }
1791
1792 return -1;
1793}
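/* Illustrative sketch (not part of tcp_output.c): the sizing rule used by
 * tcp_mtu_probe() above, with invented names.  With tp->mss_cache = 1460
 * and reordering = 3, the probe itself carries 2 * 1460 = 2920 bytes and
 * the sender wants 2920 + 4 * 1460 = 8760 bytes of queued data and send
 * window before it is willing to probe.
 */
struct sketch_mtu_probe_plan {
	unsigned int probe_size;	/* bytes carried by the probe skb  */
	unsigned int size_needed;	/* queued data and window required */
};

static struct sketch_mtu_probe_plan sketch_mtu_probe_plan(unsigned int mss_cache,
							   unsigned int reordering)
{
	struct sketch_mtu_probe_plan p;

	p.probe_size = 2 * mss_cache;
	p.size_needed = p.probe_size + (reordering + 1) * mss_cache;
	return p;
}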
1794
1da177e4
LT
1795/* This routine writes packets to the network. It advances the
1796 * send_head. This happens as incoming acks open up the remote
1797 * window for us.
1798 *
f8269a49
IJ
1799 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1800 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1801 * account rare use of URG, this is not a big flaw.
1802 *
6ba8a3b1
ND
1803 * Send at most one packet when push_one > 0. Temporarily ignore
1804 * cwnd limit to force at most one packet out when push_one == 2.
 1805 *
a2a385d6
ED
 1806 * Returns true if no segments are in flight and we have queued segments,
1807 * but cannot send anything now because of SWS or another problem.
1da177e4 1808 */
a2a385d6
ED
1809static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1810 int push_one, gfp_t gfp)
1da177e4
LT
1811{
1812 struct tcp_sock *tp = tcp_sk(sk);
92df7b51 1813 struct sk_buff *skb;
c1b4a7e6
DM
1814 unsigned int tso_segs, sent_pkts;
1815 int cwnd_quota;
5d424d5a 1816 int result;
1da177e4 1817
92df7b51 1818 sent_pkts = 0;
5d424d5a 1819
d5dd9175
IJ
1820 if (!push_one) {
1821 /* Do MTU probing. */
1822 result = tcp_mtu_probe(sk);
1823 if (!result) {
a2a385d6 1824 return false;
d5dd9175
IJ
1825 } else if (result > 0) {
1826 sent_pkts = 1;
1827 }
5d424d5a
JH
1828 }
1829
fe067e8a 1830 while ((skb = tcp_send_head(sk))) {
c8ac3774
HX
1831 unsigned int limit;
1832
46d3ceab 1833
b68e9f85 1834 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
c1b4a7e6 1835 BUG_ON(!tso_segs);
aa93466b 1836
ec342325
AV
1837 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE)
1838 goto repair; /* Skip network transmission */
1839
b68e9f85 1840 cwnd_quota = tcp_cwnd_test(tp, skb);
6ba8a3b1
ND
1841 if (!cwnd_quota) {
1842 if (push_one == 2)
1843 /* Force out a loss probe pkt. */
1844 cwnd_quota = 1;
1845 else
1846 break;
1847 }
b68e9f85
HX
1848
1849 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1850 break;
1851
c1b4a7e6
DM
1852 if (tso_segs == 1) {
1853 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1854 (tcp_skb_is_last(sk, skb) ?
1855 nonagle : TCP_NAGLE_PUSH))))
1856 break;
1857 } else {
d5dd9175 1858 if (!push_one && tcp_tso_should_defer(sk, skb))
c1b4a7e6
DM
1859 break;
1860 }
aa93466b 1861
46d3ceab
ED
1862 /* TSQ : sk_wmem_alloc accounts skb truesize,
 1863 * including skb overhead. But that's OK.
1864 */
1865 if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) {
1866 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
1867 break;
1868 }
c8ac3774 1869 limit = mss_now;
f8269a49 1870 if (tso_segs > 1 && !tcp_urg_mode(tp))
0e3a4803 1871 limit = tcp_mss_split_point(sk, skb, mss_now,
1485348d
BH
1872 min_t(unsigned int,
1873 cwnd_quota,
1874 sk->sk_gso_max_segs));
1da177e4 1875
c8ac3774 1876 if (skb->len > limit &&
c4ead4c5 1877 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
c8ac3774
HX
1878 break;
1879
92df7b51 1880 TCP_SKB_CB(skb)->when = tcp_time_stamp;
c1b4a7e6 1881
d5dd9175 1882 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
92df7b51 1883 break;
1da177e4 1884
ec342325 1885repair:
92df7b51
DM
1886 /* Advance the send_head. This one is sent out.
1887 * This call will increment packets_out.
1888 */
66f5fe62 1889 tcp_event_new_data_sent(sk, skb);
1da177e4 1890
92df7b51 1891 tcp_minshall_update(tp, mss_now, skb);
a262f0cd 1892 sent_pkts += tcp_skb_pcount(skb);
d5dd9175
IJ
1893
1894 if (push_one)
1895 break;
92df7b51 1896 }
1da177e4 1897
aa93466b 1898 if (likely(sent_pkts)) {
684bad11
YC
1899 if (tcp_in_cwnd_reduction(sk))
1900 tp->prr_out += sent_pkts;
6ba8a3b1
ND
1901
1902 /* Send one loss probe per tail loss episode. */
1903 if (push_one != 2)
1904 tcp_schedule_loss_probe(sk);
9e412ba7 1905 tcp_cwnd_validate(sk);
a2a385d6 1906 return false;
1da177e4 1907 }
6ba8a3b1
ND
1908 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
1909}
1910
1911bool tcp_schedule_loss_probe(struct sock *sk)
1912{
1913 struct inet_connection_sock *icsk = inet_csk(sk);
1914 struct tcp_sock *tp = tcp_sk(sk);
1915 u32 timeout, tlp_time_stamp, rto_time_stamp;
1916 u32 rtt = tp->srtt >> 3;
1917
1918 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
1919 return false;
1920 /* No consecutive loss probes. */
1921 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
1922 tcp_rearm_rto(sk);
1923 return false;
1924 }
1925 /* Don't do any loss probe on a Fast Open connection before 3WHS
1926 * finishes.
1927 */
1928 if (sk->sk_state == TCP_SYN_RECV)
1929 return false;
1930
1931 /* TLP is only scheduled when next timer event is RTO. */
1932 if (icsk->icsk_pending != ICSK_TIME_RETRANS)
1933 return false;
1934
1935 /* Schedule a loss probe in 2*RTT for SACK capable connections
1936 * in Open state, that are either limited by cwnd or application.
1937 */
1938 if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
1939 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
1940 return false;
1941
1942 if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
1943 tcp_send_head(sk))
1944 return false;
1945
1946 /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
1947 * for delayed ack when there's one outstanding packet.
1948 */
1949 timeout = rtt << 1;
1950 if (tp->packets_out == 1)
1951 timeout = max_t(u32, timeout,
1952 (rtt + (rtt >> 1) + TCP_DELACK_MAX));
1953 timeout = max_t(u32, timeout, msecs_to_jiffies(10));
1954
1955 /* If RTO is shorter, just schedule TLP in its place. */
1956 tlp_time_stamp = tcp_time_stamp + timeout;
1957 rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
1958 if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
1959 s32 delta = rto_time_stamp - tcp_time_stamp;
1960 if (delta > 0)
1961 timeout = delta;
1962 }
1963
1964 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
1965 TCP_RTO_MAX);
1966 return true;
1967}
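/* Illustrative sketch (not part of tcp_output.c): the probe timeout that
 * tcp_schedule_loss_probe() above computes, restated in milliseconds with
 * invented names.  For rtt = 20 ms, one outstanding packet and a 200 ms
 * delayed-ACK maximum, the probe fires after max(40, 30 + 200, 10) =
 * 230 ms, unless the pending RTO would fire sooner.
 */
static unsigned int sketch_tlp_timeout_ms(unsigned int rtt_ms,
					  unsigned int packets_out,
					  unsigned int delack_max_ms,
					  unsigned int ms_until_rto)
{
	unsigned int timeout = 2 * rtt_ms;
	unsigned int one_pkt = rtt_ms + rtt_ms / 2 + delack_max_ms;

	if (packets_out == 1 && timeout < one_pkt)
		timeout = one_pkt;		/* allow for a delayed ACK  */
	if (timeout < 10)
		timeout = 10;			/* 10 ms floor		    */
	if (timeout > ms_until_rto)
		timeout = ms_until_rto;		/* never schedule past RTO  */
	return timeout;
}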
1968
1969/* When probe timeout (PTO) fires, send a new segment if one exists, else
1970 * retransmit the last segment.
1971 */
1972void tcp_send_loss_probe(struct sock *sk)
1973{
9b717a8d 1974 struct tcp_sock *tp = tcp_sk(sk);
6ba8a3b1
ND
1975 struct sk_buff *skb;
1976 int pcount;
1977 int mss = tcp_current_mss(sk);
1978 int err = -1;
1979
1980 if (tcp_send_head(sk) != NULL) {
1981 err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
1982 goto rearm_timer;
1983 }
1984
9b717a8d
ND
1985 /* At most one outstanding TLP retransmission. */
1986 if (tp->tlp_high_seq)
1987 goto rearm_timer;
1988
6ba8a3b1
ND
1989 /* Retransmit last segment. */
1990 skb = tcp_write_queue_tail(sk);
1991 if (WARN_ON(!skb))
1992 goto rearm_timer;
1993
1994 pcount = tcp_skb_pcount(skb);
1995 if (WARN_ON(!pcount))
1996 goto rearm_timer;
1997
1998 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
1999 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
2000 goto rearm_timer;
2001 skb = tcp_write_queue_tail(sk);
2002 }
2003
2004 if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2005 goto rearm_timer;
2006
2007 /* Probe with zero data doesn't trigger fast recovery. */
2008 if (skb->len > 0)
2009 err = __tcp_retransmit_skb(sk, skb);
2010
9b717a8d
ND
2011 /* Record snd_nxt for loss detection. */
2012 if (likely(!err))
2013 tp->tlp_high_seq = tp->snd_nxt;
2014
6ba8a3b1
ND
2015rearm_timer:
2016 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2017 inet_csk(sk)->icsk_rto,
2018 TCP_RTO_MAX);
2019
2020 if (likely(!err))
2021 NET_INC_STATS_BH(sock_net(sk),
2022 LINUX_MIB_TCPLOSSPROBES);
2023 return;
1da177e4
LT
2024}
2025
a762a980
DM
2026/* Push out any pending frames which were held back due to
2027 * TCP_CORK or attempt at coalescing tiny packets.
2028 * The socket must be locked by the caller.
2029 */
9e412ba7
IJ
2030void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
2031 int nonagle)
a762a980 2032{
726e07a8
IJ
2033 /* If we are closed, the bytes will have to remain here.
2034 * In time closedown will finish, we empty the write queue and
2035 * all will be happy.
2036 */
2037 if (unlikely(sk->sk_state == TCP_CLOSE))
2038 return;
2039
99a1dec7
MG
2040 if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
2041 sk_gfp_atomic(sk, GFP_ATOMIC)))
726e07a8 2042 tcp_check_probe_timer(sk);
a762a980
DM
2043}
2044
c1b4a7e6
DM
2045/* Send _single_ skb sitting at the send head. This function requires
2046 * true push pending frames to setup probe timer etc.
2047 */
2048void tcp_push_one(struct sock *sk, unsigned int mss_now)
2049{
fe067e8a 2050 struct sk_buff *skb = tcp_send_head(sk);
c1b4a7e6
DM
2051
2052 BUG_ON(!skb || skb->len < mss_now);
2053
d5dd9175 2054 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
c1b4a7e6
DM
2055}
2056
1da177e4
LT
2057/* This function returns the amount that we can raise the
2058 * usable window based on the following constraints
e905a9ed 2059 *
1da177e4
LT
2060 * 1. The window can never be shrunk once it is offered (RFC 793)
2061 * 2. We limit memory per socket
2062 *
2063 * RFC 1122:
2064 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
2065 * RECV.NEXT + RCV.WIN fixed until:
2066 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
2067 *
2068 * i.e. don't raise the right edge of the window until you can raise
 2069 * it by at least MSS bytes.
2070 *
2071 * Unfortunately, the recommended algorithm breaks header prediction,
2072 * since header prediction assumes th->window stays fixed.
2073 *
2074 * Strictly speaking, keeping th->window fixed violates the receiver
2075 * side SWS prevention criteria. The problem is that under this rule
2076 * a stream of single byte packets will cause the right side of the
2077 * window to always advance by a single byte.
e905a9ed 2078 *
1da177e4
LT
2079 * Of course, if the sender implements sender side SWS prevention
2080 * then this will not be a problem.
e905a9ed 2081 *
1da177e4 2082 * BSD seems to make the following compromise:
e905a9ed 2083 *
1da177e4
LT
2084 * If the free space is less than the 1/4 of the maximum
2085 * space available and the free space is less than 1/2 mss,
2086 * then set the window to 0.
2087 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
2088 * Otherwise, just prevent the window from shrinking
2089 * and from being larger than the largest representable value.
2090 *
2091 * This prevents incremental opening of the window in the regime
2092 * where TCP is limited by the speed of the reader side taking
2093 * data out of the TCP receive queue. It does nothing about
2094 * those cases where the window is constrained on the sender side
2095 * because the pipeline is full.
2096 *
2097 * BSD also seems to "accidentally" limit itself to windows that are a
2098 * multiple of MSS, at least until the free space gets quite small.
2099 * This would appear to be a side effect of the mbuf implementation.
2100 * Combining these two algorithms results in the observed behavior
2101 * of having a fixed window size at almost all times.
2102 *
2103 * Below we obtain similar behavior by forcing the offered window to
2104 * a multiple of the mss when it is feasible to do so.
2105 *
2106 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
2107 * Regular options like TIMESTAMP are taken into account.
2108 */
2109u32 __tcp_select_window(struct sock *sk)
2110{
463c84b9 2111 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 2112 struct tcp_sock *tp = tcp_sk(sk);
caa20d9a 2113 /* MSS for the peer's data. Previous versions used mss_clamp
1da177e4
LT
2114 * here. I don't know if the value based on our guesses
2115 * of peer's MSS is better for the performance. It's more correct
2116 * but may be worse for the performance because of rcv_mss
2117 * fluctuations. --SAW 1998/11/1
2118 */
463c84b9 2119 int mss = icsk->icsk_ack.rcv_mss;
1da177e4
LT
2120 int free_space = tcp_space(sk);
2121 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
2122 int window;
2123
2124 if (mss > full_space)
e905a9ed 2125 mss = full_space;
1da177e4 2126
b92edbe0 2127 if (free_space < (full_space >> 1)) {
463c84b9 2128 icsk->icsk_ack.quick = 0;
1da177e4 2129
180d8cd9 2130 if (sk_under_memory_pressure(sk))
056834d9
IJ
2131 tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2132 4U * tp->advmss);
1da177e4
LT
2133
2134 if (free_space < mss)
2135 return 0;
2136 }
2137
2138 if (free_space > tp->rcv_ssthresh)
2139 free_space = tp->rcv_ssthresh;
2140
2141 /* Don't do rounding if we are using window scaling, since the
2142 * scaled window will not line up with the MSS boundary anyway.
2143 */
2144 window = tp->rcv_wnd;
2145 if (tp->rx_opt.rcv_wscale) {
2146 window = free_space;
2147
2148 /* Advertise enough space so that it won't get scaled away.
 2149 * Important case: prevent zero window announcement if
2150 * 1<<rcv_wscale > mss.
2151 */
2152 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
2153 window = (((window >> tp->rx_opt.rcv_wscale) + 1)
2154 << tp->rx_opt.rcv_wscale);
2155 } else {
2156 /* Get the largest window that is a nice multiple of mss.
2157 * Window clamp already applied above.
2158 * If our current window offering is within 1 mss of the
2159 * free space we just keep it. This prevents the divide
2160 * and multiply from happening most of the time.
2161 * We also don't do any window rounding when the free space
2162 * is too small.
2163 */
2164 if (window <= free_space - mss || window > free_space)
056834d9 2165 window = (free_space / mss) * mss;
84565070 2166 else if (mss == full_space &&
b92edbe0 2167 free_space > window + (full_space >> 1))
84565070 2168 window = free_space;
1da177e4
LT
2169 }
2170
2171 return window;
2172}
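/* Illustrative sketch (not part of tcp_output.c): the no-window-scaling
 * branch of __tcp_select_window() above, with invented names.  With
 * free_space = 10000 bytes and mss = 1460, a current offer of 7000 is
 * more than one MSS below the free space, so the offer is re-rounded to
 * (10000 / 1460) * 1460 = 8760 bytes.
 */
static int sketch_round_window(int window, int free_space, int mss)
{
	if (window <= free_space - mss || window > free_space)
		window = (free_space / mss) * mss;	/* multiple of MSS */
	return window;
}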
2173
4a17fc3a
IJ
2174/* Collapses two adjacent SKB's during retransmission. */
2175static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
1da177e4
LT
2176{
2177 struct tcp_sock *tp = tcp_sk(sk);
fe067e8a 2178 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
058dc334 2179 int skb_size, next_skb_size;
1da177e4 2180
058dc334
IJ
2181 skb_size = skb->len;
2182 next_skb_size = next_skb->len;
1da177e4 2183
058dc334 2184 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
a6963a6b 2185
058dc334 2186 tcp_highest_sack_combine(sk, next_skb, skb);
1da177e4 2187
058dc334 2188 tcp_unlink_write_queue(next_skb, sk);
1da177e4 2189
058dc334
IJ
2190 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
2191 next_skb_size);
1da177e4 2192
058dc334
IJ
2193 if (next_skb->ip_summed == CHECKSUM_PARTIAL)
2194 skb->ip_summed = CHECKSUM_PARTIAL;
1da177e4 2195
058dc334
IJ
2196 if (skb->ip_summed != CHECKSUM_PARTIAL)
2197 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
1da177e4 2198
058dc334
IJ
2199 /* Update sequence range on original skb. */
2200 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
1da177e4 2201
e6c7d085 2202 /* Merge over control information. This moves PSH/FIN etc. over */
4de075e0 2203 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
058dc334
IJ
2204
2205 /* All done, get rid of second SKB and account for it so
2206 * packet counting does not break.
2207 */
2208 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
058dc334
IJ
2209
2210 /* changed transmit queue under us so clear hints */
ef9da47c
IJ
2211 tcp_clear_retrans_hints_partial(tp);
2212 if (next_skb == tp->retransmit_skb_hint)
2213 tp->retransmit_skb_hint = skb;
058dc334 2214
797108d1
IJ
2215 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2216
058dc334 2217 sk_wmem_free_skb(sk, next_skb);
1da177e4
LT
2218}
2219
67edfef7 2220/* Check if coalescing SKBs is legal. */
a2a385d6 2221static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
4a17fc3a
IJ
2222{
2223 if (tcp_skb_pcount(skb) > 1)
a2a385d6 2224 return false;
4a17fc3a
IJ
2225 /* TODO: SACK collapsing could be used to remove this condition */
2226 if (skb_shinfo(skb)->nr_frags != 0)
a2a385d6 2227 return false;
4a17fc3a 2228 if (skb_cloned(skb))
a2a385d6 2229 return false;
4a17fc3a 2230 if (skb == tcp_send_head(sk))
a2a385d6 2231 return false;
4a17fc3a
IJ
 2232 /* Some heuristics for collapsing over SACK'd segments could be invented */
2233 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
a2a385d6 2234 return false;
4a17fc3a 2235
a2a385d6 2236 return true;
4a17fc3a
IJ
2237}
2238
67edfef7
AK
 2239/* Collapse packets in the retransmit queue in order to create
 2240 * fewer packets on the wire. This is only done on retransmission.
2241 */
4a17fc3a
IJ
2242static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2243 int space)
2244{
2245 struct tcp_sock *tp = tcp_sk(sk);
2246 struct sk_buff *skb = to, *tmp;
a2a385d6 2247 bool first = true;
4a17fc3a
IJ
2248
2249 if (!sysctl_tcp_retrans_collapse)
2250 return;
4de075e0 2251 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
4a17fc3a
IJ
2252 return;
2253
2254 tcp_for_write_queue_from_safe(skb, tmp, sk) {
2255 if (!tcp_can_collapse(sk, skb))
2256 break;
2257
2258 space -= skb->len;
2259
2260 if (first) {
a2a385d6 2261 first = false;
4a17fc3a
IJ
2262 continue;
2263 }
2264
2265 if (space < 0)
2266 break;
2267 /* Punt if not enough space exists in the first SKB for
2268 * the data in the second
2269 */
a21d4572 2270 if (skb->len > skb_availroom(to))
4a17fc3a
IJ
2271 break;
2272
2273 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
2274 break;
2275
2276 tcp_collapse_retrans(sk, to);
2277 }
2278}
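/* Illustrative sketch (not part of tcp_output.c): the budget test that
 * tcp_retrans_try_collapse() above applies to each following segment,
 * with invented names.  A segment is merged into the skb being
 * retransmitted only while the running total stays within the MSS budget
 * ("space") and the data still fits in the first skb's tailroom.
 */
static int sketch_can_absorb(int space_left, unsigned int next_len,
			     unsigned int tailroom)
{
	if (space_left - (int)next_len < 0)
		return 0;		/* would exceed the MSS budget	  */
	if (next_len > tailroom)
		return 0;		/* no room left in the first skb  */
	return 1;
}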
2279
1da177e4
LT
2280/* This retransmits one SKB. Policy decisions and retransmit queue
2281 * state updates are done by the caller. Returns non-zero if an
2282 * error occurred which prevented the send.
2283 */
93b174ad 2284int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1da177e4
LT
2285{
2286 struct tcp_sock *tp = tcp_sk(sk);
5d424d5a 2287 struct inet_connection_sock *icsk = inet_csk(sk);
7d227cd2 2288 unsigned int cur_mss;
1da177e4 2289
5d424d5a
JH
 2290 /* Inconclusive MTU probe */
2291 if (icsk->icsk_mtup.probe_size) {
2292 icsk->icsk_mtup.probe_size = 0;
2293 }
2294
1da177e4 2295 /* Do not send more than we queued. 1/4 is reserved for possible
caa20d9a 2296 * copying overhead: fragmentation, tunneling, mangling etc.
1da177e4
LT
2297 */
2298 if (atomic_read(&sk->sk_wmem_alloc) >
2299 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
2300 return -EAGAIN;
2301
2302 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2303 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
2304 BUG();
1da177e4
LT
2305 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
2306 return -ENOMEM;
2307 }
2308
7d227cd2
SS
2309 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
2310 return -EHOSTUNREACH; /* Routing failure or similar. */
2311
0c54b85f 2312 cur_mss = tcp_current_mss(sk);
7d227cd2 2313
1da177e4
LT
2314 /* If receiver has shrunk his window, and skb is out of
2315 * new window, do not retransmit it. The exception is the
2316 * case, when window is shrunk to zero. In this case
2317 * our retransmit serves as a zero window probe.
2318 */
9d4fb27d
JP
2319 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
2320 TCP_SKB_CB(skb)->seq != tp->snd_una)
1da177e4
LT
2321 return -EAGAIN;
2322
2323 if (skb->len > cur_mss) {
846998ae 2324 if (tcp_fragment(sk, skb, cur_mss, cur_mss))
1da177e4 2325 return -ENOMEM; /* We'll try again later. */
02276f3c 2326 } else {
9eb9362e
IJ
2327 int oldpcount = tcp_skb_pcount(skb);
2328
2329 if (unlikely(oldpcount > 1)) {
2330 tcp_init_tso_segs(sk, skb, cur_mss);
2331 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2332 }
1da177e4
LT
2333 }
2334
4a17fc3a 2335 tcp_retrans_try_collapse(sk, skb, cur_mss);
1da177e4 2336
1da177e4
LT
2337 /* Some Solaris stacks overoptimize and ignore the FIN on a
2338 * retransmit when old data is attached. So strip it off
2339 * since it is cheap to do so and saves bytes on the network.
2340 */
2de979bd 2341 if (skb->len > 0 &&
4de075e0 2342 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
2de979bd 2343 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
1da177e4 2344 if (!pskb_trim(skb, 0)) {
e870a8ef
IJ
2345 /* Reuse, even though it does some unnecessary work */
2346 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
4de075e0 2347 TCP_SKB_CB(skb)->tcp_flags);
1da177e4 2348 skb->ip_summed = CHECKSUM_NONE;
1da177e4
LT
2349 }
2350 }
2351
2352 /* Make a copy, if the first transmission SKB clone we made
2353 * is still in somebody's hands, else make a clone.
2354 */
2355 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1da177e4 2356
50bceae9
TG
2357 /* make sure skb->data is aligned on arches that require it
2358 * and check if ack-trimming & collapsing extended the headroom
2359 * beyond what csum_start can cover.
2360 */
2361 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
2362 skb_headroom(skb) >= 0xFFFF)) {
117632e6
ED
2363 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
2364 GFP_ATOMIC);
93b174ad
YC
2365 return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2366 -ENOBUFS;
117632e6 2367 } else {
93b174ad 2368 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
117632e6 2369 }
93b174ad
YC
2370}
2371
2372int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2373{
2374 struct tcp_sock *tp = tcp_sk(sk);
2375 int err = __tcp_retransmit_skb(sk, skb);
1da177e4
LT
2376
2377 if (err == 0) {
2378 /* Update global TCP statistics. */
81cc8a75 2379 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
1da177e4
LT
2380
2381 tp->total_retrans++;
2382
2383#if FASTRETRANS_DEBUG > 0
056834d9 2384 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
e87cc472 2385 net_dbg_ratelimited("retrans_out leaked\n");
1da177e4
LT
2386 }
2387#endif
b08d6cb2
IJ
2388 if (!tp->retrans_out)
2389 tp->lost_retrans_low = tp->snd_nxt;
1da177e4
LT
2390 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
2391 tp->retrans_out += tcp_skb_pcount(skb);
2392
2393 /* Save stamp of the first retransmit. */
2394 if (!tp->retrans_stamp)
2395 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2396
c24f691b 2397 tp->undo_retrans += tcp_skb_pcount(skb);
1da177e4
LT
2398
2399 /* snd_nxt is stored to detect loss of retransmitted segment,
2400 * see tcp_input.c tcp_sacktag_write_queue().
2401 */
2402 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
2403 }
2404 return err;
2405}
2406
67edfef7
AK
 2407/* Check if forward retransmits are possible in the current
2408 * window/congestion state.
2409 */
a2a385d6 2410static bool tcp_can_forward_retransmit(struct sock *sk)
b5afe7bc
IJ
2411{
2412 const struct inet_connection_sock *icsk = inet_csk(sk);
cf533ea5 2413 const struct tcp_sock *tp = tcp_sk(sk);
b5afe7bc
IJ
2414
2415 /* Forward retransmissions are possible only during Recovery. */
2416 if (icsk->icsk_ca_state != TCP_CA_Recovery)
a2a385d6 2417 return false;
b5afe7bc
IJ
2418
2419 /* No forward retransmissions in Reno are possible. */
2420 if (tcp_is_reno(tp))
a2a385d6 2421 return false;
b5afe7bc
IJ
2422
 2423 * Yeah, we have to make a difficult choice between forward transmission
2424 * and retransmission... Both ways have their merits...
2425 *
2426 * For now we do not retransmit anything, while we have some new
2427 * segments to send. In the other cases, follow rule 3 for
2428 * NextSeg() specified in RFC3517.
2429 */
2430
2431 if (tcp_may_send_now(sk))
a2a385d6 2432 return false;
b5afe7bc 2433
a2a385d6 2434 return true;
b5afe7bc
IJ
2435}
2436
1da177e4
LT
2437/* This gets called after a retransmit timeout, and the initially
2438 * retransmitted data is acknowledged. It tries to continue
2439 * resending the rest of the retransmit queue, until either
2440 * we've sent it all or the congestion window limit is reached.
2441 * If doing SACK, the first ACK which comes back for a timeout
2442 * based retransmit packet might feed us FACK information again.
 2443 * If so, we use it to avoid unnecessary retransmissions.
2444 */
2445void tcp_xmit_retransmit_queue(struct sock *sk)
2446{
6687e988 2447 const struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
2448 struct tcp_sock *tp = tcp_sk(sk);
2449 struct sk_buff *skb;
0e1c54c2 2450 struct sk_buff *hole = NULL;
618d9f25 2451 u32 last_lost;
61eb55f4 2452 int mib_idx;
0e1c54c2 2453 int fwd_rexmitting = 0;
6a438bbe 2454
45e77d31
IJ
2455 if (!tp->packets_out)
2456 return;
2457
08ebd172
IJ
2458 if (!tp->lost_out)
2459 tp->retransmit_high = tp->snd_una;
2460
618d9f25 2461 if (tp->retransmit_skb_hint) {
6a438bbe 2462 skb = tp->retransmit_skb_hint;
618d9f25
IJ
2463 last_lost = TCP_SKB_CB(skb)->end_seq;
2464 if (after(last_lost, tp->retransmit_high))
2465 last_lost = tp->retransmit_high;
2466 } else {
fe067e8a 2467 skb = tcp_write_queue_head(sk);
618d9f25
IJ
2468 last_lost = tp->snd_una;
2469 }
1da177e4 2470
08ebd172
IJ
2471 tcp_for_write_queue_from(skb, sk) {
2472 __u8 sacked = TCP_SKB_CB(skb)->sacked;
1da177e4 2473
08ebd172
IJ
2474 if (skb == tcp_send_head(sk))
2475 break;
2476 /* we could do better than to assign each time */
0e1c54c2
IJ
2477 if (hole == NULL)
2478 tp->retransmit_skb_hint = skb;
08ebd172
IJ
2479
2480 /* Assume this retransmit will generate
2481 * only one packet for congestion window
2482 * calculation purposes. This works because
2483 * tcp_retransmit_skb() will chop up the
2484 * packet to be MSS sized and all the
2485 * packet counting works out.
2486 */
2487 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2488 return;
1da177e4 2489
0e1c54c2
IJ
2490 if (fwd_rexmitting) {
2491begin_fwd:
2492 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2493 break;
2494 mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
6a438bbe 2495
0e1c54c2 2496 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
618d9f25 2497 tp->retransmit_high = last_lost;
0e1c54c2
IJ
2498 if (!tcp_can_forward_retransmit(sk))
2499 break;
2500 /* Backtrack if necessary to non-L'ed skb */
2501 if (hole != NULL) {
2502 skb = hole;
2503 hole = NULL;
2504 }
2505 fwd_rexmitting = 1;
2506 goto begin_fwd;
1da177e4 2507
0e1c54c2 2508 } else if (!(sacked & TCPCB_LOST)) {
ac11ba75 2509 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
0e1c54c2
IJ
2510 hole = skb;
2511 continue;
1da177e4 2512
0e1c54c2 2513 } else {
618d9f25 2514 last_lost = TCP_SKB_CB(skb)->end_seq;
0e1c54c2
IJ
2515 if (icsk->icsk_ca_state != TCP_CA_Loss)
2516 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2517 else
2518 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2519 }
1da177e4 2520
0e1c54c2 2521 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
1da177e4
LT
2522 continue;
2523
09e9b813
ED
2524 if (tcp_retransmit_skb(sk, skb)) {
2525 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
0e1c54c2 2526 return;
09e9b813 2527 }
0e1c54c2 2528 NET_INC_STATS_BH(sock_net(sk), mib_idx);
1da177e4 2529
684bad11 2530 if (tcp_in_cwnd_reduction(sk))
a262f0cd
ND
2531 tp->prr_out += tcp_skb_pcount(skb);
2532
fe067e8a 2533 if (skb == tcp_write_queue_head(sk))
3f421baa
ACM
2534 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2535 inet_csk(sk)->icsk_rto,
2536 TCP_RTO_MAX);
1da177e4
LT
2537 }
2538}
2539
1da177e4
LT
2540/* Send a fin. The caller locks the socket for us. This cannot be
2541 * allowed to fail queueing a FIN frame under any circumstances.
2542 */
2543void tcp_send_fin(struct sock *sk)
2544{
e905a9ed 2545 struct tcp_sock *tp = tcp_sk(sk);
fe067e8a 2546 struct sk_buff *skb = tcp_write_queue_tail(sk);
1da177e4 2547 int mss_now;
e905a9ed 2548
1da177e4
LT
2549 /* Optimization, tack on the FIN if we have a queue of
2550 * unsent frames. But be careful about outgoing SACKS
2551 * and IP options.
2552 */
0c54b85f 2553 mss_now = tcp_current_mss(sk);
1da177e4 2554
fe067e8a 2555 if (tcp_send_head(sk) != NULL) {
4de075e0 2556 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
1da177e4
LT
2557 TCP_SKB_CB(skb)->end_seq++;
2558 tp->write_seq++;
2559 } else {
2560 /* Socket is locked, keep trying until memory is available. */
2561 for (;;) {
aa133076
WF
2562 skb = alloc_skb_fclone(MAX_TCP_HEADER,
2563 sk->sk_allocation);
1da177e4
LT
2564 if (skb)
2565 break;
2566 yield();
2567 }
2568
2569 /* Reserve space for headers and prepare control bits. */
2570 skb_reserve(skb, MAX_TCP_HEADER);
1da177e4 2571 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
e870a8ef 2572 tcp_init_nondata_skb(skb, tp->write_seq,
a3433f35 2573 TCPHDR_ACK | TCPHDR_FIN);
1da177e4
LT
2574 tcp_queue_skb(sk, skb);
2575 }
9e412ba7 2576 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
1da177e4
LT
2577}
2578
2579/* We get here when a process closes a file descriptor (either due to
2580 * an explicit close() or as a byproduct of exit()'ing) and there
2581 * was unread data in the receive queue. This behavior is recommended
65bb723c 2582 * by RFC 2525, section 2.17. -DaveM
1da177e4 2583 */
dd0fc66f 2584void tcp_send_active_reset(struct sock *sk, gfp_t priority)
1da177e4 2585{
1da177e4
LT
2586 struct sk_buff *skb;
2587
2588 /* NOTE: No TCP options attached and we never retransmit this. */
2589 skb = alloc_skb(MAX_TCP_HEADER, priority);
2590 if (!skb) {
4e673444 2591 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
1da177e4
LT
2592 return;
2593 }
2594
2595 /* Reserve space for headers and prepare control bits. */
2596 skb_reserve(skb, MAX_TCP_HEADER);
e870a8ef 2597 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
a3433f35 2598 TCPHDR_ACK | TCPHDR_RST);
1da177e4 2599 /* Send it off. */
1da177e4 2600 TCP_SKB_CB(skb)->when = tcp_time_stamp;
dfb4b9dc 2601 if (tcp_transmit_skb(sk, skb, 0, priority))
4e673444 2602 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
26af65cb 2603
81cc8a75 2604 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
1da177e4
LT
2605}
2606
67edfef7
AK
2607/* Send a crossed SYN-ACK during socket establishment.
2608 * WARNING: This routine must only be called when we have already sent
1da177e4
LT
2609 * a SYN packet that crossed the incoming SYN that caused this routine
2610 * to get called. If this assumption fails then the initial rcv_wnd
2611 * and rcv_wscale values will not be correct.
2612 */
2613int tcp_send_synack(struct sock *sk)
2614{
056834d9 2615 struct sk_buff *skb;
1da177e4 2616
fe067e8a 2617 skb = tcp_write_queue_head(sk);
4de075e0 2618 if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
91df42be 2619 pr_debug("%s: wrong queue state\n", __func__);
1da177e4
LT
2620 return -EFAULT;
2621 }
4de075e0 2622 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
1da177e4
LT
2623 if (skb_cloned(skb)) {
2624 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2625 if (nskb == NULL)
2626 return -ENOMEM;
fe067e8a 2627 tcp_unlink_write_queue(skb, sk);
1da177e4 2628 skb_header_release(nskb);
fe067e8a 2629 __tcp_add_write_queue_head(sk, nskb);
3ab224be
HA
2630 sk_wmem_free_skb(sk, skb);
2631 sk->sk_wmem_queued += nskb->truesize;
2632 sk_mem_charge(sk, nskb->truesize);
1da177e4
LT
2633 skb = nskb;
2634 }
2635
4de075e0 2636 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
1da177e4
LT
2637 TCP_ECN_send_synack(tcp_sk(sk), skb);
2638 }
2639 TCP_SKB_CB(skb)->when = tcp_time_stamp;
dfb4b9dc 2640 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
1da177e4
LT
2641}
2642
4aea39c1
ED
2643/**
2644 * tcp_make_synack - Prepare a SYN-ACK.
2645 * sk: listener socket
2646 * dst: dst entry attached to the SYNACK
2647 * req: request_sock pointer
4aea39c1
ED
2648 *
2649 * Allocate one skb and build a SYNACK packet.
2650 * @dst is consumed : Caller should not use it again.
2651 */
056834d9 2652struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
e6b4d113 2653 struct request_sock *req,
8336886f 2654 struct tcp_fastopen_cookie *foc)
1da177e4 2655{
bd0388ae 2656 struct tcp_out_options opts;
2e6599cb 2657 struct inet_request_sock *ireq = inet_rsk(req);
1da177e4
LT
2658 struct tcp_sock *tp = tcp_sk(sk);
2659 struct tcphdr *th;
1da177e4 2660 struct sk_buff *skb;
cfb6eeb4 2661 struct tcp_md5sig_key *md5;
bd0388ae 2662 int tcp_header_size;
f5fff5dc 2663 int mss;
1da177e4 2664
1a2c6181 2665 skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC));
4aea39c1
ED
2666 if (unlikely(!skb)) {
2667 dst_release(dst);
1da177e4 2668 return NULL;
4aea39c1 2669 }
1da177e4
LT
2670 /* Reserve space for headers. */
2671 skb_reserve(skb, MAX_TCP_HEADER);
2672
4aea39c1 2673 skb_dst_set(skb, dst);
ca10b9e9 2674 security_skb_owned_by(skb, sk);
1da177e4 2675
0dbaee3b 2676 mss = dst_metric_advmss(dst);
f5fff5dc
TQ
2677 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2678 mss = tp->rx_opt.user_mss;
2679
33ad798c
AL
2680 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2681 __u8 rcv_wscale;
2682 /* Set this up on the first call only */
2683 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
e88c64f0
HPP
2684
 2685 /* limit the window selection if the user enforces a smaller rx buffer */
2686 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2687 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2688 req->window_clamp = tcp_full_space(sk);
2689
33ad798c
AL
2690 /* tcp_full_space because it is guaranteed to be the first packet */
2691 tcp_select_initial_window(tcp_full_space(sk),
f5fff5dc 2692 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
33ad798c
AL
2693 &req->rcv_wnd,
2694 &req->window_clamp,
2695 ireq->wscale_ok,
31d12926 2696 &rcv_wscale,
2697 dst_metric(dst, RTAX_INITRWND));
33ad798c
AL
2698 ireq->rcv_wscale = rcv_wscale;
2699 }
2700
2701 memset(&opts, 0, sizeof(opts));
8b5f12d0
FW
2702#ifdef CONFIG_SYN_COOKIES
2703 if (unlikely(req->cookie_ts))
2704 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2705 else
2706#endif
33ad798c 2707 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1a2c6181
CP
2708 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
2709 foc) + sizeof(*th);
cfb6eeb4 2710
aa8223c7
ACM
2711 skb_push(skb, tcp_header_size);
2712 skb_reset_transport_header(skb);
1da177e4 2713
aa8223c7 2714 th = tcp_hdr(skb);
1da177e4
LT
2715 memset(th, 0, sizeof(struct tcphdr));
2716 th->syn = 1;
2717 th->ack = 1;
1da177e4 2718 TCP_ECN_make_synack(req, th);
a3116ac5 2719 th->source = ireq->loc_port;
2e6599cb 2720 th->dest = ireq->rmt_port;
e870a8ef
IJ
 2721 /* Setting of flags is superfluous here for callers (and ECE is
2722 * not even correctly set)
2723 */
2724 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
a3433f35 2725 TCPHDR_SYN | TCPHDR_ACK);
4957faad 2726
1da177e4 2727 th->seq = htonl(TCP_SKB_CB(skb)->seq);
8336886f
JC
2728 /* XXX data is queued and acked as is. No buffer/window check */
2729 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
1da177e4
LT
2730
2731 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
600ff0c2 2732 th->window = htons(min(req->rcv_wnd, 65535U));
bd0388ae 2733 tcp_options_write((__be32 *)(th + 1), tp, &opts);
1da177e4 2734 th->doff = (tcp_header_size >> 2);
aa2ea058 2735 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
cfb6eeb4
YH
2736
2737#ifdef CONFIG_TCP_MD5SIG
2738 /* Okay, we have all we need - do the md5 hash if needed */
2739 if (md5) {
bd0388ae 2740 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
49a72dfb 2741 md5, NULL, req, skb);
cfb6eeb4
YH
2742 }
2743#endif
2744
1da177e4
LT
2745 return skb;
2746}
4bc2f18b 2747EXPORT_SYMBOL(tcp_make_synack);
1da177e4 2748
67edfef7 2749/* Do all connect socket setups that can be done AF independent. */
370816ae 2750void tcp_connect_init(struct sock *sk)
1da177e4 2751{
cf533ea5 2752 const struct dst_entry *dst = __sk_dst_get(sk);
1da177e4
LT
2753 struct tcp_sock *tp = tcp_sk(sk);
2754 __u8 rcv_wscale;
2755
2756 /* We'll fix this up when we get a response from the other end.
2757 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2758 */
2759 tp->tcp_header_len = sizeof(struct tcphdr) +
bb5b7c11 2760 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
1da177e4 2761
cfb6eeb4
YH
2762#ifdef CONFIG_TCP_MD5SIG
2763 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2764 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2765#endif
2766
1da177e4
LT
2767 /* If user gave his TCP_MAXSEG, record it to clamp */
2768 if (tp->rx_opt.user_mss)
2769 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2770 tp->max_window = 0;
5d424d5a 2771 tcp_mtup_init(sk);
1da177e4
LT
2772 tcp_sync_mss(sk, dst_mtu(dst));
2773
2774 if (!tp->window_clamp)
2775 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
0dbaee3b 2776 tp->advmss = dst_metric_advmss(dst);
f5fff5dc
TQ
2777 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2778 tp->advmss = tp->rx_opt.user_mss;
2779
1da177e4 2780 tcp_initialize_rcv_mss(sk);
1da177e4 2781
e88c64f0
HPP
 2782 /* limit the window selection if the user enforces a smaller rx buffer */
2783 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2784 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2785 tp->window_clamp = tcp_full_space(sk);
2786
1da177e4
LT
2787 tcp_select_initial_window(tcp_full_space(sk),
2788 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2789 &tp->rcv_wnd,
2790 &tp->window_clamp,
bb5b7c11 2791 sysctl_tcp_window_scaling,
31d12926 2792 &rcv_wscale,
2793 dst_metric(dst, RTAX_INITRWND));
1da177e4
LT
2794
2795 tp->rx_opt.rcv_wscale = rcv_wscale;
2796 tp->rcv_ssthresh = tp->rcv_wnd;
2797
2798 sk->sk_err = 0;
2799 sock_reset_flag(sk, SOCK_DONE);
2800 tp->snd_wnd = 0;
ee7537b6 2801 tcp_init_wl(tp, 0);
1da177e4
LT
2802 tp->snd_una = tp->write_seq;
2803 tp->snd_sml = tp->write_seq;
33f5f57e 2804 tp->snd_up = tp->write_seq;
370816ae 2805 tp->snd_nxt = tp->write_seq;
ee995283
PE
2806
2807 if (likely(!tp->repair))
2808 tp->rcv_nxt = 0;
2809 tp->rcv_wup = tp->rcv_nxt;
2810 tp->copied_seq = tp->rcv_nxt;
1da177e4 2811
463c84b9
ACM
2812 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2813 inet_csk(sk)->icsk_retransmits = 0;
1da177e4
LT
2814 tcp_clear_retrans(tp);
2815}
2816
783237e8
YC
2817static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
2818{
2819 struct tcp_sock *tp = tcp_sk(sk);
2820 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
2821
2822 tcb->end_seq += skb->len;
2823 skb_header_release(skb);
2824 __tcp_add_write_queue_tail(sk, skb);
2825 sk->sk_wmem_queued += skb->truesize;
2826 sk_mem_charge(sk, skb->truesize);
2827 tp->write_seq = tcb->end_seq;
2828 tp->packets_out += tcp_skb_pcount(skb);
2829}
2830
2831/* Build and send a SYN with data and (cached) Fast Open cookie. However,
2832 * queue a data-only packet after the regular SYN, such that regular SYNs
2833 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
2834 * only the SYN sequence, the data are retransmitted in the first ACK.
 2835 * If the cookie is not cached or another error occurs, fall back to sending a
2836 * regular SYN with Fast Open cookie request option.
2837 */
2838static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
2839{
2840 struct tcp_sock *tp = tcp_sk(sk);
2841 struct tcp_fastopen_request *fo = tp->fastopen_req;
aab48743 2842 int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
783237e8 2843 struct sk_buff *syn_data = NULL, *data;
aab48743
YC
2844 unsigned long last_syn_loss = 0;
2845
67da22d2 2846 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
aab48743
YC
2847 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
2848 &syn_loss, &last_syn_loss);
2849 /* Recurring FO SYN losses: revert to regular handshake temporarily */
2850 if (syn_loss > 1 &&
2851 time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
2852 fo->cookie.len = -1;
2853 goto fallback;
2854 }
783237e8 2855
67da22d2
YC
2856 if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
2857 fo->cookie.len = -1;
2858 else if (fo->cookie.len <= 0)
783237e8
YC
2859 goto fallback;
2860
2861 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and
2862 * user-MSS. Reserve maximum option space for middleboxes that add
2863 * private TCP options. The cost is reduced data space in SYN :(
2864 */
2865 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
2866 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
1b63edd6 2867 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
783237e8
YC
2868 MAX_TCP_OPTION_SPACE;
2869
2870 syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
2871 sk->sk_allocation);
2872 if (syn_data == NULL)
2873 goto fallback;
2874
2875 for (i = 0; i < iovlen && syn_data->len < space; ++i) {
2876 struct iovec *iov = &fo->data->msg_iov[i];
2877 unsigned char __user *from = iov->iov_base;
2878 int len = iov->iov_len;
2879
2880 if (syn_data->len + len > space)
2881 len = space - syn_data->len;
2882 else if (i + 1 == iovlen)
2883 /* No more data pending in inet_wait_for_connect() */
2884 fo->data = NULL;
2885
2886 if (skb_add_data(syn_data, from, len))
2887 goto fallback;
2888 }
2889
2890 /* Queue a data-only packet after the regular SYN for retransmission */
2891 data = pskb_copy(syn_data, sk->sk_allocation);
2892 if (data == NULL)
2893 goto fallback;
2894 TCP_SKB_CB(data)->seq++;
2895 TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
2896 TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
2897 tcp_connect_queue_skb(sk, data);
2898 fo->copied = data->len;
2899
2900 if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
67da22d2 2901 tp->syn_data = (fo->copied > 0);
783237e8
YC
2902 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
2903 goto done;
2904 }
2905 syn_data = NULL;
2906
2907fallback:
2908 /* Send a regular SYN with Fast Open cookie request option */
2909 if (fo->cookie.len > 0)
2910 fo->cookie.len = 0;
2911 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
2912 if (err)
2913 tp->syn_fastopen = 0;
2914 kfree_skb(syn_data);
2915done:
2916 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */
2917 return err;
2918}
2919
67edfef7 2920/* Build a SYN and send it off. */
1da177e4
LT
2921int tcp_connect(struct sock *sk)
2922{
2923 struct tcp_sock *tp = tcp_sk(sk);
2924 struct sk_buff *buff;
ee586811 2925 int err;
1da177e4
LT
2926
2927 tcp_connect_init(sk);
2928
2b916477
AV
2929 if (unlikely(tp->repair)) {
2930 tcp_finish_connect(sk, NULL);
2931 return 0;
2932 }
2933
d179cd12 2934 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
1da177e4
LT
2935 if (unlikely(buff == NULL))
2936 return -ENOBUFS;
2937
2938 /* Reserve space for headers. */
2939 skb_reserve(buff, MAX_TCP_HEADER);
2940
a3433f35 2941 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
783237e8
YC
2942 tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
2943 tcp_connect_queue_skb(sk, buff);
e870a8ef 2944 TCP_ECN_send_syn(sk, buff);
1da177e4 2945
783237e8
YC
2946 /* Send off SYN; include data in Fast Open. */
2947 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
2948 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
ee586811
EP
2949 if (err == -ECONNREFUSED)
2950 return err;
bd37a088
WY
2951
2952 /* We change tp->snd_nxt after the tcp_transmit_skb() call
2953 * in order to make this packet get counted in tcpOutSegs.
2954 */
2955 tp->snd_nxt = tp->write_seq;
2956 tp->pushed_seq = tp->write_seq;
81cc8a75 2957 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
1da177e4
LT
2958
2959 /* Timer for repeating the SYN until an answer. */
3f421baa
ACM
2960 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2961 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
1da177e4
LT
2962 return 0;
2963}
4bc2f18b 2964EXPORT_SYMBOL(tcp_connect);
1da177e4
LT
2965
2966/* Send out a delayed ack, the caller does the policy checking
2967 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
2968 * for details.
2969 */
2970void tcp_send_delayed_ack(struct sock *sk)
2971{
463c84b9
ACM
2972 struct inet_connection_sock *icsk = inet_csk(sk);
2973 int ato = icsk->icsk_ack.ato;
1da177e4
LT
2974 unsigned long timeout;
2975
2976 if (ato > TCP_DELACK_MIN) {
463c84b9 2977 const struct tcp_sock *tp = tcp_sk(sk);
056834d9 2978 int max_ato = HZ / 2;
1da177e4 2979
056834d9
IJ
2980 if (icsk->icsk_ack.pingpong ||
2981 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
1da177e4
LT
2982 max_ato = TCP_DELACK_MAX;
2983
2984 /* Slow path, intersegment interval is "high". */
2985
2986 /* If some rtt estimate is known, use it to bound delayed ack.
463c84b9 2987 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
1da177e4
LT
2988 * directly.
2989 */
2990 if (tp->srtt) {
056834d9 2991 int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
1da177e4
LT
2992
2993 if (rtt < max_ato)
2994 max_ato = rtt;
2995 }
2996
2997 ato = min(ato, max_ato);
2998 }
2999
3000 /* Stay within the limit we were given */
3001 timeout = jiffies + ato;
3002
 3003 /* Use the new timeout only if there wasn't an older one earlier. */
463c84b9 3004 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
1da177e4
LT
3005 /* If delack timer was blocked or is about to expire,
3006 * send ACK now.
3007 */
463c84b9
ACM
3008 if (icsk->icsk_ack.blocked ||
3009 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
1da177e4
LT
3010 tcp_send_ack(sk);
3011 return;
3012 }
3013
463c84b9
ACM
3014 if (!time_before(timeout, icsk->icsk_ack.timeout))
3015 timeout = icsk->icsk_ack.timeout;
1da177e4 3016 }
463c84b9
ACM
3017 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3018 icsk->icsk_ack.timeout = timeout;
3019 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
1da177e4
LT
3020}
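/* Illustrative sketch (not part of tcp_output.c): the clamp applied by
 * tcp_send_delayed_ack() above, restated in milliseconds.  half_sec_ms
 * stands in for HZ/2, the pingpong/ACK_PUSHED test is reduced to a single
 * flag, and all names are invented for the example.
 */
static int sketch_delack_ato_ms(int ato, int srtt_ms, int pingpong,
				int delack_min_ms, int delack_max_ms,
				int half_sec_ms)
{
	int max_ato = half_sec_ms;

	if (ato <= delack_min_ms)
		return ato;			/* small ato is used as-is   */
	if (pingpong)
		max_ato = delack_max_ms;
	if (srtt_ms) {				/* bound by the measured RTT */
		int rtt = srtt_ms > delack_min_ms ? srtt_ms : delack_min_ms;

		if (rtt < max_ato)
			max_ato = rtt;
	}
	return ato < max_ato ? ato : max_ato;
}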
3021
3022/* This routine sends an ack and also updates the window. */
3023void tcp_send_ack(struct sock *sk)
3024{
058dc334 3025 struct sk_buff *buff;
1da177e4 3026
058dc334
IJ
3027 /* If we have been reset, we may not send again. */
3028 if (sk->sk_state == TCP_CLOSE)
3029 return;
1da177e4 3030
058dc334
IJ
3031 /* We are not putting this on the write queue, so
3032 * tcp_transmit_skb() will set the ownership to this
3033 * sock.
3034 */
99a1dec7 3035 buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
058dc334
IJ
3036 if (buff == NULL) {
3037 inet_csk_schedule_ack(sk);
3038 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
3039 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
3040 TCP_DELACK_MAX, TCP_RTO_MAX);
3041 return;
1da177e4 3042 }
058dc334
IJ
3043
3044 /* Reserve space for headers and prepare control bits. */
3045 skb_reserve(buff, MAX_TCP_HEADER);
a3433f35 3046 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
058dc334
IJ
3047
3048 /* Send it off, this clears delayed acks for us. */
058dc334 3049 TCP_SKB_CB(buff)->when = tcp_time_stamp;
99a1dec7 3050 tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
1da177e4
LT
3051}
3052
3053/* This routine sends a packet with an out of date sequence
3054 * number. It assumes the other end will try to ack it.
3055 *
 3056 * Question: what should we send while in urgent mode?
3057 * 4.4BSD forces sending single byte of data. We cannot send
3058 * out of window data, because we have SND.NXT==SND.MAX...
3059 *
3060 * Current solution: to send TWO zero-length segments in urgent mode:
3061 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
3062 * out-of-date with SND.UNA-1 to probe window.
3063 */
3064static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
3065{
3066 struct tcp_sock *tp = tcp_sk(sk);
3067 struct sk_buff *skb;
3068
3069 /* We don't queue it, tcp_transmit_skb() sets ownership. */
99a1dec7 3070 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
e905a9ed 3071 if (skb == NULL)
1da177e4
LT
3072 return -1;
3073
3074 /* Reserve space for headers and set control bits. */
3075 skb_reserve(skb, MAX_TCP_HEADER);
1da177e4
LT
3076 /* Use a previous sequence. This should cause the other
3077 * end to send an ack. Don't queue or clone SKB, just
3078 * send it.
3079 */
a3433f35 3080 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
1da177e4 3081 TCP_SKB_CB(skb)->when = tcp_time_stamp;
dfb4b9dc 3082 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
1da177e4
LT
3083}
3084
ee995283
PE
3085void tcp_send_window_probe(struct sock *sk)
3086{
3087 if (sk->sk_state == TCP_ESTABLISHED) {
3088 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
c0e88ff0 3089 tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
ee995283
PE
3090 tcp_xmit_probe_skb(sk, 0);
3091 }
3092}
3093
67edfef7 3094/* Initiate keepalive or window probe from timer. */
1da177e4
LT
3095int tcp_write_wakeup(struct sock *sk)
3096{
058dc334
IJ
3097 struct tcp_sock *tp = tcp_sk(sk);
3098 struct sk_buff *skb;
1da177e4 3099
058dc334
IJ
3100 if (sk->sk_state == TCP_CLOSE)
3101 return -1;
3102
3103 if ((skb = tcp_send_head(sk)) != NULL &&
3104 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
3105 int err;
0c54b85f 3106 unsigned int mss = tcp_current_mss(sk);
058dc334
IJ
3107 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
3108
3109 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
3110 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
3111
3112 /* We are probing the opening of a window
 3113 * but the window size is != 0; this
 3114 * must have been a result of SWS avoidance (sender)
3115 */
3116 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
3117 skb->len > mss) {
3118 seg_size = min(seg_size, mss);
4de075e0 3119 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
058dc334
IJ
3120 if (tcp_fragment(sk, skb, seg_size, mss))
3121 return -1;
3122 } else if (!tcp_skb_pcount(skb))
3123 tcp_set_skb_tso_segs(sk, skb, mss);
3124
4de075e0 3125 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
058dc334
IJ
3126 TCP_SKB_CB(skb)->when = tcp_time_stamp;
3127 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3128 if (!err)
3129 tcp_event_new_data_sent(sk, skb);
3130 return err;
3131 } else {
33f5f57e 3132 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
058dc334
IJ
3133 tcp_xmit_probe_skb(sk, 1);
3134 return tcp_xmit_probe_skb(sk, 0);
1da177e4 3135 }
1da177e4
LT
3136}
3137
3138/* A window probe timeout has occurred. If window is not closed send
3139 * a partial packet else a zero probe.
3140 */
3141void tcp_send_probe0(struct sock *sk)
3142{
463c84b9 3143 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
3144 struct tcp_sock *tp = tcp_sk(sk);
3145 int err;
3146
3147 err = tcp_write_wakeup(sk);
3148
fe067e8a 3149 if (tp->packets_out || !tcp_send_head(sk)) {
1da177e4 3150 /* Cancel probe timer, if it is not required. */
6687e988 3151 icsk->icsk_probes_out = 0;
463c84b9 3152 icsk->icsk_backoff = 0;
1da177e4
LT
3153 return;
3154 }
3155
3156 if (err <= 0) {
463c84b9
ACM
3157 if (icsk->icsk_backoff < sysctl_tcp_retries2)
3158 icsk->icsk_backoff++;
6687e988 3159 icsk->icsk_probes_out++;
e905a9ed 3160 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3f421baa
ACM
3161 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
3162 TCP_RTO_MAX);
1da177e4
LT
3163 } else {
3164 /* If packet was not sent due to local congestion,
6687e988 3165 * do not backoff and do not remember icsk_probes_out.
1da177e4
LT
 3166 * Let local senders fight for local resources.
3167 *
 3168 * Still use the accumulated backoff, though.
3169 */
6687e988
ACM
3170 if (!icsk->icsk_probes_out)
3171 icsk->icsk_probes_out = 1;
e905a9ed 3172 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
463c84b9 3173 min(icsk->icsk_rto << icsk->icsk_backoff,
3f421baa
ACM
3174 TCP_RESOURCE_PROBE_INTERVAL),
3175 TCP_RTO_MAX);
1da177e4
LT
3176 }
3177}
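/* Illustrative sketch (not part of tcp_output.c): the exponential backoff
 * that tcp_send_probe0() above feeds to the probe timer, with invented
 * names.  With icsk_rto = 200 ms and a 120 s cap, successive failed
 * window probes are spaced 200, 400, 800, ... ms apart until the cap.
 */
static unsigned long sketch_probe0_timeout_ms(unsigned long rto_ms,
					      unsigned int backoff,
					      unsigned long rto_max_ms)
{
	unsigned long t = rto_ms << backoff;

	return t < rto_max_ms ? t : rto_max_ms;
}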