/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}
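
/* A worked example with invented numbers: if the original super-skb
 * started at seq 1000 with mss 1400 and userspace asked for a timestamp
 * at ts_seq 3500, the walk above covers [1000,2400), [2400,3800), ...
 * and tags the second segment, the first one for which
 * before(3500, seq + 1400) holds.
 */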

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo header; we usually expect the
		 * stack to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}
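
/* A sketch of the CHECKSUM_PARTIAL convention relied on above: the stack
 * (here via __tcp_v4_send_check()) seeds th->check with only the
 * pseudo-header sum and records csum_start/csum_offset in the skb, so
 * whoever completes the packet, hardware or the GSO code below, just
 * folds the TCP header and payload into that seed.
 */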

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCP_FIXEDID |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_GRE_CSUM |
			       SKB_GSO_IPIP |
			       SKB_GSO_SIT |
			       SKB_GSO_UDP_TUNNEL |
			       SKB_GSO_UDP_TUNNEL_CSUM |
			       SKB_GSO_TUNNEL_REMCSUM |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 |
				       SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	/* GSO partial only requires splitting the frame into an MSS
	 * multiple and possibly a remainder. So update the mss now.
	 */
	if (features & NETIF_F_GSO_PARTIAL)
		mss = skb->len - (skb->len % mss);
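
	/* Illustrative arithmetic (example values, not from this code):
	 * with skb->len = 10000 and mss = 1448, mss becomes
	 * 10000 - (10000 % 1448) = 8688 (6 * 1448), so skb_segment()
	 * emits one 8688 byte chunk plus a 1312 byte remainder.
	 */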

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));
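
	/* This is a ones'-complement checksum adjustment in the spirit of
	 * RFC 1624: oldlen holds ~(original TCP length), captured before
	 * the header pull, so th->check + ~old_len + (thlen + mss) swaps
	 * the old pseudo-header length for each new segment's length
	 * without re-summing any payload bytes.
	 */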

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will run when the last frag is
	 * freed at TX completion, and not right now when gso_skb is freed
	 * by the GSO engine.
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}

	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
						(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

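		/* th->source and th->dest are adjacent 16-bit fields, so
		 * one aligned 32-bit load and XOR compares both ports at
		 * once; any difference means p belongs to another flow.
		 */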
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed ID
	 * or if we use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

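	/* Refuse to merge (and flush p) if this segment carries more than
	 * one MSS worth of data, or if its sequence number is not exactly
	 * the next byte after the data already aggregated in p.
	 */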
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
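
/* tcp_gro_complete() is exported because the steps above are independent
 * of the IP version; the IPv6 TCP offload path, among others, reuses it
 * after filling in its own pseudo-header checksum.
 */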

static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}
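
/* A note on wiring, based on how the IPv4 offloads are initialized
 * elsewhere in the stack: tcpv4_offload_init() is expected to run once at
 * boot, and inet_add_offload() installs &tcpv4_offload in the inet
 * offload table so the core GSO/GRO engine dispatches IPPROTO_TCP
 * packets to the callbacks above.
 */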