1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	GRE GSO support
 */
#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>
14 static struct sk_buff
*gre_gso_segment(struct sk_buff
*skb
,
15 netdev_features_t features
)
17 int tnl_hlen
= skb_inner_mac_header(skb
) - skb_transport_header(skb
);
18 bool need_csum
, offload_csum
, gso_partial
, need_ipsec
;
19 struct sk_buff
*segs
= ERR_PTR(-EINVAL
);
20 u16 mac_offset
= skb
->mac_header
;
21 __be16 protocol
= skb
->protocol
;
22 u16 mac_len
= skb
->mac_len
;
23 int gre_offset
, outer_hlen
;
25 if (!skb
->encapsulation
)
28 if (unlikely(tnl_hlen
< sizeof(struct gre_base_hdr
)))
31 if (unlikely(!pskb_may_pull(skb
, tnl_hlen
)))
34 /* setup inner skb. */
35 skb
->encapsulation
= 0;
36 SKB_GSO_CB(skb
)->encap_level
= 0;
37 __skb_pull(skb
, tnl_hlen
);
38 skb_reset_mac_header(skb
);
39 skb_set_network_header(skb
, skb_inner_network_offset(skb
));
40 skb
->mac_len
= skb_inner_network_offset(skb
);
41 skb
->protocol
= skb
->inner_protocol
;
43 need_csum
= !!(skb_shinfo(skb
)->gso_type
& SKB_GSO_GRE_CSUM
);
44 skb
->encap_hdr_csum
= need_csum
;
46 features
&= skb
->dev
->hw_enc_features
;
48 features
&= ~NETIF_F_SCTP_CRC
;
50 need_ipsec
= skb_dst(skb
) && dst_xfrm(skb_dst(skb
));
51 /* Try to offload checksum if possible */
52 offload_csum
= !!(need_csum
&& !need_ipsec
&&
53 (skb
->dev
->features
& NETIF_F_HW_CSUM
));
55 /* segment inner packet. */
56 segs
= skb_mac_gso_segment(skb
, features
);
57 if (IS_ERR_OR_NULL(segs
)) {
58 skb_gso_error_unwind(skb
, protocol
, tnl_hlen
, mac_offset
,
63 gso_partial
= !!(skb_shinfo(segs
)->gso_type
& SKB_GSO_PARTIAL
);
65 outer_hlen
= skb_tnl_header_len(skb
);
66 gre_offset
= outer_hlen
- tnl_hlen
;
69 struct gre_base_hdr
*greh
;
72 /* Set up inner headers if we are offloading inner checksum */
73 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
74 skb_reset_inner_headers(skb
);
75 skb
->encapsulation
= 1;
78 skb
->mac_len
= mac_len
;
79 skb
->protocol
= protocol
;
81 __skb_push(skb
, outer_hlen
);
82 skb_reset_mac_header(skb
);
83 skb_set_network_header(skb
, mac_len
);
84 skb_set_transport_header(skb
, gre_offset
);
89 greh
= (struct gre_base_hdr
*)skb_transport_header(skb
);
90 pcsum
= (__sum16
*)(greh
+ 1);
92 if (gso_partial
&& skb_is_gso(skb
)) {
93 unsigned int partial_adj
;
95 /* Adjust checksum to account for the fact that
96 * the partial checksum is based on actual size
97 * whereas headers should be based on MSS size.
99 partial_adj
= skb
->len
+ skb_headroom(skb
) -
100 SKB_GSO_CB(skb
)->data_offset
-
101 skb_shinfo(skb
)->gso_size
;
102 *pcsum
= ~csum_fold((__force __wsum
)htonl(partial_adj
));
108 if (skb
->encapsulation
|| !offload_csum
) {
109 *pcsum
= gso_make_checksum(skb
, 0);
111 skb
->ip_summed
= CHECKSUM_PARTIAL
;
112 skb
->csum_start
= skb_transport_header(skb
) - skb
->head
;
113 skb
->csum_offset
= sizeof(*greh
);
115 } while ((skb
= skb
->next
));
120 static struct sk_buff
*gre_gro_receive(struct list_head
*head
,
123 struct sk_buff
*pp
= NULL
;
125 const struct gre_base_hdr
*greh
;
126 unsigned int hlen
, grehlen
;
129 struct packet_offload
*ptype
;
132 if (NAPI_GRO_CB(skb
)->encap_mark
)
135 NAPI_GRO_CB(skb
)->encap_mark
= 1;
137 off
= skb_gro_offset(skb
);
138 hlen
= off
+ sizeof(*greh
);
139 greh
= skb_gro_header_fast(skb
, off
);
140 if (skb_gro_header_hard(skb
, hlen
)) {
141 greh
= skb_gro_header_slow(skb
, hlen
, off
);
146 /* Only support version 0 and K (key), C (csum) flags. Note that
147 * although the support for the S (seq#) flag can be added easily
148 * for GRO, this is problematic for GSO hence can not be enabled
149 * here because a GRO pkt may end up in the forwarding path, thus
150 * requiring GSO support to break it up correctly.
152 if ((greh
->flags
& ~(GRE_KEY
|GRE_CSUM
)) != 0)
155 /* We can only support GRE_CSUM if we can track the location of
156 * the GRE header. In the case of FOU/GUE we cannot because the
157 * outer UDP header displaces the GRE header leaving us in a state
160 if ((greh
->flags
& GRE_CSUM
) && NAPI_GRO_CB(skb
)->is_fou
)
163 type
= greh
->protocol
;
166 ptype
= gro_find_receive_by_type(type
);
170 grehlen
= GRE_HEADER_SECTION
;
172 if (greh
->flags
& GRE_KEY
)
173 grehlen
+= GRE_HEADER_SECTION
;
175 if (greh
->flags
& GRE_CSUM
)
176 grehlen
+= GRE_HEADER_SECTION
;
178 hlen
= off
+ grehlen
;
179 if (skb_gro_header_hard(skb
, hlen
)) {
180 greh
= skb_gro_header_slow(skb
, hlen
, off
);
185 /* Don't bother verifying checksum if we're going to flush anyway. */
186 if ((greh
->flags
& GRE_CSUM
) && !NAPI_GRO_CB(skb
)->flush
) {
187 if (skb_gro_checksum_simple_validate(skb
))
190 skb_gro_checksum_try_convert(skb
, IPPROTO_GRE
,
191 null_compute_pseudo
);
194 list_for_each_entry(p
, head
, list
) {
195 const struct gre_base_hdr
*greh2
;
197 if (!NAPI_GRO_CB(p
)->same_flow
)
200 /* The following checks are needed to ensure only pkts
201 * from the same tunnel are considered for aggregation.
202 * The criteria for "the same tunnel" includes:
203 * 1) same version (we only support version 0 here)
204 * 2) same protocol (we only support ETH_P_IP for now)
205 * 3) same set of flags
206 * 4) same key if the key field is present.
208 greh2
= (struct gre_base_hdr
*)(p
->data
+ off
);
210 if (greh2
->flags
!= greh
->flags
||
211 greh2
->protocol
!= greh
->protocol
) {
212 NAPI_GRO_CB(p
)->same_flow
= 0;
215 if (greh
->flags
& GRE_KEY
) {
217 if (*(__be32
*)(greh2
+1) != *(__be32
*)(greh
+1)) {
218 NAPI_GRO_CB(p
)->same_flow
= 0;
224 skb_gro_pull(skb
, grehlen
);
226 /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
227 skb_gro_postpull_rcsum(skb
, greh
, grehlen
);
229 pp
= call_gro_receive(ptype
->callbacks
.gro_receive
, head
, skb
);
235 skb_gro_flush_final(skb
, pp
, flush
);
240 static int gre_gro_complete(struct sk_buff
*skb
, int nhoff
)
242 struct gre_base_hdr
*greh
= (struct gre_base_hdr
*)(skb
->data
+ nhoff
);
243 struct packet_offload
*ptype
;
244 unsigned int grehlen
= sizeof(*greh
);
248 skb
->encapsulation
= 1;
249 skb_shinfo(skb
)->gso_type
= SKB_GSO_GRE
;
251 type
= greh
->protocol
;
252 if (greh
->flags
& GRE_KEY
)
253 grehlen
+= GRE_HEADER_SECTION
;
255 if (greh
->flags
& GRE_CSUM
)
256 grehlen
+= GRE_HEADER_SECTION
;
259 ptype
= gro_find_complete_by_type(type
);
261 err
= ptype
->callbacks
.gro_complete(skb
, nhoff
+ grehlen
);
265 skb_set_inner_mac_header(skb
, nhoff
+ grehlen
);
270 static const struct net_offload gre_offload
= {
272 .gso_segment
= gre_gso_segment
,
273 .gro_receive
= gre_gro_receive
,
274 .gro_complete
= gre_gro_complete
,
278 static int __init
gre_offload_init(void)
282 err
= inet_add_offload(&gre_offload
, IPPROTO_GRE
);
283 #if IS_ENABLED(CONFIG_IPV6)
287 err
= inet6_add_offload(&gre_offload
, IPPROTO_GRE
);
289 inet_del_offload(&gre_offload
, IPPROTO_GRE
);
294 device_initcall(gre_offload_init
);