/*
 * IPV6 GSO/GRO offload support
 * Linux INET6 implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>

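/* Return the offset of the next-header field that announces ESP:
 * either the nexthdr field of the IPv6 base header, or, after walking
 * the extension header chain within the first nhlen bytes, the start
 * of the extension header preceding ESP. Returns 0 if ESP is not
 * found within the pulled headers.
 */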
static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
	int off = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr;

	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
		return offsetof(struct ipv6hdr, nexthdr);

	while (off < nhlen) {
		exthdr = (void *)ipv6_hdr + off;
		if (exthdr->nexthdr == NEXTHDR_ESP)
			return off;

		off += ipv6_optlen(exthdr);
	}

	return 0;
}

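/* GRO receive handler for ESP: parse the SPI and sequence number,
 * attach a secpath with the matching state if the device did not
 * already decrypt the packet, and hand the segment to xfrm_input().
 * xfrm_input() consumes the skb, so ERR_PTR(-EINPROGRESS) is returned
 * to take it out of GRO processing; on failure the packet is flushed
 * back to the normal receive path.
 */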
static struct sk_buff *esp6_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int nhoff;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq);
	if (err)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		err = secpath_set(skb);
		if (err)
			goto out;

		if (skb->sp->len == XFRM_MAX_DEPTH)
			goto out;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET6);
		if (!x)
			goto out;

		skb->sp->xvec[skb->sp->len++] = x;
		skb->sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out;
		}
	}

	xo->flags |= XFRM_GRO;

	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
	if (!nhoff)
		goto out;

	IP6CB(skb)->nhoff = nhoff;
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

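/* Build the outer ESP header for GSO output: remember the inner
 * protocol in the offload context, flag the packet as ESP in the mac
 * header and fill in the SPI and the low 32 bits of the output
 * sequence number.
 */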
static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->nexthdr;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

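/* GSO callback: validate the offload state and the ESP header, strip
 * the header and IV, and let the outer mode do the actual
 * segmentation. Scatter-gather and checksum features are masked out
 * whenever the segments cannot be handed to ESP hardware offload.
 */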
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	x = skb->sp->xvec[skb->sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~NETIF_F_CSUM_MASK;

	xo->flags |= XFRM_GSO_SEGMENT;

	return x->outer_mode->gso_segment(x, skb, esp_features);
}

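/* Finish ESP input processing after the decrypt step: make sure the
 * ESP header and IV are linear, invalidate the checksum if the crypto
 * was done in software, then complete the receive via
 * esp6_input_done2().
 */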
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp6_input_done2(skb, 0);
}

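/* Transmit side of the offload path: size the ESP trailer, build the
 * ESP header, and fix up the IPv6 payload length. If the packet
 * cannot be handed to the device (no ESP hardware offload, or a
 * device mismatch), encryption falls back to software via
 * esp6_output_tail().
 */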
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int len;
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

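	/* Trailer sizing: the payload plus the two trailer bytes (pad
	 * length and next header) is padded to the cipher block size,
	 * and the ICV of alen bytes follows. For example, with a
	 * 16-byte block size and skb->len == 1452: clen =
	 * ALIGN(1454, 16) = 1456, plen = 4 and tailen = 4 + alen.
	 */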
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

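	/* Combine the two 32-bit halves of the (extended) sequence
	 * number into the 64-bit value used for crypto processing.
	 */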
	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;

	ipv6_hdr(skb)->payload_len = htons(len);

	if (hw_offload)
		return 0;

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};

static const struct xfrm_type_offload esp6_type_offload = {
	.description	= "ESP6 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};

static int __init esp6_offload_init(void)
{
	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
	if (xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type offload\n", __func__);

	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);