net/ipv6/seg6_iptunnel.c
/*
 * SR-IPv6 implementation
 *
 * Author:
 * David Lebrun <david.lebrun@uclouvain.be>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
#include <net/ip6_fib.h>
#include <net/route.h>
#include <net/seg6.h>
#include <linux/seg6.h>
#include <linux/seg6_iptunnel.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#ifdef CONFIG_DST_CACHE
#include <net/dst_cache.h>
#endif
#ifdef CONFIG_IPV6_SEG6_HMAC
#include <net/seg6_hmac.h>
#endif

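/* Per-route lwtunnel state: an optional dst cache followed by the SRH
 * encapsulation parameters (mode + SRH) copied from the netlink attribute.
 */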
struct seg6_lwt {
#ifdef CONFIG_DST_CACHE
	struct dst_cache cache;
#endif
	struct seg6_iptunnel_encap tuninfo[0];
};

static inline struct seg6_lwt *seg6_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct seg6_lwt *)lwt->data;
}

static inline struct seg6_iptunnel_encap *
seg6_encap_lwtunnel(struct lwtunnel_state *lwt)
{
	return seg6_lwt_lwtunnel(lwt)->tuninfo;
}

static const struct nla_policy seg6_iptunnel_policy[SEG6_IPTUNNEL_MAX + 1] = {
	[SEG6_IPTUNNEL_SRH]	= { .type = NLA_BINARY },
};

static int nla_put_srh(struct sk_buff *skb, int attrtype,
		       struct seg6_iptunnel_encap *tuninfo)
{
	struct seg6_iptunnel_encap *data;
	struct nlattr *nla;
	int len;

	len = SEG6_IPTUN_ENCAP_SIZE(tuninfo);

	nla = nla_reserve(skb, attrtype, len);
	if (!nla)
		return -EMSGSIZE;

	data = nla_data(nla);
	memcpy(data, tuninfo, len);

	return 0;
}

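/* Choose the source address of the outer header: use the per-netns tunnel
 * source if one is configured, otherwise pick an address on the egress
 * device that is suitable for the outer destination.
 */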
static void set_tun_src(struct net *net, struct net_device *dev,
			struct in6_addr *daddr, struct in6_addr *saddr)
{
	struct seg6_pernet_data *sdata = seg6_pernet(net);
	struct in6_addr *tun_src;

	rcu_read_lock();

	tun_src = rcu_dereference(sdata->tun_src);

	if (!ipv6_addr_any(tun_src)) {
		memcpy(saddr, tun_src, sizeof(struct in6_addr));
	} else {
		ipv6_dev_get_saddr(net, dev, daddr, IPV6_PREFER_SRC_PUBLIC,
				   saddr);
	}

	rcu_read_unlock();
}

/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */
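/* Resulting packet layout:
 *
 *   | outer IPv6 hdr | SRH | inner IPv6 hdr | payload |
 *
 * The outer destination is the active segment (segments[first_segment]);
 * the outer source comes from set_tun_src(). tc, flowlabel and hop limit
 * are inherited from the inner header.
 */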
static int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct ipv6hdr *hdr, *inner_hdr;
	struct ipv6_sr_hdr *isrh;
	int hdrlen, tot_len, err;

	hdrlen = (osrh->hdrlen + 1) << 3;
	tot_len = hdrlen + sizeof(*hdr);

	err = pskb_expand_head(skb, tot_len, 0, GFP_ATOMIC);
	if (unlikely(err))
		return err;

	inner_hdr = ipv6_hdr(skb);

	skb_push(skb, tot_len);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	hdr = ipv6_hdr(skb);

	/* inherit tc, flowlabel and hlim
	 * hlim will be decremented in ip6_forward() afterwards and
	 * decapsulation will overwrite inner hlim with outer hlim
	 */
	ip6_flow_hdr(hdr, ip6_tclass(ip6_flowinfo(inner_hdr)),
		     ip6_flowlabel(inner_hdr));
	hdr->hop_limit = inner_hdr->hop_limit;
	hdr->nexthdr = NEXTHDR_ROUTING;

	isrh = (void *)hdr + sizeof(*hdr);
	memcpy(isrh, osrh, hdrlen);

	isrh->nexthdr = NEXTHDR_IPV6;

	hdr->daddr = isrh->segments[isrh->first_segment];
	set_tun_src(net, skb->dev, &hdr->daddr, &hdr->saddr);

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (sr_has_hmac(isrh)) {
		err = seg6_push_hmac(net, &hdr->saddr, isrh);
		if (unlikely(err))
			return err;
	}
#endif

	skb_postpush_rcsum(skb, hdr, tot_len);

	return 0;
}

/* insert an SRH within an IPv6 packet, just after the IPv6 header */
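/* Resulting packet layout:
 *
 *   | IPv6 hdr (daddr = active segment) | SRH | payload |
 *
 * The original destination address is kept as segments[0], i.e. the final
 * destination of the segment list.
 */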
#ifdef CONFIG_IPV6_SEG6_INLINE
static int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
{
	struct ipv6hdr *hdr, *oldhdr;
	struct ipv6_sr_hdr *isrh;
	int hdrlen, err;

	hdrlen = (osrh->hdrlen + 1) << 3;

	err = pskb_expand_head(skb, hdrlen, 0, GFP_ATOMIC);
	if (unlikely(err))
		return err;

	oldhdr = ipv6_hdr(skb);

	skb_pull(skb, sizeof(struct ipv6hdr));
	skb_postpull_rcsum(skb, skb_network_header(skb),
			   sizeof(struct ipv6hdr));

	skb_push(skb, sizeof(struct ipv6hdr) + hdrlen);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	hdr = ipv6_hdr(skb);

	memmove(hdr, oldhdr, sizeof(*hdr));

	isrh = (void *)hdr + sizeof(*hdr);
	memcpy(isrh, osrh, hdrlen);

	isrh->nexthdr = hdr->nexthdr;
	hdr->nexthdr = NEXTHDR_ROUTING;

	isrh->segments[0] = hdr->daddr;
	hdr->daddr = isrh->segments[isrh->first_segment];

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (sr_has_hmac(isrh)) {
		struct net *net = dev_net(skb_dst(skb)->dev);

		err = seg6_push_hmac(net, &hdr->saddr, isrh);
		if (unlikely(err))
			return err;
	}
#endif

	skb_postpush_rcsum(skb, hdr, sizeof(struct ipv6hdr) + hdrlen);

	return 0;
}
#endif

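/* Apply the SRH configured on the route according to the tunnel mode
 * (inline insertion or full encapsulation), then fix up the outer payload
 * length and the header offsets.
 */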
static int seg6_do_srh(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct seg6_iptunnel_encap *tinfo;
	int err = 0;

	tinfo = seg6_encap_lwtunnel(dst->lwtstate);

	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	switch (tinfo->mode) {
#ifdef CONFIG_IPV6_SEG6_INLINE
	case SEG6_IPTUN_MODE_INLINE:
		err = seg6_do_srh_inline(skb, tinfo->srh);
		skb_reset_inner_headers(skb);
		break;
#endif
	case SEG6_IPTUN_MODE_ENCAP:
		err = seg6_do_srh_encap(skb, tinfo->srh);
		break;
	}

	if (err)
		return err;

	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	skb_set_transport_header(skb, sizeof(struct ipv6hdr));

	skb_set_inner_protocol(skb, skb->protocol);

	return 0;
}

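/* Input path (LWTUNNEL_STATE_INPUT_REDIRECT): the destination address has
 * been rewritten by seg6_do_srh(), so drop the old dst and redo the input
 * route lookup before handing the packet to dst_input().
 */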
int seg6_input(struct sk_buff *skb)
{
	int err;

	err = seg6_do_srh(skb);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}

	skb_dst_drop(skb);
	ip6_route_input(skb);

	return dst_input(skb);
}

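/* Output path (LWTUNNEL_STATE_OUTPUT_REDIRECT): route the rewritten packet
 * and, when CONFIG_DST_CACHE is enabled, cache the resulting dst so that
 * subsequent packets on this route skip the lookup.
 */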
int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *orig_dst = skb_dst(skb);
	struct dst_entry *dst = NULL;
	struct seg6_lwt *slwt;
	int err = -EINVAL;

	err = seg6_do_srh(skb);
	if (unlikely(err))
		goto drop;

	slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);

#ifdef CONFIG_DST_CACHE
	dst = dst_cache_get(&slwt->cache);
#endif

	if (unlikely(!dst)) {
		struct ipv6hdr *hdr = ipv6_hdr(skb);
		struct flowi6 fl6;

		/* only the fields below are meaningful for the lookup, so
		 * clear the flow key first instead of leaving stack garbage
		 * in the remaining fields
		 */
		memset(&fl6, 0, sizeof(fl6));
		fl6.daddr = hdr->daddr;
		fl6.saddr = hdr->saddr;
		fl6.flowlabel = ip6_flowinfo(hdr);
		fl6.flowi6_mark = skb->mark;
		fl6.flowi6_proto = hdr->nexthdr;

		dst = ip6_route_output(net, NULL, &fl6);
		if (dst->error) {
			err = dst->error;
			dst_release(dst);
			goto drop;
		}

#ifdef CONFIG_DST_CACHE
		dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
#endif
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return dst_output(net, sk, skb);
drop:
	kfree_skb(skb);
	return err;
}

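/* Build the lwtunnel state from the SEG6_IPTUNNEL_SRH netlink attribute,
 * which carries a struct seg6_iptunnel_encap (tunnel mode) immediately
 * followed by the SRH itself. The blob is size- and consistency-checked
 * before being copied into the new state.
 */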
static int seg6_build_state(struct net_device *dev, struct nlattr *nla,
			    unsigned int family, const void *cfg,
			    struct lwtunnel_state **ts)
{
	struct nlattr *tb[SEG6_IPTUNNEL_MAX + 1];
	struct seg6_iptunnel_encap *tuninfo;
	struct lwtunnel_state *newts;
	int tuninfo_len, min_size;
	struct seg6_lwt *slwt;
	int err;

	err = nla_parse_nested(tb, SEG6_IPTUNNEL_MAX, nla,
			       seg6_iptunnel_policy);

	if (err < 0)
		return err;

	if (!tb[SEG6_IPTUNNEL_SRH])
		return -EINVAL;

	tuninfo = nla_data(tb[SEG6_IPTUNNEL_SRH]);
	tuninfo_len = nla_len(tb[SEG6_IPTUNNEL_SRH]);

	/* tuninfo must contain at least the iptunnel encap structure,
	 * the SRH and one segment
	 */
	min_size = sizeof(*tuninfo) + sizeof(struct ipv6_sr_hdr) +
		   sizeof(struct in6_addr);
	if (tuninfo_len < min_size)
		return -EINVAL;

	switch (tuninfo->mode) {
#ifdef CONFIG_IPV6_SEG6_INLINE
	case SEG6_IPTUN_MODE_INLINE:
		break;
#endif
	case SEG6_IPTUN_MODE_ENCAP:
		break;
	default:
		return -EINVAL;
	}

	/* verify that SRH is consistent */
	if (!seg6_validate_srh(tuninfo->srh, tuninfo_len - sizeof(*tuninfo)))
		return -EINVAL;

	newts = lwtunnel_state_alloc(tuninfo_len + sizeof(*slwt));
	if (!newts)
		return -ENOMEM;

	slwt = seg6_lwt_lwtunnel(newts);

#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&slwt->cache, GFP_KERNEL);
	if (err) {
		kfree(newts);
		return err;
	}
#endif

	memcpy(&slwt->tuninfo, tuninfo, tuninfo_len);

	newts->type = LWTUNNEL_ENCAP_SEG6;
	newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT |
			LWTUNNEL_STATE_INPUT_REDIRECT;
	newts->headroom = seg6_lwt_headroom(tuninfo);

	*ts = newts;

	return 0;
}

#ifdef CONFIG_DST_CACHE
static void seg6_destroy_state(struct lwtunnel_state *lwt)
{
	dst_cache_destroy(&seg6_lwt_lwtunnel(lwt)->cache);
}
#endif

static int seg6_fill_encap_info(struct sk_buff *skb,
				struct lwtunnel_state *lwtstate)
{
	struct seg6_iptunnel_encap *tuninfo = seg6_encap_lwtunnel(lwtstate);

	if (nla_put_srh(skb, SEG6_IPTUNNEL_SRH, tuninfo))
		return -EMSGSIZE;

	return 0;
}

static int seg6_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	struct seg6_iptunnel_encap *tuninfo = seg6_encap_lwtunnel(lwtstate);

	return nla_total_size(SEG6_IPTUN_ENCAP_SIZE(tuninfo));
}

static int seg6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct seg6_iptunnel_encap *a_hdr = seg6_encap_lwtunnel(a);
	struct seg6_iptunnel_encap *b_hdr = seg6_encap_lwtunnel(b);
	int len = SEG6_IPTUN_ENCAP_SIZE(a_hdr);

	if (len != SEG6_IPTUN_ENCAP_SIZE(b_hdr))
		return 1;

	return memcmp(a_hdr, b_hdr, len);
}

static const struct lwtunnel_encap_ops seg6_iptun_ops = {
	.build_state = seg6_build_state,
#ifdef CONFIG_DST_CACHE
	.destroy_state = seg6_destroy_state,
#endif
	.output = seg6_output,
	.input = seg6_input,
	.fill_encap = seg6_fill_encap_info,
	.get_encap_size = seg6_encap_nlsize,
	.cmp_encap = seg6_encap_cmp,
};

int __init seg6_iptunnel_init(void)
{
	return lwtunnel_encap_add_ops(&seg6_iptun_ops, LWTUNNEL_ENCAP_SEG6);
}

void seg6_iptunnel_exit(void)
{
	lwtunnel_encap_del_ops(&seg6_iptun_ops, LWTUNNEL_ENCAP_SEG6);
}