net/ipv6/seg6_local.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * SR-IPv6 implementation
4 *
5 * Authors:
6 * David Lebrun <david.lebrun@uclouvain.be>
7 * eBPF support: Mathieu Xhonneux <m.xhonneux@gmail.com>
8 */
9
10 #include <linux/types.h>
11 #include <linux/skbuff.h>
12 #include <linux/net.h>
13 #include <linux/module.h>
14 #include <net/ip.h>
15 #include <net/lwtunnel.h>
16 #include <net/netevent.h>
17 #include <net/netns/generic.h>
18 #include <net/ip6_fib.h>
19 #include <net/route.h>
20 #include <net/seg6.h>
21 #include <linux/seg6.h>
22 #include <linux/seg6_local.h>
23 #include <net/addrconf.h>
24 #include <net/ip6_route.h>
25 #include <net/dst_cache.h>
26 #include <net/ip_tunnels.h>
27 #ifdef CONFIG_IPV6_SEG6_HMAC
28 #include <net/seg6_hmac.h>
29 #endif
30 #include <net/seg6_local.h>
31 #include <linux/etherdevice.h>
32 #include <linux/bpf.h>
33
34 struct seg6_local_lwt;
35
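/* Describes a single SRv6 local action: the SEG6_LOCAL_ACTION_* code, a
 * bitmask of the netlink attributes the action requires, the packet
 * handler invoked on input, and any extra headroom the action needs.
 */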
36 struct seg6_action_desc {
37 int action;
38 unsigned long attrs;
39 int (*input)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
40 int static_headroom;
41 };
42
43 struct bpf_lwt_prog {
44 struct bpf_prog *prog;
45 char *name;
46 };
47
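/* Per-route state of a seg6local lightweight tunnel, built from the
 * netlink attributes supplied when the route is installed.
 */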
48 struct seg6_local_lwt {
49 int action;
50 struct ipv6_sr_hdr *srh;
51 int table;
52 struct in_addr nh4;
53 struct in6_addr nh6;
54 int iif;
55 int oif;
56 struct bpf_lwt_prog bpf;
57
58 int headroom;
59 struct seg6_action_desc *desc;
60 };
61
62 static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt)
63 {
64 return (struct seg6_local_lwt *)lwt->data;
65 }
66
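/* Locate the Segment Routing Header in @skb, make sure it is part of the
 * linear area and validate it. Returns NULL if no valid SRH is found.
 */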
67 static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb)
68 {
69 struct ipv6_sr_hdr *srh;
70 int len, srhoff = 0;
71
72 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
73 return NULL;
74
75 if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
76 return NULL;
77
78 srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
79
80 len = (srh->hdrlen + 1) << 3;
81
82 if (!pskb_may_pull(skb, srhoff + len))
83 return NULL;
84
85 /* Note that pskb_may_pull() may reallocate the skb head and thus
86 * invalidate pointers into the packet; reload them when needed.
87 */
88 srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
89
90 if (!seg6_validate_srh(srh, len))
91 return NULL;
92
93 return srh;
94 }
95
96 static struct ipv6_sr_hdr *get_and_validate_srh(struct sk_buff *skb)
97 {
98 struct ipv6_sr_hdr *srh;
99
100 srh = get_srh(skb);
101 if (!srh)
102 return NULL;
103
104 if (srh->segments_left == 0)
105 return NULL;
106
107 #ifdef CONFIG_IPV6_SEG6_HMAC
108 if (!seg6_hmac_validate_skb(skb))
109 return NULL;
110 #endif
111
112 return srh;
113 }
114
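/* Strip the outer IPv6 encapsulation (and SRH, if any) down to the inner
 * header identified by @proto, resetting the network and transport
 * headers. An SRH is only accepted here with segments_left == 0, i.e. at
 * the end of the segment list.
 */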
115 static bool decap_and_validate(struct sk_buff *skb, int proto)
116 {
117 struct ipv6_sr_hdr *srh;
118 unsigned int off = 0;
119
120 srh = get_srh(skb);
121 if (srh && srh->segments_left > 0)
122 return false;
123
124 #ifdef CONFIG_IPV6_SEG6_HMAC
125 if (srh && !seg6_hmac_validate_skb(skb))
126 return false;
127 #endif
128
129 if (ipv6_find_hdr(skb, &off, proto, NULL, NULL) < 0)
130 return false;
131
132 if (!pskb_pull(skb, off))
133 return false;
134
135 skb_postpull_rcsum(skb, skb_network_header(skb), off);
136
137 skb_reset_network_header(skb);
138 skb_reset_transport_header(skb);
139 if (iptunnel_pull_offloads(skb))
140 return false;
141
142 return true;
143 }
144
145 static void advance_nextseg(struct ipv6_sr_hdr *srh, struct in6_addr *daddr)
146 {
147 struct in6_addr *addr;
148
149 srh->segments_left--;
150 addr = srh->segments + srh->segments_left;
151 *daddr = *addr;
152 }
153
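/* Perform a FIB lookup for the packet's (possibly rewritten) destination,
 * or for @nhaddr when one is given, optionally in table @tbl_id, and
 * attach the resulting dst to @skb. Unless @local_delivery is set, routes
 * that resolve to local delivery are discarded and the blackhole entry is
 * used instead. Returns the error code of the installed dst (0 on success).
 */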
154 static int
155 seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
156 u32 tbl_id, bool local_delivery)
157 {
158 struct net *net = dev_net(skb->dev);
159 struct ipv6hdr *hdr = ipv6_hdr(skb);
160 int flags = RT6_LOOKUP_F_HAS_SADDR;
161 struct dst_entry *dst = NULL;
162 struct rt6_info *rt;
163 struct flowi6 fl6;
164 int dev_flags = 0;
165
166 fl6.flowi6_iif = skb->dev->ifindex;
167 fl6.daddr = nhaddr ? *nhaddr : hdr->daddr;
168 fl6.saddr = hdr->saddr;
169 fl6.flowlabel = ip6_flowinfo(hdr);
170 fl6.flowi6_mark = skb->mark;
171 fl6.flowi6_proto = hdr->nexthdr;
172
173 if (nhaddr)
174 fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
175
176 if (!tbl_id) {
177 dst = ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags);
178 } else {
179 struct fib6_table *table;
180
181 table = fib6_get_table(net, tbl_id);
182 if (!table)
183 goto out;
184
185 rt = ip6_pol_route(net, table, 0, &fl6, skb, flags);
186 dst = &rt->dst;
187 }
188
189 /* Discard traffic destined for local packet processing
190 * unless @local_delivery is set.
191 */
192 if (!local_delivery)
193 dev_flags |= IFF_LOOPBACK;
194
195 if (dst && (dst->dev->flags & dev_flags) && !dst->error) {
196 dst_release(dst);
197 dst = NULL;
198 }
199
200 out:
201 if (!dst) {
202 rt = net->ipv6.ip6_blk_hole_entry;
203 dst = &rt->dst;
204 dst_hold(dst);
205 }
206
207 skb_dst_drop(skb);
208 skb_dst_set(skb, dst);
209 return dst->error;
210 }
211
212 int seg6_lookup_nexthop(struct sk_buff *skb,
213 struct in6_addr *nhaddr, u32 tbl_id)
214 {
215 return seg6_lookup_any_nexthop(skb, nhaddr, tbl_id, false);
216 }
217
218 /* regular endpoint function */
219 static int input_action_end(struct sk_buff *skb, struct seg6_local_lwt *slwt)
220 {
221 struct ipv6_sr_hdr *srh;
222
223 srh = get_and_validate_srh(skb);
224 if (!srh)
225 goto drop;
226
227 advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
228
229 seg6_lookup_nexthop(skb, NULL, 0);
230
231 return dst_input(skb);
232
233 drop:
234 kfree_skb(skb);
235 return -EINVAL;
236 }
237
238 /* regular endpoint, and forward to specified nexthop */
239 static int input_action_end_x(struct sk_buff *skb, struct seg6_local_lwt *slwt)
240 {
241 struct ipv6_sr_hdr *srh;
242
243 srh = get_and_validate_srh(skb);
244 if (!srh)
245 goto drop;
246
247 advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
248
249 seg6_lookup_nexthop(skb, &slwt->nh6, 0);
250
251 return dst_input(skb);
252
253 drop:
254 kfree_skb(skb);
255 return -EINVAL;
256 }
257
258 static int input_action_end_t(struct sk_buff *skb, struct seg6_local_lwt *slwt)
259 {
260 struct ipv6_sr_hdr *srh;
261
262 srh = get_and_validate_srh(skb);
263 if (!srh)
264 goto drop;
265
266 advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
267
268 seg6_lookup_nexthop(skb, NULL, slwt->table);
269
270 return dst_input(skb);
271
272 drop:
273 kfree_skb(skb);
274 return -EINVAL;
275 }
276
277 /* decapsulate and forward inner L2 frame on specified interface */
278 static int input_action_end_dx2(struct sk_buff *skb,
279 struct seg6_local_lwt *slwt)
280 {
281 struct net *net = dev_net(skb->dev);
282 struct net_device *odev;
283 struct ethhdr *eth;
284
285 if (!decap_and_validate(skb, IPPROTO_ETHERNET))
286 goto drop;
287
288 if (!pskb_may_pull(skb, ETH_HLEN))
289 goto drop;
290
291 skb_reset_mac_header(skb);
292 eth = (struct ethhdr *)skb->data;
293
294 /* To determine the frame's protocol, we assume it is 802.3. This
295 * avoids a call to eth_type_trans(), which is not needed for our
296 * use case.
297 */
298 if (!eth_proto_is_802_3(eth->h_proto))
299 goto drop;
300
301 odev = dev_get_by_index_rcu(net, slwt->oif);
302 if (!odev)
303 goto drop;
304
305 /* As we accept Ethernet frames, make sure the egress device is of
306 * the correct type.
307 */
308 if (odev->type != ARPHRD_ETHER)
309 goto drop;
310
311 if (!(odev->flags & IFF_UP) || !netif_carrier_ok(odev))
312 goto drop;
313
314 skb_orphan(skb);
315
316 if (skb_warn_if_lro(skb))
317 goto drop;
318
319 skb_forward_csum(skb);
320
321 if (skb->len - ETH_HLEN > odev->mtu)
322 goto drop;
323
324 skb->dev = odev;
325 skb->protocol = eth->h_proto;
326
327 return dev_queue_xmit(skb);
328
329 drop:
330 kfree_skb(skb);
331 return -EINVAL;
332 }
333
334 /* decapsulate and forward to specified nexthop */
335 static int input_action_end_dx6(struct sk_buff *skb,
336 struct seg6_local_lwt *slwt)
337 {
338 struct in6_addr *nhaddr = NULL;
339
340 /* this function accepts IPv6 encapsulated packets, with either
341 * an SRH with SL=0, or no SRH.
342 */
343
344 if (!decap_and_validate(skb, IPPROTO_IPV6))
345 goto drop;
346
347 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
348 goto drop;
349
350 /* The inner packet is not associated with any local interface,
351 * so we do not call netif_rx().
352 *
353 * If slwt->nh6 is set to ::, look up the nexthop for the inner
354 * packet's DA. Otherwise, use the specified nexthop.
355 */
356
357 if (!ipv6_addr_any(&slwt->nh6))
358 nhaddr = &slwt->nh6;
359
360 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
361
362 seg6_lookup_nexthop(skb, nhaddr, 0);
363
364 return dst_input(skb);
365 drop:
366 kfree_skb(skb);
367 return -EINVAL;
368 }
369
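/* End.DX4: decapsulate the inner IPv4 packet and forward it to the
 * configured nexthop, or to the inner destination address when no
 * nexthop is set.
 */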
370 static int input_action_end_dx4(struct sk_buff *skb,
371 struct seg6_local_lwt *slwt)
372 {
373 struct iphdr *iph;
374 __be32 nhaddr;
375 int err;
376
377 if (!decap_and_validate(skb, IPPROTO_IPIP))
378 goto drop;
379
380 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
381 goto drop;
382
383 skb->protocol = htons(ETH_P_IP);
384
385 iph = ip_hdr(skb);
386
387 nhaddr = slwt->nh4.s_addr ?: iph->daddr;
388
389 skb_dst_drop(skb);
390
391 skb_set_transport_header(skb, sizeof(struct iphdr));
392
393 err = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev);
394 if (err)
395 goto drop;
396
397 return dst_input(skb);
398
399 drop:
400 kfree_skb(skb);
401 return -EINVAL;
402 }
403
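/* End.DT6: decapsulate and perform an IPv6 lookup in the configured
 * table, allowing local delivery of the inner packet.
 */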
404 static int input_action_end_dt6(struct sk_buff *skb,
405 struct seg6_local_lwt *slwt)
406 {
407 if (!decap_and_validate(skb, IPPROTO_IPV6))
408 goto drop;
409
410 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
411 goto drop;
412
413 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
414
415 seg6_lookup_any_nexthop(skb, NULL, slwt->table, true);
416
417 return dst_input(skb);
418
419 drop:
420 kfree_skb(skb);
421 return -EINVAL;
422 }
423
424 /* push an SRH on top of the current one */
425 static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
426 {
427 struct ipv6_sr_hdr *srh;
428 int err = -EINVAL;
429
430 srh = get_and_validate_srh(skb);
431 if (!srh)
432 goto drop;
433
434 err = seg6_do_srh_inline(skb, slwt->srh);
435 if (err)
436 goto drop;
437
438 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
439 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
440
441 seg6_lookup_nexthop(skb, NULL, 0);
442
443 return dst_input(skb);
444
445 drop:
446 kfree_skb(skb);
447 return err;
448 }
449
450 /* encapsulate within an outer IPv6 header and a specified SRH */
451 static int input_action_end_b6_encap(struct sk_buff *skb,
452 struct seg6_local_lwt *slwt)
453 {
454 struct ipv6_sr_hdr *srh;
455 int err = -EINVAL;
456
457 srh = get_and_validate_srh(skb);
458 if (!srh)
459 goto drop;
460
461 advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
462
463 skb_reset_inner_headers(skb);
464 skb->encapsulation = 1;
465
466 err = seg6_do_srh_encap(skb, slwt->srh, IPPROTO_IPV6);
467 if (err)
468 goto drop;
469
470 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
471 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
472
473 seg6_lookup_nexthop(skb, NULL, 0);
474
475 return dst_input(skb);
476
477 drop:
478 kfree_skb(skb);
479 return err;
480 }
481
482 DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
483
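/* Re-validate the SRH after a BPF program may have modified it through
 * the bpf_lwt_seg6_* helpers. The per-CPU seg6_bpf_srh_states entry
 * tracks the (possibly updated) header length and validity flag.
 */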
484 bool seg6_bpf_has_valid_srh(struct sk_buff *skb)
485 {
486 struct seg6_bpf_srh_state *srh_state =
487 this_cpu_ptr(&seg6_bpf_srh_states);
488 struct ipv6_sr_hdr *srh = srh_state->srh;
489
490 if (unlikely(srh == NULL))
491 return false;
492
493 if (unlikely(!srh_state->valid)) {
494 if ((srh_state->hdrlen & 7) != 0)
495 return false;
496
497 srh->hdrlen = (u8)(srh_state->hdrlen >> 3);
498 if (!seg6_validate_srh(srh, (srh->hdrlen + 1) << 3))
499 return false;
500
501 srh_state->valid = true;
502 }
503
504 return true;
505 }
506
507 static int input_action_end_bpf(struct sk_buff *skb,
508 struct seg6_local_lwt *slwt)
509 {
510 struct seg6_bpf_srh_state *srh_state =
511 this_cpu_ptr(&seg6_bpf_srh_states);
512 struct ipv6_sr_hdr *srh;
513 int ret;
514
515 srh = get_and_validate_srh(skb);
516 if (!srh) {
517 kfree_skb(skb);
518 return -EINVAL;
519 }
520 advance_nextseg(srh, &ipv6_hdr(skb)->daddr);
521
522 /* preempt_disable is needed to protect the per-CPU buffer srh_state,
523 * which is also accessed by the bpf_lwt_seg6_* helpers
524 */
525 preempt_disable();
526 srh_state->srh = srh;
527 srh_state->hdrlen = srh->hdrlen << 3;
528 srh_state->valid = true;
529
530 rcu_read_lock();
531 bpf_compute_data_pointers(skb);
532 ret = bpf_prog_run_save_cb(slwt->bpf.prog, skb);
533 rcu_read_unlock();
534
535 switch (ret) {
536 case BPF_OK:
537 case BPF_REDIRECT:
538 break;
539 case BPF_DROP:
540 goto drop;
541 default:
542 pr_warn_once("bpf-seg6local: Illegal return value %u\n", ret);
543 goto drop;
544 }
545
546 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
547 goto drop;
548
549 preempt_enable();
550 if (ret != BPF_REDIRECT)
551 seg6_lookup_nexthop(skb, NULL, 0);
552
553 return dst_input(skb);
554
555 drop:
556 preempt_enable();
557 kfree_skb(skb);
558 return -EINVAL;
559 }
560
561 static struct seg6_action_desc seg6_action_table[] = {
562 {
563 .action = SEG6_LOCAL_ACTION_END,
564 .attrs = 0,
565 .input = input_action_end,
566 },
567 {
568 .action = SEG6_LOCAL_ACTION_END_X,
569 .attrs = (1 << SEG6_LOCAL_NH6),
570 .input = input_action_end_x,
571 },
572 {
573 .action = SEG6_LOCAL_ACTION_END_T,
574 .attrs = (1 << SEG6_LOCAL_TABLE),
575 .input = input_action_end_t,
576 },
577 {
578 .action = SEG6_LOCAL_ACTION_END_DX2,
579 .attrs = (1 << SEG6_LOCAL_OIF),
580 .input = input_action_end_dx2,
581 },
582 {
583 .action = SEG6_LOCAL_ACTION_END_DX6,
584 .attrs = (1 << SEG6_LOCAL_NH6),
585 .input = input_action_end_dx6,
586 },
587 {
588 .action = SEG6_LOCAL_ACTION_END_DX4,
589 .attrs = (1 << SEG6_LOCAL_NH4),
590 .input = input_action_end_dx4,
591 },
592 {
593 .action = SEG6_LOCAL_ACTION_END_DT6,
594 .attrs = (1 << SEG6_LOCAL_TABLE),
595 .input = input_action_end_dt6,
596 },
597 {
598 .action = SEG6_LOCAL_ACTION_END_B6,
599 .attrs = (1 << SEG6_LOCAL_SRH),
600 .input = input_action_end_b6,
601 },
602 {
603 .action = SEG6_LOCAL_ACTION_END_B6_ENCAP,
604 .attrs = (1 << SEG6_LOCAL_SRH),
605 .input = input_action_end_b6_encap,
606 .static_headroom = sizeof(struct ipv6hdr),
607 },
608 {
609 .action = SEG6_LOCAL_ACTION_END_BPF,
610 .attrs = (1 << SEG6_LOCAL_BPF),
611 .input = input_action_end_bpf,
612 },
613
614 };
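/* Illustrative only: these behaviors are typically installed from user
 * space with iproute2, roughly as follows (addresses, table ids and
 * device names below are placeholders, and the exact syntax depends on
 * the iproute2 version in use):
 *
 *   ip -6 route add fc00::1/128 encap seg6local action End dev eth0
 *   ip -6 route add fc00::2/128 encap seg6local action End.X nh6 2001:db8::2 dev eth0
 *   ip -6 route add fc00::3/128 encap seg6local action End.DT6 table 100 dev eth0
 *
 * Each action maps to one entry of seg6_action_table above.
 */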
615
616 static struct seg6_action_desc *__get_action_desc(int action)
617 {
618 struct seg6_action_desc *desc;
619 int i, count;
620
621 count = ARRAY_SIZE(seg6_action_table);
622 for (i = 0; i < count; i++) {
623 desc = &seg6_action_table[i];
624 if (desc->action == action)
625 return desc;
626 }
627
628 return NULL;
629 }
630
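/* lwtunnel input hook: dispatch the packet to the handler of the action
 * configured on the matched route. Non-IPv6 packets are dropped.
 */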
631 static int seg6_local_input(struct sk_buff *skb)
632 {
633 struct dst_entry *orig_dst = skb_dst(skb);
634 struct seg6_action_desc *desc;
635 struct seg6_local_lwt *slwt;
636
637 if (skb->protocol != htons(ETH_P_IPV6)) {
638 kfree_skb(skb);
639 return -EINVAL;
640 }
641
642 slwt = seg6_local_lwtunnel(orig_dst->lwtstate);
643 desc = slwt->desc;
644
645 return desc->input(skb, slwt);
646 }
647
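/* Netlink policy for the SEG6_LOCAL_* attributes accepted on
 * "encap seg6local" routes.
 */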
648 static const struct nla_policy seg6_local_policy[SEG6_LOCAL_MAX + 1] = {
649 [SEG6_LOCAL_ACTION] = { .type = NLA_U32 },
650 [SEG6_LOCAL_SRH] = { .type = NLA_BINARY },
651 [SEG6_LOCAL_TABLE] = { .type = NLA_U32 },
652 [SEG6_LOCAL_NH4] = { .type = NLA_BINARY,
653 .len = sizeof(struct in_addr) },
654 [SEG6_LOCAL_NH6] = { .type = NLA_BINARY,
655 .len = sizeof(struct in6_addr) },
656 [SEG6_LOCAL_IIF] = { .type = NLA_U32 },
657 [SEG6_LOCAL_OIF] = { .type = NLA_U32 },
658 [SEG6_LOCAL_BPF] = { .type = NLA_NESTED },
659 };
660
661 static int parse_nla_srh(struct nlattr **attrs, struct seg6_local_lwt *slwt)
662 {
663 struct ipv6_sr_hdr *srh;
664 int len;
665
666 srh = nla_data(attrs[SEG6_LOCAL_SRH]);
667 len = nla_len(attrs[SEG6_LOCAL_SRH]);
668
669 /* SRH must contain at least one segment */
670 if (len < sizeof(*srh) + sizeof(struct in6_addr))
671 return -EINVAL;
672
673 if (!seg6_validate_srh(srh, len))
674 return -EINVAL;
675
676 slwt->srh = kmemdup(srh, len, GFP_KERNEL);
677 if (!slwt->srh)
678 return -ENOMEM;
679
680 slwt->headroom += len;
681
682 return 0;
683 }
684
685 static int put_nla_srh(struct sk_buff *skb, struct seg6_local_lwt *slwt)
686 {
687 struct ipv6_sr_hdr *srh;
688 struct nlattr *nla;
689 int len;
690
691 srh = slwt->srh;
692 len = (srh->hdrlen + 1) << 3;
693
694 nla = nla_reserve(skb, SEG6_LOCAL_SRH, len);
695 if (!nla)
696 return -EMSGSIZE;
697
698 memcpy(nla_data(nla), srh, len);
699
700 return 0;
701 }
702
703 static int cmp_nla_srh(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
704 {
705 int len = (a->srh->hdrlen + 1) << 3;
706
707 if (len != ((b->srh->hdrlen + 1) << 3))
708 return 1;
709
710 return memcmp(a->srh, b->srh, len);
711 }
712
713 static int parse_nla_table(struct nlattr **attrs, struct seg6_local_lwt *slwt)
714 {
715 slwt->table = nla_get_u32(attrs[SEG6_LOCAL_TABLE]);
716
717 return 0;
718 }
719
720 static int put_nla_table(struct sk_buff *skb, struct seg6_local_lwt *slwt)
721 {
722 if (nla_put_u32(skb, SEG6_LOCAL_TABLE, slwt->table))
723 return -EMSGSIZE;
724
725 return 0;
726 }
727
728 static int cmp_nla_table(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
729 {
730 if (a->table != b->table)
731 return 1;
732
733 return 0;
734 }
735
736 static int parse_nla_nh4(struct nlattr **attrs, struct seg6_local_lwt *slwt)
737 {
738 memcpy(&slwt->nh4, nla_data(attrs[SEG6_LOCAL_NH4]),
739 sizeof(struct in_addr));
740
741 return 0;
742 }
743
744 static int put_nla_nh4(struct sk_buff *skb, struct seg6_local_lwt *slwt)
745 {
746 struct nlattr *nla;
747
748 nla = nla_reserve(skb, SEG6_LOCAL_NH4, sizeof(struct in_addr));
749 if (!nla)
750 return -EMSGSIZE;
751
752 memcpy(nla_data(nla), &slwt->nh4, sizeof(struct in_addr));
753
754 return 0;
755 }
756
757 static int cmp_nla_nh4(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
758 {
759 return memcmp(&a->nh4, &b->nh4, sizeof(struct in_addr));
760 }
761
762 static int parse_nla_nh6(struct nlattr **attrs, struct seg6_local_lwt *slwt)
763 {
764 memcpy(&slwt->nh6, nla_data(attrs[SEG6_LOCAL_NH6]),
765 sizeof(struct in6_addr));
766
767 return 0;
768 }
769
770 static int put_nla_nh6(struct sk_buff *skb, struct seg6_local_lwt *slwt)
771 {
772 struct nlattr *nla;
773
774 nla = nla_reserve(skb, SEG6_LOCAL_NH6, sizeof(struct in6_addr));
775 if (!nla)
776 return -EMSGSIZE;
777
778 memcpy(nla_data(nla), &slwt->nh6, sizeof(struct in6_addr));
779
780 return 0;
781 }
782
783 static int cmp_nla_nh6(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
784 {
785 return memcmp(&a->nh6, &b->nh6, sizeof(struct in6_addr));
786 }
787
788 static int parse_nla_iif(struct nlattr **attrs, struct seg6_local_lwt *slwt)
789 {
790 slwt->iif = nla_get_u32(attrs[SEG6_LOCAL_IIF]);
791
792 return 0;
793 }
794
795 static int put_nla_iif(struct sk_buff *skb, struct seg6_local_lwt *slwt)
796 {
797 if (nla_put_u32(skb, SEG6_LOCAL_IIF, slwt->iif))
798 return -EMSGSIZE;
799
800 return 0;
801 }
802
803 static int cmp_nla_iif(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
804 {
805 if (a->iif != b->iif)
806 return 1;
807
808 return 0;
809 }
810
811 static int parse_nla_oif(struct nlattr **attrs, struct seg6_local_lwt *slwt)
812 {
813 slwt->oif = nla_get_u32(attrs[SEG6_LOCAL_OIF]);
814
815 return 0;
816 }
817
818 static int put_nla_oif(struct sk_buff *skb, struct seg6_local_lwt *slwt)
819 {
820 if (nla_put_u32(skb, SEG6_LOCAL_OIF, slwt->oif))
821 return -EMSGSIZE;
822
823 return 0;
824 }
825
826 static int cmp_nla_oif(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
827 {
828 if (a->oif != b->oif)
829 return 1;
830
831 return 0;
832 }
833
834 #define MAX_PROG_NAME 256
835 static const struct nla_policy bpf_prog_policy[SEG6_LOCAL_BPF_PROG_MAX + 1] = {
836 [SEG6_LOCAL_BPF_PROG] = { .type = NLA_U32, },
837 [SEG6_LOCAL_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
838 .len = MAX_PROG_NAME },
839 };
840
841 static int parse_nla_bpf(struct nlattr **attrs, struct seg6_local_lwt *slwt)
842 {
843 struct nlattr *tb[SEG6_LOCAL_BPF_PROG_MAX + 1];
844 struct bpf_prog *p;
845 int ret;
846 u32 fd;
847
848 ret = nla_parse_nested_deprecated(tb, SEG6_LOCAL_BPF_PROG_MAX,
849 attrs[SEG6_LOCAL_BPF],
850 bpf_prog_policy, NULL);
851 if (ret < 0)
852 return ret;
853
854 if (!tb[SEG6_LOCAL_BPF_PROG] || !tb[SEG6_LOCAL_BPF_PROG_NAME])
855 return -EINVAL;
856
857 slwt->bpf.name = nla_memdup(tb[SEG6_LOCAL_BPF_PROG_NAME], GFP_KERNEL);
858 if (!slwt->bpf.name)
859 return -ENOMEM;
860
861 fd = nla_get_u32(tb[SEG6_LOCAL_BPF_PROG]);
862 p = bpf_prog_get_type(fd, BPF_PROG_TYPE_LWT_SEG6LOCAL);
863 if (IS_ERR(p)) {
864 kfree(slwt->bpf.name);
865 return PTR_ERR(p);
866 }
867
868 slwt->bpf.prog = p;
869 return 0;
870 }
871
872 static int put_nla_bpf(struct sk_buff *skb, struct seg6_local_lwt *slwt)
873 {
874 struct nlattr *nest;
875
876 if (!slwt->bpf.prog)
877 return 0;
878
879 nest = nla_nest_start_noflag(skb, SEG6_LOCAL_BPF);
880 if (!nest)
881 return -EMSGSIZE;
882
883 if (nla_put_u32(skb, SEG6_LOCAL_BPF_PROG, slwt->bpf.prog->aux->id))
884 return -EMSGSIZE;
885
886 if (slwt->bpf.name &&
887 nla_put_string(skb, SEG6_LOCAL_BPF_PROG_NAME, slwt->bpf.name))
888 return -EMSGSIZE;
889
890 return nla_nest_end(skb, nest);
891 }
892
893 static int cmp_nla_bpf(struct seg6_local_lwt *a, struct seg6_local_lwt *b)
894 {
895 if (!a->bpf.name && !b->bpf.name)
896 return 0;
897
898 if (!a->bpf.name || !b->bpf.name)
899 return 1;
900
901 return strcmp(a->bpf.name, b->bpf.name);
902 }
903
904 struct seg6_action_param {
905 int (*parse)(struct nlattr **attrs, struct seg6_local_lwt *slwt);
906 int (*put)(struct sk_buff *skb, struct seg6_local_lwt *slwt);
907 int (*cmp)(struct seg6_local_lwt *a, struct seg6_local_lwt *b);
908 };
909
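/* Per-attribute operations: parse the netlink attribute into the lwt
 * state, dump it back to user space, and compare two states for route
 * equality.
 */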
910 static struct seg6_action_param seg6_action_params[SEG6_LOCAL_MAX + 1] = {
911 [SEG6_LOCAL_SRH] = { .parse = parse_nla_srh,
912 .put = put_nla_srh,
913 .cmp = cmp_nla_srh },
914
915 [SEG6_LOCAL_TABLE] = { .parse = parse_nla_table,
916 .put = put_nla_table,
917 .cmp = cmp_nla_table },
918
919 [SEG6_LOCAL_NH4] = { .parse = parse_nla_nh4,
920 .put = put_nla_nh4,
921 .cmp = cmp_nla_nh4 },
922
923 [SEG6_LOCAL_NH6] = { .parse = parse_nla_nh6,
924 .put = put_nla_nh6,
925 .cmp = cmp_nla_nh6 },
926
927 [SEG6_LOCAL_IIF] = { .parse = parse_nla_iif,
928 .put = put_nla_iif,
929 .cmp = cmp_nla_iif },
930
931 [SEG6_LOCAL_OIF] = { .parse = parse_nla_oif,
932 .put = put_nla_oif,
933 .cmp = cmp_nla_oif },
934
935 [SEG6_LOCAL_BPF] = { .parse = parse_nla_bpf,
936 .put = put_nla_bpf,
937 .cmp = cmp_nla_bpf },
938
939 };
940
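/* Resolve the action descriptor for slwt->action and parse every
 * attribute the action requires; unknown actions and missing mandatory
 * attributes are rejected with -EINVAL.
 */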
941 static int parse_nla_action(struct nlattr **attrs, struct seg6_local_lwt *slwt)
942 {
943 struct seg6_action_param *param;
944 struct seg6_action_desc *desc;
945 int i, err;
946
947 desc = __get_action_desc(slwt->action);
948 if (!desc)
949 return -EINVAL;
950
951 if (!desc->input)
952 return -EOPNOTSUPP;
953
954 slwt->desc = desc;
955 slwt->headroom += desc->static_headroom;
956
957 for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
958 if (desc->attrs & (1 << i)) {
959 if (!attrs[i])
960 return -EINVAL;
961
962 param = &seg6_action_params[i];
963
964 err = param->parse(attrs, slwt);
965 if (err < 0)
966 return err;
967 }
968 }
969
970 return 0;
971 }
972
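/* Netlink entry point: validate the SEG6_LOCAL_* attributes and build
 * the lwtunnel state for a seg6local route. seg6local routes are only
 * meaningful for IPv6 destinations, hence the AF_INET6 check.
 */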
973 static int seg6_local_build_state(struct nlattr *nla, unsigned int family,
974 const void *cfg, struct lwtunnel_state **ts,
975 struct netlink_ext_ack *extack)
976 {
977 struct nlattr *tb[SEG6_LOCAL_MAX + 1];
978 struct lwtunnel_state *newts;
979 struct seg6_local_lwt *slwt;
980 int err;
981
982 if (family != AF_INET6)
983 return -EINVAL;
984
985 err = nla_parse_nested_deprecated(tb, SEG6_LOCAL_MAX, nla,
986 seg6_local_policy, extack);
987
988 if (err < 0)
989 return err;
990
991 if (!tb[SEG6_LOCAL_ACTION])
992 return -EINVAL;
993
994 newts = lwtunnel_state_alloc(sizeof(*slwt));
995 if (!newts)
996 return -ENOMEM;
997
998 slwt = seg6_local_lwtunnel(newts);
999 slwt->action = nla_get_u32(tb[SEG6_LOCAL_ACTION]);
1000
1001 err = parse_nla_action(tb, slwt);
1002 if (err < 0)
1003 goto out_free;
1004
1005 newts->type = LWTUNNEL_ENCAP_SEG6_LOCAL;
1006 newts->flags = LWTUNNEL_STATE_INPUT_REDIRECT;
1007 newts->headroom = slwt->headroom;
1008
1009 *ts = newts;
1010
1011 return 0;
1012
1013 out_free:
1014 kfree(slwt->srh);
1015 kfree(newts);
1016 return err;
1017 }
1018
1019 static void seg6_local_destroy_state(struct lwtunnel_state *lwt)
1020 {
1021 struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
1022
1023 kfree(slwt->srh);
1024
1025 if (slwt->desc->attrs & (1 << SEG6_LOCAL_BPF)) {
1026 kfree(slwt->bpf.name);
1027 bpf_prog_put(slwt->bpf.prog);
1028 }
1031 }
1032
1033 static int seg6_local_fill_encap(struct sk_buff *skb,
1034 struct lwtunnel_state *lwt)
1035 {
1036 struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
1037 struct seg6_action_param *param;
1038 int i, err;
1039
1040 if (nla_put_u32(skb, SEG6_LOCAL_ACTION, slwt->action))
1041 return -EMSGSIZE;
1042
1043 for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
1044 if (slwt->desc->attrs & (1 << i)) {
1045 param = &seg6_action_params[i];
1046 err = param->put(skb, slwt);
1047 if (err < 0)
1048 return err;
1049 }
1050 }
1051
1052 return 0;
1053 }
1054
1055 static int seg6_local_get_encap_size(struct lwtunnel_state *lwt)
1056 {
1057 struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt);
1058 unsigned long attrs;
1059 int nlsize;
1060
1061 nlsize = nla_total_size(4); /* action */
1062
1063 attrs = slwt->desc->attrs;
1064
1065 if (attrs & (1 << SEG6_LOCAL_SRH))
1066 nlsize += nla_total_size((slwt->srh->hdrlen + 1) << 3);
1067
1068 if (attrs & (1 << SEG6_LOCAL_TABLE))
1069 nlsize += nla_total_size(4);
1070
1071 if (attrs & (1 << SEG6_LOCAL_NH4))
1072 nlsize += nla_total_size(4);
1073
1074 if (attrs & (1 << SEG6_LOCAL_NH6))
1075 nlsize += nla_total_size(16);
1076
1077 if (attrs & (1 << SEG6_LOCAL_IIF))
1078 nlsize += nla_total_size(4);
1079
1080 if (attrs & (1 << SEG6_LOCAL_OIF))
1081 nlsize += nla_total_size(4);
1082
1083 if (attrs & (1 << SEG6_LOCAL_BPF))
1084 nlsize += nla_total_size(sizeof(struct nlattr)) +
1085 nla_total_size(MAX_PROG_NAME) +
1086 nla_total_size(4);
1087
1088 return nlsize;
1089 }
1090
1091 static int seg6_local_cmp_encap(struct lwtunnel_state *a,
1092 struct lwtunnel_state *b)
1093 {
1094 struct seg6_local_lwt *slwt_a, *slwt_b;
1095 struct seg6_action_param *param;
1096 int i;
1097
1098 slwt_a = seg6_local_lwtunnel(a);
1099 slwt_b = seg6_local_lwtunnel(b);
1100
1101 if (slwt_a->action != slwt_b->action)
1102 return 1;
1103
1104 if (slwt_a->desc->attrs != slwt_b->desc->attrs)
1105 return 1;
1106
1107 for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) {
1108 if (slwt_a->desc->attrs & (1 << i)) {
1109 param = &seg6_action_params[i];
1110 if (param->cmp(slwt_a, slwt_b))
1111 return 1;
1112 }
1113 }
1114
1115 return 0;
1116 }
1117
1118 static const struct lwtunnel_encap_ops seg6_local_ops = {
1119 .build_state = seg6_local_build_state,
1120 .destroy_state = seg6_local_destroy_state,
1121 .input = seg6_local_input,
1122 .fill_encap = seg6_local_fill_encap,
1123 .get_encap_size = seg6_local_get_encap_size,
1124 .cmp_encap = seg6_local_cmp_encap,
1125 .owner = THIS_MODULE,
1126 };
1127
1128 int __init seg6_local_init(void)
1129 {
1130 return lwtunnel_encap_add_ops(&seg6_local_ops,
1131 LWTUNNEL_ENCAP_SEG6_LOCAL);
1132 }
1133
1134 void seg6_local_exit(void)
1135 {
1136 lwtunnel_encap_del_ops(&seg6_local_ops, LWTUNNEL_ENCAP_SEG6_LOCAL);
1137 }