#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/sysctl.h>
#include <linux/net.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/netconf.h>
#include <linux/vmalloc.h>
#include <linux/percpu.h>
#include <net/ip.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/netevent.h>
#include <net/netns/generic.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#endif
#include <net/addrconf.h>
#include <net/nexthop.h>
#include "internal.h"

/* Maximum number of labels to look ahead at when selecting a path of
 * a multipath route
 */
#define MAX_MP_SELECT_LABELS 4

#define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)

static int zero = 0;
static int label_limit = (1 << 20) - 1;

static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
		       struct nlmsghdr *nlh, struct net *net, u32 portid,
		       unsigned int nlm_flags);

static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
{
	struct mpls_route *rt = NULL;

	if (index < net->mpls.platform_labels) {
		struct mpls_route __rcu **platform_label =
			rcu_dereference(net->mpls.platform_label);
		rt = rcu_dereference(platform_label[index]);
	}
	return rt;
}

bool mpls_output_possible(const struct net_device *dev)
{
	return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
}
EXPORT_SYMBOL_GPL(mpls_output_possible);

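/* Each nexthop's via address lives in a block that follows the rt_nh[]
 * array in the same allocation: entries are rt_max_alen bytes apart,
 * so nexthop i's address sits at i * rt_max_alen past the aligned base.
 */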
static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
{
	u8 *nh0_via = PTR_ALIGN((u8 *)&rt->rt_nh[rt->rt_nhn], VIA_ALEN_ALIGN);
	int nh_index = nh - rt->rt_nh;

	return nh0_via + rt->rt_max_alen * nh_index;
}

static const u8 *mpls_nh_via(const struct mpls_route *rt,
			     const struct mpls_nh *nh)
{
	return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
}

static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
{
	/* The size of the layer 2.5 labels to be added for this route */
	return nh->nh_labels * sizeof(struct mpls_shim_hdr);
}

unsigned int mpls_dev_mtu(const struct net_device *dev)
{
	/* The amount of data the layer 2 frame can hold */
	return dev->mtu;
}
EXPORT_SYMBOL_GPL(mpls_dev_mtu);

bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);

void mpls_stats_inc_outucastpkts(struct net_device *dev,
				 const struct sk_buff *skb)
{
	struct mpls_dev *mdev;

	if (skb->protocol == htons(ETH_P_MPLS_UC)) {
		mdev = mpls_dev_get(dev);
		if (mdev)
			MPLS_INC_STATS_LEN(mdev, skb->len,
					   tx_packets,
					   tx_bytes);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct inet6_dev *in6dev = __in6_dev_get(dev);

		if (in6dev)
			IP6_UPD_PO_STATS(dev_net(dev), in6dev,
					 IPSTATS_MIB_OUT, skb->len);
#endif
	}
}
EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);

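/* Compute a flow hash for multipath selection: walk up to
 * MAX_MP_SELECT_LABELS entries of the label stack, mixing every
 * non-reserved label into the hash, and at the bottom of stack also
 * mix in the addresses and protocol of an IPv4 or IPv6 payload when
 * one is present.
 */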
static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
{
	struct mpls_entry_decoded dec;
	unsigned int mpls_hdr_len = 0;
	struct mpls_shim_hdr *hdr;
	bool eli_seen = false;
	int label_index;
	u32 hash = 0;

	for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
	     label_index++) {
		mpls_hdr_len += sizeof(*hdr);
		if (!pskb_may_pull(skb, mpls_hdr_len))
			break;

		/* Read and decode the current label */
		hdr = mpls_hdr(skb) + label_index;
		dec = mpls_entry_decode(hdr);

		/* RFC6790 - reserved labels MUST NOT be used as keys
		 * for the load-balancing function
		 */
		if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
			hash = jhash_1word(dec.label, hash);

			/* The entropy label follows the entropy label
			 * indicator, so this means that the entropy
			 * label was just added to the hash - no need to
			 * go any deeper either in the label stack or in the
			 * payload
			 */
			if (eli_seen)
				break;
		} else if (dec.label == MPLS_LABEL_ENTROPY) {
			eli_seen = true;
		}

		if (!dec.bos)
			continue;

		/* found bottom label; does skb have room for a header? */
		if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
			const struct iphdr *v4hdr;

			v4hdr = (const struct iphdr *)(hdr + 1);
			if (v4hdr->version == 4) {
				hash = jhash_3words(ntohl(v4hdr->saddr),
						    ntohl(v4hdr->daddr),
						    v4hdr->protocol, hash);
			} else if (v4hdr->version == 6 &&
				   pskb_may_pull(skb, mpls_hdr_len +
						 sizeof(struct ipv6hdr))) {
				const struct ipv6hdr *v6hdr;

				v6hdr = (const struct ipv6hdr *)(hdr + 1);
				hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
				hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
				hash = jhash_1word(v6hdr->nexthdr, hash);
			}
		}

		break;
	}

	return hash;
}

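/* Pick the nexthop a packet should use: single-path routes short
 * circuit, otherwise the flow hash indexes into the nexthops that are
 * still alive (skipping RTNH_F_DEAD/RTNH_F_LINKDOWN entries).
 * Returns NULL when a multipath route has no alive nexthops.
 */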
static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
					     struct sk_buff *skb)
{
	int alive = ACCESS_ONCE(rt->rt_nhn_alive);
	u32 hash = 0;
	int nh_index = 0;
	int n = 0;

	/* No need to look further into packet if there's only
	 * one path
	 */
	if (rt->rt_nhn == 1)
		goto out;

	if (alive <= 0)
		return NULL;

	hash = mpls_multipath_hash(rt, skb);
	nh_index = hash % alive;
	if (alive == rt->rt_nhn)
		goto out;
	for_nexthops(rt) {
		if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			continue;
		if (n == nh_index)
			return nh;
		n++;
	} endfor_nexthops(rt);

out:
	return &rt->rt_nh[nh_index];
}

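/* Leaving the MPLS domain: restore skb->protocol from the route's
 * payload type (or from the IP version field when unspecified) and
 * copy the already-decremented MPLS TTL back into the IPv4 TTL or
 * IPv6 hop limit, patching the IPv4 checksum incrementally.
 */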
static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
			struct mpls_entry_decoded dec)
{
	enum mpls_payload_type payload_type;
	bool success = false;

	/* The IPv4 code below accesses through the IPv4 header
	 * checksum, which is 12 bytes into the packet.
	 * The IPv6 code below accesses through the IPv6 hop limit
	 * which is 8 bytes into the packet.
	 *
	 * For all supported cases there should always be at least 12
	 * bytes of packet data present. The IPv4 header is 20 bytes
	 * without options and the IPv6 header is always 40 bytes
	 * long.
	 */
	if (!pskb_may_pull(skb, 12))
		return false;

	payload_type = rt->rt_payload_type;
	if (payload_type == MPT_UNSPEC)
		payload_type = ip_hdr(skb)->version;

	switch (payload_type) {
	case MPT_IPV4: {
		struct iphdr *hdr4 = ip_hdr(skb);

		skb->protocol = htons(ETH_P_IP);
		csum_replace2(&hdr4->check,
			      htons(hdr4->ttl << 8),
			      htons(dec.ttl << 8));
		hdr4->ttl = dec.ttl;
		success = true;
		break;
	}
	case MPT_IPV6: {
		struct ipv6hdr *hdr6 = ipv6_hdr(skb);

		skb->protocol = htons(ETH_P_IPV6);
		hdr6->hop_limit = dec.ttl;
		success = true;
		break;
	}
	case MPT_UNSPEC:
		break;
	}

	return success;
}

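/* Receive handler for ETH_P_MPLS_UC packets (registered through
 * mpls_packet_type below): validate the skb, look up the incoming
 * label, pick a nexthop, decrement the TTL, then either push the
 * nexthop's labels or hand the packet to mpls_egress() when the
 * stack becomes empty.
 */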
static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mpls_shim_hdr *hdr;
	struct mpls_route *rt;
	struct mpls_nh *nh;
	struct mpls_entry_decoded dec;
	struct net_device *out_dev;
	struct mpls_dev *out_mdev;
	struct mpls_dev *mdev;
	unsigned int hh_len;
	unsigned int new_header_size;
	unsigned int mtu;
	int err;

	/* Careful: this entire function runs inside an RCU critical section */

	mdev = mpls_dev_get(dev);
	if (!mdev)
		goto drop;

	MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
			   rx_bytes);

	if (!mdev->input_enabled) {
		MPLS_INC_STATS(mdev, rx_dropped);
		goto drop;
	}

	if (skb->pkt_type != PACKET_HOST)
		goto err;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto err;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto err;

	/* Read and decode the label */
	hdr = mpls_hdr(skb);
	dec = mpls_entry_decode(hdr);

	rt = mpls_route_input_rcu(net, dec.label);
	if (!rt) {
		MPLS_INC_STATS(mdev, rx_noroute);
		goto drop;
	}

	nh = mpls_select_multipath(rt, skb);
	if (!nh)
		goto err;

	/* Pop the label */
	skb_pull(skb, sizeof(*hdr));
	skb_reset_network_header(skb);

	skb_orphan(skb);

	if (skb_warn_if_lro(skb))
		goto err;

	skb_forward_csum(skb);

	/* Verify ttl is valid */
	if (dec.ttl <= 1)
		goto err;
	dec.ttl -= 1;

	/* Find the output device */
	out_dev = rcu_dereference(nh->nh_dev);
	if (!mpls_output_possible(out_dev))
		goto tx_err;

	/* Verify the destination can hold the packet */
	new_header_size = mpls_nh_header_size(nh);
	mtu = mpls_dev_mtu(out_dev);
	if (mpls_pkt_too_big(skb, mtu - new_header_size))
		goto tx_err;

	hh_len = LL_RESERVED_SPACE(out_dev);
	if (!out_dev->header_ops)
		hh_len = 0;

	/* Ensure there is enough space for the headers in the skb */
	if (skb_cow(skb, hh_len + new_header_size))
		goto tx_err;

	skb->dev = out_dev;
	skb->protocol = htons(ETH_P_MPLS_UC);

	if (unlikely(!new_header_size && dec.bos)) {
		/* Penultimate hop popping */
		if (!mpls_egress(rt, skb, dec))
			goto err;
	} else {
		bool bos;
		int i;

		skb_push(skb, new_header_size);
		skb_reset_network_header(skb);

		/* Push the new labels */
		hdr = mpls_hdr(skb);
		bos = dec.bos;
		for (i = nh->nh_labels - 1; i >= 0; i--) {
			hdr[i] = mpls_entry_encode(nh->nh_label[i],
						   dec.ttl, 0, bos);
			bos = false;
		}
	}

	mpls_stats_inc_outucastpkts(out_dev, skb);

	/* If via wasn't specified then send out using device address */
	if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
		err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
				 out_dev->dev_addr, skb);
	else
		err = neigh_xmit(nh->nh_via_table, out_dev,
				 mpls_nh_via(rt, nh), skb);
	if (err)
		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
				    __func__, err);
	return 0;

tx_err:
	out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
	if (out_mdev)
		MPLS_INC_STATS(out_mdev, tx_errors);
	goto drop;
err:
	MPLS_INC_STATS(mdev, rx_errors);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static struct packet_type mpls_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_MPLS_UC),
	.func = mpls_forward,
};

static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
	[RTA_DST] = { .type = NLA_U32 },
	[RTA_OIF] = { .type = NLA_U32 },
};

struct mpls_route_config {
	u32 rc_protocol;
	u32 rc_ifindex;
	u8 rc_via_table;
	u8 rc_via_alen;
	u8 rc_via[MAX_VIA_ALEN];
	u32 rc_label;
	u8 rc_output_labels;
	u32 rc_output_label[MAX_NEW_LABELS];
	u32 rc_nlflags;
	enum mpls_payload_type rc_payload_type;
	struct nl_info rc_nlinfo;
	struct rtnexthop *rc_mp;
	int rc_mp_len;
};

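/* Allocate a route with room for num_nh nexthops. The via addresses
 * are carved out of the same allocation: they start at the first
 * VIA_ALEN_ALIGN boundary past rt_nh[num_nh], with max_alen rounded up
 * so every nexthop's slot is identically sized (rt_max_alen).
 */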
static struct mpls_route *mpls_rt_alloc(int num_nh, u8 max_alen)
{
	u8 max_alen_aligned = ALIGN(max_alen, VIA_ALEN_ALIGN);
	struct mpls_route *rt;

	rt = kzalloc(ALIGN(sizeof(*rt) + num_nh * sizeof(*rt->rt_nh),
			   VIA_ALEN_ALIGN) +
		     num_nh * max_alen_aligned,
		     GFP_KERNEL);
	if (rt) {
		rt->rt_nhn = num_nh;
		rt->rt_nhn_alive = num_nh;
		rt->rt_max_alen = max_alen_aligned;
	}

	return rt;
}

static void mpls_rt_free(struct mpls_route *rt)
{
	if (rt)
		kfree_rcu(rt, rt_rcu);
}

static void mpls_notify_route(struct net *net, unsigned index,
			      struct mpls_route *old, struct mpls_route *new,
			      const struct nl_info *info)
{
	struct nlmsghdr *nlh = info ? info->nlh : NULL;
	unsigned portid = info ? info->portid : 0;
	int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
	struct mpls_route *rt = new ? new : old;
	unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;

	/* Ignore reserved labels for now */
	if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
		rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
}

static void mpls_route_update(struct net *net, unsigned index,
			      struct mpls_route *new,
			      const struct nl_info *info)
{
	struct mpls_route __rcu **platform_label;
	struct mpls_route *rt;

	ASSERT_RTNL();

	platform_label = rtnl_dereference(net->mpls.platform_label);
	rt = rtnl_dereference(platform_label[index]);
	rcu_assign_pointer(platform_label[index], new);

	mpls_notify_route(net, index, rt, new, info);

	/* If we removed a route free it now */
	mpls_rt_free(rt);
}

static unsigned find_free_label(struct net *net)
{
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	unsigned index;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
	     index++) {
		if (!rtnl_dereference(platform_label[index]))
			return index;
	}
	return LABEL_NOT_SPECIFIED;
}

#if IS_ENABLED(CONFIG_INET)
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	struct net_device *dev;
	struct rtable *rt;
	struct in_addr daddr;

	memcpy(&daddr, addr, sizeof(struct in_addr));
	rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
	if (IS_ERR(rt))
		return ERR_CAST(rt);

	dev = rt->dst.dev;
	dev_hold(dev);

	ip_rt_put(rt);

	return dev;
}
#else
static struct net_device *inet_fib_lookup_dev(struct net *net,
					      const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int err;

	if (!ipv6_stub)
		return ERR_PTR(-EAFNOSUPPORT);

	memset(&fl6, 0, sizeof(fl6));
	memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
	err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
	if (err)
		return ERR_PTR(err);

	dev = dst->dev;
	dev_hold(dev);
	dst_release(dst);

	return dev;
}
#else
static struct net_device *inet6_fib_lookup_dev(struct net *net,
					       const void *addr)
{
	return ERR_PTR(-EAFNOSUPPORT);
}
#endif

static struct net_device *find_outdev(struct net *net,
				      struct mpls_route *rt,
				      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;

	if (!oif) {
		switch (nh->nh_via_table) {
		case NEIGH_ARP_TABLE:
			dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_ND_TABLE:
			dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
			break;
		case NEIGH_LINK_TABLE:
			break;
		}
	} else {
		dev = dev_get_by_index(net, oif);
	}

	if (!dev)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(dev))
		return dev;

	/* The caller is holding rtnl anyway, so release the dev reference */
	dev_put(dev);

	return dev;
}

static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
			      struct mpls_nh *nh, int oif)
{
	struct net_device *dev = NULL;
	int err = -ENODEV;

	dev = find_outdev(net, rt, nh, oif);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		dev = NULL;
		goto errout;
	}

	/* Ensure this is a supported device */
	err = -EINVAL;
	if (!mpls_dev_get(dev))
		goto errout;

	if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
	    (dev->addr_len != nh->nh_via_alen))
		goto errout;

	RCU_INIT_POINTER(nh->nh_dev, dev);

	if (!(dev->flags & IFF_UP)) {
		nh->nh_flags |= RTNH_F_DEAD;
	} else {
		unsigned int flags;

		flags = dev_get_flags(dev);
		if (!(flags & (IFF_RUNNING | IFF_LOWER_UP)))
			nh->nh_flags |= RTNH_F_LINKDOWN;
	}

	return 0;

errout:
	return err;
}

static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
				  struct mpls_route *rt)
{
	struct net *net = cfg->rc_nlinfo.nl_net;
	struct mpls_nh *nh = rt->rt_nh;
	int err;
	int i;

	if (!nh)
		return -ENOMEM;

	err = -EINVAL;
	/* Ensure only a supported number of labels are present */
	if (cfg->rc_output_labels > MAX_NEW_LABELS)
		goto errout;

	nh->nh_labels = cfg->rc_output_labels;
	for (i = 0; i < nh->nh_labels; i++)
		nh->nh_label[i] = cfg->rc_output_label[i];

	nh->nh_via_table = cfg->rc_via_table;
	memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
	nh->nh_via_alen = cfg->rc_via_alen;

	err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
	if (err)
		goto errout;

	if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
		rt->rt_nhn_alive--;

	return 0;

errout:
	return err;
}

static int mpls_nh_build(struct net *net, struct mpls_route *rt,
			 struct mpls_nh *nh, int oif, struct nlattr *via,
			 struct nlattr *newdst)
{
	int err = -ENOMEM;

	if (!nh)
		goto errout;

	if (newdst) {
		err = nla_get_labels(newdst, MAX_NEW_LABELS,
				     &nh->nh_labels, nh->nh_label);
		if (err)
			goto errout;
	}

	if (via) {
		err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
				  __mpls_nh_via(rt, nh));
		if (err)
			goto errout;
	} else {
		nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
	}

	err = mpls_nh_assign_dev(net, rt, nh, oif);
	if (err)
		goto errout;

	return 0;

errout:
	return err;
}

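/* Count the nexthops encoded in an RTA_MULTIPATH payload and report
 * the widest RTA_VIA address seen, so the route allocation can size
 * its via slots. With no rtnexthop stream it reports a single nexthop
 * using the via length from the request. Returns 0 if the rtnexthop
 * stream is malformed.
 */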
static int mpls_count_nexthops(struct rtnexthop *rtnh, int len,
			       u8 cfg_via_alen, u8 *max_via_alen)
{
	int nhs = 0;
	int remaining = len;

	if (!rtnh) {
		*max_via_alen = cfg_via_alen;
		return 1;
	}

	*max_via_alen = 0;

	while (rtnh_ok(rtnh, remaining)) {
		struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
		int attrlen;

		attrlen = rtnh_attrlen(rtnh);
		nla = nla_find(attrs, attrlen, RTA_VIA);
		if (nla && nla_len(nla) >=
		    offsetof(struct rtvia, rtvia_addr)) {
			int via_alen = nla_len(nla) -
				offsetof(struct rtvia, rtvia_addr);

			if (via_alen <= MAX_VIA_ALEN)
				*max_via_alen = max_t(u16, *max_via_alen,
						      via_alen);
		}

		nhs++;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* leftover implies invalid nexthop configuration, discard it */
	return remaining > 0 ? 0 : nhs;
}

static int mpls_nh_build_multi(struct mpls_route_config *cfg,
			       struct mpls_route *rt)
{
	struct rtnexthop *rtnh = cfg->rc_mp;
	struct nlattr *nla_via, *nla_newdst;
	int remaining = cfg->rc_mp_len;
	int nhs = 0;
	int err = 0;

	change_nexthops(rt) {
		int attrlen;

		nla_via = NULL;
		nla_newdst = NULL;

		err = -EINVAL;
		if (!rtnh_ok(rtnh, remaining))
			goto errout;

		/* neither weighted multipath nor any flags
		 * are supported
		 */
		if (rtnh->rtnh_hops || rtnh->rtnh_flags)
			goto errout;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *attrs = rtnh_attrs(rtnh);

			nla_via = nla_find(attrs, attrlen, RTA_VIA);
			nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
		}

		err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
				    rtnh->rtnh_ifindex, nla_via, nla_newdst);
		if (err)
			goto errout;

		if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
			rt->rt_nhn_alive--;

		rtnh = rtnh_next(rtnh, &remaining);
		nhs++;
	} endfor_nexthops(rt);

	rt->rt_nhn = nhs;

	return 0;

errout:
	return err;
}

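/* Install a route in the platform label table under RTNL. The label
 * comes from the request, or the first free one past the reserved
 * range when NLM_F_CREATE is set without an explicit label; NLM_F_EXCL,
 * NLM_F_REPLACE and NLM_F_CREATE keep their usual netlink semantics,
 * while NLM_F_APPEND is rejected.
 */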
static int mpls_route_add(struct mpls_route_config *cfg)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = cfg->rc_nlinfo.nl_net;
	struct mpls_route *rt, *old;
	int err = -EINVAL;
	u8 max_via_alen;
	unsigned index;
	int nhs;

	index = cfg->rc_label;

	/* If a label was not specified during insert pick one */
	if ((index == LABEL_NOT_SPECIFIED) &&
	    (cfg->rc_nlflags & NLM_F_CREATE)) {
		index = find_free_label(net);
	}

	/* Reserved labels may not be set */
	if (index < MPLS_LABEL_FIRST_UNRESERVED)
		goto errout;

	/* The full 20 bit range may not be supported. */
	if (index >= net->mpls.platform_labels)
		goto errout;

	/* Append makes no sense with mpls */
	err = -EOPNOTSUPP;
	if (cfg->rc_nlflags & NLM_F_APPEND)
		goto errout;

	err = -EEXIST;
	platform_label = rtnl_dereference(net->mpls.platform_label);
	old = rtnl_dereference(platform_label[index]);
	if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
		goto errout;

	err = -EEXIST;
	if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
		goto errout;

	err = -ENOENT;
	if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
		goto errout;

	err = -EINVAL;
	nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
				  cfg->rc_via_alen, &max_via_alen);
	if (nhs == 0)
		goto errout;

	err = -ENOMEM;
	rt = mpls_rt_alloc(nhs, max_via_alen);
	if (!rt)
		goto errout;

	rt->rt_protocol = cfg->rc_protocol;
	rt->rt_payload_type = cfg->rc_payload_type;

	if (cfg->rc_mp)
		err = mpls_nh_build_multi(cfg, rt);
	else
		err = mpls_nh_build_from_cfg(cfg, rt);
	if (err)
		goto freert;

	mpls_route_update(net, index, rt, &cfg->rc_nlinfo);

	return 0;

freert:
	mpls_rt_free(rt);
errout:
	return err;
}

static int mpls_route_del(struct mpls_route_config *cfg)
{
	struct net *net = cfg->rc_nlinfo.nl_net;
	unsigned index;
	int err = -EINVAL;

	index = cfg->rc_label;

	/* Reserved labels may not be removed */
	if (index < MPLS_LABEL_FIRST_UNRESERVED)
		goto errout;

	/* The full 20 bit range may not be supported */
	if (index >= net->mpls.platform_labels)
		goto errout;

	mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);

	err = 0;
errout:
	return err;
}

static void mpls_get_stats(struct mpls_dev *mdev,
			   struct mpls_link_stats *stats)
{
	struct mpls_pcpu_stats *p;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_possible_cpu(i) {
		struct mpls_link_stats local;
		unsigned int start;

		p = per_cpu_ptr(mdev->stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			local = p->stats;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets += local.rx_packets;
		stats->rx_bytes += local.rx_bytes;
		stats->tx_packets += local.tx_packets;
		stats->tx_bytes += local.tx_bytes;
		stats->rx_errors += local.rx_errors;
		stats->tx_errors += local.tx_errors;
		stats->rx_dropped += local.rx_dropped;
		stats->tx_dropped += local.tx_dropped;
		stats->rx_noroute += local.rx_noroute;
	}
}

static int mpls_fill_stats_af(struct sk_buff *skb,
			      const struct net_device *dev)
{
	struct mpls_link_stats *stats;
	struct mpls_dev *mdev;
	struct nlattr *nla;

	mdev = mpls_dev_get(dev);
	if (!mdev)
		return -ENODATA;

	nla = nla_reserve_64bit(skb, MPLS_STATS_LINK,
				sizeof(struct mpls_link_stats),
				MPLS_STATS_UNSPEC);
	if (!nla)
		return -EMSGSIZE;

	stats = nla_data(nla);
	mpls_get_stats(mdev, stats);

	return 0;
}

static size_t mpls_get_stats_af_size(const struct net_device *dev)
{
	struct mpls_dev *mdev;

	mdev = mpls_dev_get(dev);
	if (!mdev)
		return 0;

	return nla_total_size_64bit(sizeof(struct mpls_link_stats));
}

static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
				     u32 portid, u32 seq, int event,
				     unsigned int flags, int type)
{
	struct nlmsghdr *nlh;
	struct netconfmsg *ncm;
	bool all = false;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	if (type == NETCONFA_ALL)
		all = true;

	ncm = nlmsg_data(nlh);
	ncm->ncm_family = AF_MPLS;

	if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0)
		goto nla_put_failure;

	if ((all || type == NETCONFA_INPUT) &&
	    nla_put_s32(skb, NETCONFA_INPUT,
			mdev->input_enabled) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int mpls_netconf_msgsize_devconf(int type)
{
	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
			+ nla_total_size(4); /* NETCONFA_IFINDEX */
	bool all = false;

	if (type == NETCONFA_ALL)
		all = true;

	if (all || type == NETCONFA_INPUT)
		size += nla_total_size(4);

	return size;
}

static void mpls_netconf_notify_devconf(struct net *net, int type,
					struct mpls_dev *mdev)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mpls_netconf_msgsize_devconf(type), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, RTM_NEWNETCONF,
					0, type);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err);
}

static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = {
	[NETCONFA_IFINDEX] = { .len = sizeof(int) },
};

static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
				    struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[NETCONFA_MAX + 1];
	struct netconfmsg *ncm;
	struct net_device *dev;
	struct mpls_dev *mdev;
	struct sk_buff *skb;
	int ifindex;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
			  devconf_mpls_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (!tb[NETCONFA_IFINDEX])
		goto errout;

	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		goto errout;

	mdev = mpls_dev_get(dev);
	if (!mdev)
		goto errout;

	err = -ENOBUFS;
	skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
	if (!skb)
		goto errout;

	err = mpls_netconf_fill_devconf(skb, mdev,
					NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
					NETCONFA_ALL);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}

static int mpls_netconf_dump_devconf(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct hlist_head *head;
	struct net_device *dev;
	struct mpls_dev *mdev;
	int idx, s_idx;
	int h, s_h;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		rcu_read_lock();
		cb->seq = net->dev_base_seq;
		hlist_for_each_entry_rcu(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			mdev = mpls_dev_get(dev);
			if (!mdev)
				goto cont;
			if (mpls_netconf_fill_devconf(skb, mdev,
						      NETLINK_CB(cb->skb).portid,
						      cb->nlh->nlmsg_seq,
						      RTM_NEWNETCONF,
						      NLM_F_MULTI,
						      NETCONFA_ALL) < 0) {
				rcu_read_unlock();
				goto done;
			}
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
		rcu_read_unlock();
	}
done:
	cb->args[0] = h;
	cb->args[1] = idx;

	return skb->len;
}

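/* The per-device sysctl table below is shared as a template: its .data
 * fields initially hold offsets into struct mpls_dev (computed by this
 * macro) and are rewritten into real pointers when the table is
 * duplicated for a device in mpls_dev_sysctl_register().
 */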
#define MPLS_PERDEV_SYSCTL_OFFSET(field)	\
	(&((struct mpls_dev *)0)->field)

static int mpls_conf_proc(struct ctl_table *ctl, int write,
			  void __user *buffer,
			  size_t *lenp, loff_t *ppos)
{
	int oval = *(int *)ctl->data;
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write) {
		struct mpls_dev *mdev = ctl->extra1;
		int i = (int *)ctl->data - (int *)mdev;
		struct net *net = ctl->extra2;
		int val = *(int *)ctl->data;

		if (i == offsetof(struct mpls_dev, input_enabled) &&
		    val != oval) {
			mpls_netconf_notify_devconf(net,
						    NETCONFA_INPUT,
						    mdev);
		}
	}

	return ret;
}

static const struct ctl_table mpls_dev_table[] = {
	{
		.procname = "input",
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = mpls_conf_proc,
		.data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
	},
	{ }
};

static int mpls_dev_sysctl_register(struct net_device *dev,
				    struct mpls_dev *mdev)
{
	char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
	struct net *net = dev_net(dev);
	struct ctl_table *table;
	int i;

	table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
	if (!table)
		goto out;

	/* Table data contains only offsets relative to the base of
	 * the mdev at this point, so make them absolute.
	 */
	for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) {
		table[i].data = (char *)mdev + (uintptr_t)table[i].data;
		table[i].extra1 = mdev;
		table[i].extra2 = net;
	}

	snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);

	mdev->sysctl = register_net_sysctl(dev_net(dev), path, table);
	if (!mdev->sysctl)
		goto free;

	return 0;

free:
	kfree(table);
out:
	return -ENOBUFS;
}

static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev)
{
	struct ctl_table *table;

	table = mdev->sysctl->ctl_table_arg;
	unregister_net_sysctl_table(mdev->sysctl);
	kfree(table);
}

static struct mpls_dev *mpls_add_dev(struct net_device *dev)
{
	struct mpls_dev *mdev;
	int err = -ENOMEM;
	int i;

	ASSERT_RTNL();

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(err);

	mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
	if (!mdev->stats)
		goto free;

	for_each_possible_cpu(i) {
		struct mpls_pcpu_stats *mpls_stats;

		mpls_stats = per_cpu_ptr(mdev->stats, i);
		u64_stats_init(&mpls_stats->syncp);
	}

	err = mpls_dev_sysctl_register(dev, mdev);
	if (err)
		goto free;

	mdev->dev = dev;
	rcu_assign_pointer(dev->mpls_ptr, mdev);

	return mdev;

free:
	free_percpu(mdev->stats);
	kfree(mdev);
	return ERR_PTR(err);
}

static void mpls_dev_destroy_rcu(struct rcu_head *head)
{
	struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu);

	free_percpu(mdev->stats);
	kfree(mdev);
}

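/* Walk every route in the platform label table and mark nexthops that
 * go through this device dead and/or linkdown for the given netdev
 * event, dropping rt_nhn_alive accordingly; on NETDEV_UNREGISTER the
 * nexthop's device pointer is also cleared.
 */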
static void mpls_ifdown(struct net_device *dev, int event)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = dev_net(dev);
	unsigned index;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	for (index = 0; index < net->mpls.platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);

		if (!rt)
			continue;

		change_nexthops(rt) {
			if (rtnl_dereference(nh->nh_dev) != dev)
				continue;
			switch (event) {
			case NETDEV_DOWN:
			case NETDEV_UNREGISTER:
				nh->nh_flags |= RTNH_F_DEAD;
				/* fall through */
			case NETDEV_CHANGE:
				nh->nh_flags |= RTNH_F_LINKDOWN;
				ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
				break;
			}
			if (event == NETDEV_UNREGISTER)
				RCU_INIT_POINTER(nh->nh_dev, NULL);
		} endfor_nexthops(rt);
	}
}

static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = dev_net(dev);
	unsigned index;
	int alive;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	for (index = 0; index < net->mpls.platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);

		if (!rt)
			continue;

		alive = 0;
		change_nexthops(rt) {
			struct net_device *nh_dev =
				rtnl_dereference(nh->nh_dev);

			if (!(nh->nh_flags & nh_flags)) {
				alive++;
				continue;
			}
			if (nh_dev != dev)
				continue;
			alive++;
			nh->nh_flags &= ~nh_flags;
		} endfor_nexthops(rt);

		ACCESS_ONCE(rt->rt_nhn_alive) = alive;
	}
}

static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mpls_dev *mdev;
	unsigned int flags;

	if (event == NETDEV_REGISTER) {
		/* For now just support Ethernet, loopback, IPGRE, SIT and
		 * IPIP devices
		 */
		if (dev->type == ARPHRD_ETHER ||
		    dev->type == ARPHRD_LOOPBACK ||
		    dev->type == ARPHRD_IPGRE ||
		    dev->type == ARPHRD_SIT ||
		    dev->type == ARPHRD_TUNNEL) {
			mdev = mpls_add_dev(dev);
			if (IS_ERR(mdev))
				return notifier_from_errno(PTR_ERR(mdev));
		}
		return NOTIFY_OK;
	}

	mdev = mpls_dev_get(dev);
	if (!mdev)
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_DOWN:
		mpls_ifdown(dev, event);
		break;
	case NETDEV_UP:
		flags = dev_get_flags(dev);
		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
		else
			mpls_ifup(dev, RTNH_F_DEAD);
		break;
	case NETDEV_CHANGE:
		flags = dev_get_flags(dev);
		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
		else
			mpls_ifdown(dev, event);
		break;
	case NETDEV_UNREGISTER:
		mpls_ifdown(dev, event);
		mdev = mpls_dev_get(dev);
		if (mdev) {
			mpls_dev_sysctl_unregister(mdev);
			RCU_INIT_POINTER(dev->mpls_ptr, NULL);
			call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
		}
		break;
	case NETDEV_CHANGENAME:
		mdev = mpls_dev_get(dev);
		if (mdev) {
			int err;

			mpls_dev_sysctl_unregister(mdev);
			err = mpls_dev_sysctl_register(dev, mdev);
			if (err)
				return notifier_from_errno(err);
		}
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mpls_dev_notifier = {
	.notifier_call = mpls_dev_notify,
};

static int nla_put_via(struct sk_buff *skb,
		       u8 table, const void *addr, int alen)
{
	static const int table_to_family[NEIGH_NR_TABLES + 1] = {
		AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
	};
	struct nlattr *nla;
	struct rtvia *via;
	int family = AF_UNSPEC;

	nla = nla_reserve(skb, RTA_VIA, alen + 2);
	if (!nla)
		return -EMSGSIZE;

	if (table <= NEIGH_NR_TABLES)
		family = table_to_family[table];

	via = nla_data(nla);
	via->rtvia_family = family;
	memcpy(via->rtvia_addr, addr, alen);
	return 0;
}

int nla_put_labels(struct sk_buff *skb, int attrtype,
		   u8 labels, const u32 label[])
{
	struct nlattr *nla;
	struct mpls_shim_hdr *nla_label;
	bool bos;
	int i;

	nla = nla_reserve(skb, attrtype, labels * 4);
	if (!nla)
		return -EMSGSIZE;

	nla_label = nla_data(nla);
	bos = true;
	for (i = labels - 1; i >= 0; i--) {
		nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos);
		bos = false;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nla_put_labels);

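/* Decode a netlink attribute carrying a stack of MPLS shim headers
 * into an array of label values: the length must be a multiple of
 * four, TTL and TC must be clear, the bottom-of-stack bit must be set
 * only on the last entry, at most max_labels entries are accepted,
 * and implicit-null labels are rejected.
 */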
int nla_get_labels(const struct nlattr *nla,
		   u32 max_labels, u8 *labels, u32 label[])
{
	unsigned len = nla_len(nla);
	unsigned nla_labels;
	struct mpls_shim_hdr *nla_label;
	bool bos;
	int i;

	/* len needs to be an even multiple of 4 (the label size) */
	if (len & 3)
		return -EINVAL;

	/* Limit the number of new labels allowed */
	nla_labels = len / 4;
	if (nla_labels > max_labels)
		return -EINVAL;

	nla_label = nla_data(nla);
	bos = true;
	for (i = nla_labels - 1; i >= 0; i--, bos = false) {
		struct mpls_entry_decoded dec;

		dec = mpls_entry_decode(nla_label + i);

		/* Ensure the bottom of stack flag is properly set
		 * and ttl and tc are both clear.
		 */
		if ((dec.bos != bos) || dec.ttl || dec.tc)
			return -EINVAL;

		switch (dec.label) {
		case MPLS_LABEL_IMPLNULL:
			/* RFC3032: This is a label that an LSR may
			 * assign and distribute, but which never
			 * actually appears in the encapsulation.
			 */
			return -EINVAL;
		}

		label[i] = dec.label;
	}
	*labels = nla_labels;
	return 0;
}
EXPORT_SYMBOL_GPL(nla_get_labels);

int nla_get_via(const struct nlattr *nla, u8 *via_alen,
		u8 *via_table, u8 via_addr[])
{
	struct rtvia *via = nla_data(nla);
	int err = -EINVAL;
	int alen;

	if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
		goto errout;
	alen = nla_len(nla) -
			offsetof(struct rtvia, rtvia_addr);
	if (alen > MAX_VIA_ALEN)
		goto errout;

	/* Validate the address family */
	switch (via->rtvia_family) {
	case AF_PACKET:
		*via_table = NEIGH_LINK_TABLE;
		break;
	case AF_INET:
		*via_table = NEIGH_ARP_TABLE;
		if (alen != 4)
			goto errout;
		break;
	case AF_INET6:
		*via_table = NEIGH_ND_TABLE;
		if (alen != 16)
			goto errout;
		break;
	default:
		/* Unsupported address family */
		goto errout;
	}

	memcpy(via_addr, via->rtvia_addr, alen);
	*via_alen = alen;
	err = 0;

errout:
	return err;
}

static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct mpls_route_config *cfg)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	int index;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	if (rtm->rtm_family != AF_MPLS)
		goto errout;
	if (rtm->rtm_dst_len != 20)
		goto errout;
	if (rtm->rtm_src_len != 0)
		goto errout;
	if (rtm->rtm_tos != 0)
		goto errout;
	if (rtm->rtm_table != RT_TABLE_MAIN)
		goto errout;
	/* Any value is acceptable for rtm_protocol */

	/* As mpls uses destination specific addresses
	 * (or source specific address in the case of multicast)
	 * all addresses have universal scope.
	 */
	if (rtm->rtm_scope != RT_SCOPE_UNIVERSE)
		goto errout;
	if (rtm->rtm_type != RTN_UNICAST)
		goto errout;
	if (rtm->rtm_flags != 0)
		goto errout;

	cfg->rc_label = LABEL_NOT_SPECIFIED;
	cfg->rc_protocol = rtm->rtm_protocol;
	cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC;
	cfg->rc_nlflags = nlh->nlmsg_flags;
	cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->rc_nlinfo.nlh = nlh;
	cfg->rc_nlinfo.nl_net = sock_net(skb->sk);

	for (index = 0; index <= RTA_MAX; index++) {
		struct nlattr *nla = tb[index];

		if (!nla)
			continue;

		switch (index) {
		case RTA_OIF:
			cfg->rc_ifindex = nla_get_u32(nla);
			break;
		case RTA_NEWDST:
			if (nla_get_labels(nla, MAX_NEW_LABELS,
					   &cfg->rc_output_labels,
					   cfg->rc_output_label))
				goto errout;
			break;
		case RTA_DST:
		{
			u8 label_count;

			if (nla_get_labels(nla, 1, &label_count,
					   &cfg->rc_label))
				goto errout;

			/* Reserved labels may not be set */
			if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
				goto errout;

			break;
		}
		case RTA_VIA:
		{
			if (nla_get_via(nla, &cfg->rc_via_alen,
					&cfg->rc_via_table, cfg->rc_via))
				goto errout;
			break;
		}
		case RTA_MULTIPATH:
		{
			cfg->rc_mp = nla_data(nla);
			cfg->rc_mp_len = nla_len(nla);
			break;
		}
		default:
			/* Unsupported attribute */
			goto errout;
		}
	}

	err = 0;
errout:
	return err;
}

static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct mpls_route_config cfg;
	int err;

	err = rtm_to_route_config(skb, nlh, &cfg);
	if (err < 0)
		return err;

	return mpls_route_del(&cfg);
}

static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct mpls_route_config cfg;
	int err;

	err = rtm_to_route_config(skb, nlh, &cfg);
	if (err < 0)
		return err;

	return mpls_route_add(&cfg);
}

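/* Fill one RTM_NEWROUTE/RTM_DELROUTE message for a label: a single
 * nexthop is emitted as flat RTA_* attributes, while multipath routes
 * are packed as rtnexthop entries inside RTA_MULTIPATH.
 */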
static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
			   u32 label, struct mpls_route *rt, int flags)
{
	struct net_device *dev;
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_MPLS;
	rtm->rtm_dst_len = 20;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = 0;
	rtm->rtm_table = RT_TABLE_MAIN;
	rtm->rtm_protocol = rt->rt_protocol;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_type = RTN_UNICAST;
	rtm->rtm_flags = 0;

	if (nla_put_labels(skb, RTA_DST, 1, &label))
		goto nla_put_failure;
	if (rt->rt_nhn == 1) {
		const struct mpls_nh *nh = rt->rt_nh;

		if (nh->nh_labels &&
		    nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
				   nh->nh_label))
			goto nla_put_failure;
		if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
		    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
				nh->nh_via_alen))
			goto nla_put_failure;
		dev = rtnl_dereference(nh->nh_dev);
		if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
			goto nla_put_failure;
		if (nh->nh_flags & RTNH_F_LINKDOWN)
			rtm->rtm_flags |= RTNH_F_LINKDOWN;
		if (nh->nh_flags & RTNH_F_DEAD)
			rtm->rtm_flags |= RTNH_F_DEAD;
	} else {
		struct rtnexthop *rtnh;
		struct nlattr *mp;
		int dead = 0;
		int linkdown = 0;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		for_nexthops(rt) {
			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
			if (!rtnh)
				goto nla_put_failure;

			dev = rtnl_dereference(nh->nh_dev);
			if (dev)
				rtnh->rtnh_ifindex = dev->ifindex;
			if (nh->nh_flags & RTNH_F_LINKDOWN) {
				rtnh->rtnh_flags |= RTNH_F_LINKDOWN;
				linkdown++;
			}
			if (nh->nh_flags & RTNH_F_DEAD) {
				rtnh->rtnh_flags |= RTNH_F_DEAD;
				dead++;
			}

			if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
							    nh->nh_labels,
							    nh->nh_label))
				goto nla_put_failure;
			if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
			    nla_put_via(skb, nh->nh_via_table,
					mpls_nh_via(rt, nh),
					nh->nh_via_alen))
				goto nla_put_failure;

			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
		} endfor_nexthops(rt);

		if (linkdown == rt->rt_nhn)
			rtm->rtm_flags |= RTNH_F_LINKDOWN;
		if (dead == rt->rt_nhn)
			rtm->rtm_flags |= RTNH_F_DEAD;

		nla_nest_end(skb, mp);
	}

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	unsigned int index;

	ASSERT_RTNL();

	index = cb->args[0];
	if (index < MPLS_LABEL_FIRST_UNRESERVED)
		index = MPLS_LABEL_FIRST_UNRESERVED;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (; index < platform_labels; index++) {
		struct mpls_route *rt;

		rt = rtnl_dereference(platform_label[index]);
		if (!rt)
			continue;

		if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
				    index, rt, NLM_F_MULTI) < 0)
			break;
	}
	cb->args[0] = index;

	return skb->len;
}

static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
{
	size_t payload =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4); /* RTA_DST */

	if (rt->rt_nhn == 1) {
		struct mpls_nh *nh = rt->rt_nh;

		if (nh->nh_dev)
			payload += nla_total_size(4); /* RTA_OIF */
		if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */
			payload += nla_total_size(2 + nh->nh_via_alen);
		if (nh->nh_labels) /* RTA_NEWDST */
			payload += nla_total_size(nh->nh_labels * 4);
	} else {
		/* each nexthop is packed in an attribute */
		size_t nhsize = 0;

		for_nexthops(rt) {
			nhsize += nla_total_size(sizeof(struct rtnexthop));
			/* RTA_VIA */
			if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
				nhsize += nla_total_size(2 + nh->nh_via_alen);
			if (nh->nh_labels)
				nhsize += nla_total_size(nh->nh_labels * 4);
		} endfor_nexthops(rt);
		/* nested attribute */
		payload += nla_total_size(nhsize);
	}

	return payload;
}

static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
		       struct nlmsghdr *nlh, struct net *net, u32 portid,
		       unsigned int nlm_flags)
{
	struct sk_buff *skb;
	u32 seq = nlh ? nlh->nlmsg_seq : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in lfib_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);

	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
}

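/* Resize the per-namespace platform label table to hold "limit"
 * labels. The new table and the IPv4/IPv6 explicit-null routes are
 * prepared outside the RTNL lock; the swap itself happens under RTNL,
 * freeing any routes that fall beyond the new limit, and the old
 * table is released only after an RCU grace period.
 */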
static int resize_platform_label_table(struct net *net, size_t limit)
{
	size_t size = sizeof(struct mpls_route *) * limit;
	size_t old_limit;
	size_t cp_size;
	struct mpls_route __rcu **labels = NULL, **old;
	struct mpls_route *rt0 = NULL, *rt2 = NULL;
	unsigned index;

	if (size) {
		labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
		if (!labels)
			labels = vzalloc(size);

		if (!labels)
			goto nolabels;
	}

	/* In case the predefined labels need to be populated */
	if (limit > MPLS_LABEL_IPV4NULL) {
		struct net_device *lo = net->loopback_dev;

		rt0 = mpls_rt_alloc(1, lo->addr_len);
		if (!rt0)
			goto nort0;
		RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
		rt0->rt_protocol = RTPROT_KERNEL;
		rt0->rt_payload_type = MPT_IPV4;
		rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
		rt0->rt_nh->nh_via_alen = lo->addr_len;
		memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
		       lo->addr_len);
	}
	if (limit > MPLS_LABEL_IPV6NULL) {
		struct net_device *lo = net->loopback_dev;

		rt2 = mpls_rt_alloc(1, lo->addr_len);
		if (!rt2)
			goto nort2;
		RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
		rt2->rt_protocol = RTPROT_KERNEL;
		rt2->rt_payload_type = MPT_IPV6;
		rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
		rt2->rt_nh->nh_via_alen = lo->addr_len;
		memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
		       lo->addr_len);
	}

	rtnl_lock();
	/* Remember the original table */
	old = rtnl_dereference(net->mpls.platform_label);
	old_limit = net->mpls.platform_labels;

	/* Free any labels beyond the new table */
	for (index = limit; index < old_limit; index++)
		mpls_route_update(net, index, NULL, NULL);

	/* Copy over the old labels */
	cp_size = size;
	if (old_limit < limit)
		cp_size = old_limit * sizeof(struct mpls_route *);

	memcpy(labels, old, cp_size);

	/* If needed set the predefined labels */
	if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
	    (limit > MPLS_LABEL_IPV6NULL)) {
		RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
		rt2 = NULL;
	}

	if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
	    (limit > MPLS_LABEL_IPV4NULL)) {
		RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
		rt0 = NULL;
	}

	/* Update the global pointers */
	net->mpls.platform_labels = limit;
	rcu_assign_pointer(net->mpls.platform_label, labels);

	rtnl_unlock();

	mpls_rt_free(rt2);
	mpls_rt_free(rt0);

	if (old) {
		synchronize_rcu();
		kvfree(old);
	}
	return 0;

nort2:
	mpls_rt_free(rt0);
nort0:
	kvfree(labels);
nolabels:
	return -ENOMEM;
}

static int mpls_platform_labels(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = table->data;
	int platform_labels = net->mpls.platform_labels;
	int ret;
	struct ctl_table tmp = {
		.procname = table->procname,
		.data = &platform_labels,
		.maxlen = sizeof(int),
		.mode = table->mode,
		.extra1 = &zero,
		.extra2 = &label_limit,
	};

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0)
		ret = resize_platform_label_table(net, platform_labels);

	return ret;
}

static const struct ctl_table mpls_table[] = {
	{
		.procname = "platform_labels",
		.data = NULL,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = mpls_platform_labels,
	},
	{ }
};

static int mpls_net_init(struct net *net)
{
	struct ctl_table *table;

	net->mpls.platform_labels = 0;
	net->mpls.platform_label = NULL;

	table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
	if (table == NULL)
		return -ENOMEM;

	table[0].data = net;
	net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
	if (net->mpls.ctl == NULL) {
		kfree(table);
		return -ENOMEM;
	}

	return 0;
}

static void mpls_net_exit(struct net *net)
{
	struct mpls_route __rcu **platform_label;
	size_t platform_labels;
	struct ctl_table *table;
	unsigned int index;

	table = net->mpls.ctl->ctl_table_arg;
	unregister_net_sysctl_table(net->mpls.ctl);
	kfree(table);

	/* An rcu grace period has passed since there was a device in
	 * the network namespace (and thus since the last in-flight
	 * packet left this network namespace). This is because
	 * unregister_netdevice_many and netdev_run_todo have completed
	 * for each network device that was in this network namespace.
	 *
	 * As such no additional rcu synchronization is necessary when
	 * freeing the platform_label table.
	 */
	rtnl_lock();
	platform_label = rtnl_dereference(net->mpls.platform_label);
	platform_labels = net->mpls.platform_labels;
	for (index = 0; index < platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);

		RCU_INIT_POINTER(platform_label[index], NULL);
		mpls_rt_free(rt);
	}
	rtnl_unlock();

	kvfree(platform_label);
}

static struct pernet_operations mpls_net_ops = {
	.init = mpls_net_init,
	.exit = mpls_net_exit,
};

static struct rtnl_af_ops mpls_af_ops __read_mostly = {
	.family = AF_MPLS,
	.fill_stats_af = mpls_fill_stats_af,
	.get_stats_af_size = mpls_get_stats_af_size,
};

static int __init mpls_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);

	err = register_pernet_subsys(&mpls_net_ops);
	if (err)
		goto out;

	err = register_netdevice_notifier(&mpls_dev_notifier);
	if (err)
		goto out_unregister_pernet;

	dev_add_pack(&mpls_packet_type);

	rtnl_af_register(&mpls_af_ops);

	rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL);
	rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL);
	rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL);
	rtnl_register(PF_MPLS, RTM_GETNETCONF, mpls_netconf_get_devconf,
		      mpls_netconf_dump_devconf, NULL);
	err = 0;
out:
	return err;

out_unregister_pernet:
	unregister_pernet_subsys(&mpls_net_ops);
	goto out;
}
module_init(mpls_init);

static void __exit mpls_exit(void)
{
	rtnl_unregister_all(PF_MPLS);
	rtnl_af_unregister(&mpls_af_ops);
	dev_remove_pack(&mpls_packet_type);
	unregister_netdevice_notifier(&mpls_dev_notifier);
	unregister_pernet_subsys(&mpls_net_ops);
}
module_exit(mpls_exit);

MODULE_DESCRIPTION("MultiProtocol Label Switching");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_MPLS);