/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetics was wrong.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/bpf.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	rtnl_calcit_func	calcit;
};
static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);
static struct sk_buff *defer_kfree_skb_list;
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail)
{
	if (head && tail) {
		tail->next = defer_kfree_skb_list;
		defer_kfree_skb_list = head;
	}
}
EXPORT_SYMBOL(rtnl_kfree_skbs);
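/* Illustrative usage sketch (not part of this file): a caller that already
 * holds the RTNL lock, such as a qdisc teardown path, can chain skbs into a
 * singly linked list and hand the whole list to rtnl_kfree_skbs(); the list
 * is then freed from __rtnl_unlock() after the mutex is dropped, e.g.
 *
 *	rtnl_lock();
 *	...
 *	rtnl_kfree_skbs(head, tail);	// head..tail linked via skb->next
 *	rtnl_unlock();			// the queued skbs are freed here
 *
 * "head" and "tail" are placeholder names for the caller's own list.
 */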
void __rtnl_unlock(void)
{
	struct sk_buff *head = defer_kfree_skb_list;

	defer_kfree_skb_list = NULL;

	mutex_unlock(&rtnl_mutex);

	while (head) {
		struct sk_buff *next = head->next;

		kfree_skb(head);
		cond_resched();
		head = next;
	}
}
void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);
int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);
int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);
#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}
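/* Example: RTM_BASE is 16 and message types come in groups of four (NEW,
 * DEL, GET, SET), so RTM_NEWLINK (16) maps to index 0, RTM_GETLINK (18) to
 * index 2, RTM_NEWROUTE (24) to index 8, and so on.
 */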
static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
{
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];
	else
		tab = NULL;

	if (tab == NULL || tab[msgindex].doit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].doit;
}
static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
{
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];
	else
		tab = NULL;

	if (tab == NULL || tab[msgindex].dumpit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].dumpit;
}
static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
{
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];
	else
		tab = NULL;

	if (tab == NULL || tab[msgindex].calcit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].calcit;
}
/**
 * __rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @calcit: Function pointer to calc size of dump message
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_register(int protocol, int msgtype,
		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		    rtnl_calcit_func calcit)
{
	struct rtnl_link *tab;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	tab = rtnl_msg_handlers[protocol];
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
		if (tab == NULL)
			return -ENOBUFS;

		rtnl_msg_handlers[protocol] = tab;
	}

	if (doit)
		tab[msgindex].doit = doit;

	if (dumpit)
		tab[msgindex].dumpit = dumpit;

	if (calcit)
		tab[msgindex].calcit = calcit;

	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_register);
/**
 * rtnl_register - Register a rtnetlink message type
 *
 * Identical to __rtnl_register() but panics on failure. This is useful
 * as failure of this function is very unlikely, it can only happen due
 * to lack of memory when allocating the chain to store all message
 * handlers for a protocol. Meant for use in init functions where lack
 * of memory implies no sense in continuing.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   rtnl_calcit_func calcit)
{
	if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0)
		panic("Unable to register rtnetlink message handler, "
		      "protocol = %d, message type = %d\n",
		      protocol, msgtype);
}
EXPORT_SYMBOL_GPL(rtnl_register);
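/* Illustrative usage sketch: a protocol typically registers its handlers
 * from an init function, e.g.
 *
 *	rtnl_register(PF_FOO, RTM_GETFOO, foo_get_doit, foo_dumpit, NULL);
 *
 * PF_FOO, RTM_GETFOO and the foo_* callbacks are placeholders for the
 * protocol's own family, message type and handlers; PF_UNSPEC can be used
 * instead to install fallback handlers for all families.
 */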
/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	if (rtnl_msg_handlers[protocol] == NULL)
		return -ENOENT;

	rtnl_msg_handlers[protocol][msgindex].doit = NULL;
	rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
	rtnl_msg_handlers[protocol][msgindex].calcit = NULL;

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);
/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol : Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	kfree(rtnl_msg_handlers[protocol]);
	rtnl_msg_handlers[protocol] = NULL;
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}

	return NULL;
}
/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating a device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if (ops->setup && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);
/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
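/* Illustrative usage sketch: a virtual-device driver usually declares a
 * struct rtnl_link_ops and registers it from its module init, e.g.
 *
 *	static struct rtnl_link_ops foo_link_ops __read_mostly = {
 *		.kind	= "foo",
 *		.setup	= foo_setup,
 *	};
 *
 *	err = rtnl_link_register(&foo_link_ops);
 *
 * "foo" and foo_setup are placeholders. Note that __rtnl_link_register()
 * above fills in a default .dellink (unregister_netdevice_queue) when
 * .setup is provided but .dellink is not.
 */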
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}
/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		for_each_net(net) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with cleanup_net() */
	mutex_lock(&net_mutex);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);
static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		return 0;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	return nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);
}
static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}
static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}
/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
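/* Illustrative usage sketch: an address family module provides a struct
 * rtnl_af_ops and registers it once, e.g.
 *
 *	static struct rtnl_af_ops foo_af_ops = {
 *		.family		  = AF_FOO,
 *		.fill_link_af	  = foo_fill_link_af,
 *		.get_link_af_size = foo_get_link_af_size,
 *	};
 *
 *	rtnl_af_register(&foo_af_ops);
 *
 * AF_FOO and the callbacks are placeholders; the callbacks are what
 * rtnl_fill_ifinfo() below uses to emit the per-family IFLA_AF_SPEC data.
 */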
/**
 * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
 */
void __rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	__rtnl_af_unregister(ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}

	return size;
}
static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (master_dev && master_dev->rtnl_link_ops)
		return true;
	return false;
}
static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *)dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}
static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}
static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;
	int err = 0;

	NETLINK_CB(skb).dst_group = group;
	if (echo)
		refcount_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
	if (echo)
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
	return err;
}
int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);
void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;
	int report = 0;

	if (nlh)
		report = nlmsg_report(nlh);

	nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
EXPORT_SYMBOL(rtnl_notify);
void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	mx = nla_nest_start(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				if (!user_features)
					continue;
				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
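/* Worked example: inside the RTA_METRICS nest the attribute type is the
 * RTAX_* index plus one, so a route with metrics[RTAX_MTU - 1] == 1500
 * ends up as a nested attribute of type RTAX_MTU carrying the u32 1500;
 * RTAX_CC_ALGO is the one exception and is emitted as a string.
 */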
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
		.rta_used = dst->__use,
		.rta_clntref = atomic_read(&(dst->__refcnt)),
		.rta_error = error,
		.rta_id = id,
	};

	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_DORMANT:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock_bh(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);
	}
}
static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}
static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
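/* Worked example (illustrative): if the device currently has
 * IFF_UP|IFF_BROADCAST set and the request carries ifi_flags = IFF_UP with
 * ifi_change = IFF_UP|IFF_PROMISC, the result keeps IFF_BROADCAST from the
 * device, takes IFF_UP from the request, and clears IFF_PROMISC because the
 * request covers that bit without setting it.
 */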
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;

	a->rx_nohandler = b->rx_nohandler;
}
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(0);
		size += num_vfs *
			(nla_total_size(0) +
			 nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
			 nla_total_size(MAX_VLAN_LIST_LEN *
					sizeof(struct ifla_vf_vlan_info)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 nla_total_size(0) + /* nest IFLA_VF_STATS */
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size_64bit(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}
static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PROT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}
static size_t rtnl_xdp_size(void)
{
	size_t xdp_size = nla_total_size(0) +	/* nest IFLA_XDP */
			  nla_total_size(1) +	/* XDP_ATTACHED */
			  nla_total_size(4);	/* XDP_PROG_ID */

	return xdp_size;
}
static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size_64bit(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(4) /* IFLA_GROUP */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */
	       + rtnl_xdp_size() /* IFLA_XDP */
	       + nla_total_size(4)  /* IFLA_EVENT */
	       + nla_total_size(1); /* IFLA_PROTO_DOWN */
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}
static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}
static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}
static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}
static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
		return -EMSGSIZE;

	return 0;
}
static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};

	err = switchdev_port_attr_get(dev, &attr);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
		    attr.u.ppid.id))
		return -EMSGSIZE;

	return 0;
}
static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct rtnl_link_stats64 *sp;
	struct nlattr *attr;

	attr = nla_reserve_64bit(skb, IFLA_STATS64,
				 sizeof(struct rtnl_link_stats64), IFLA_PAD);
	if (!attr)
		return -EMSGSIZE;

	sp = nla_data(attr);
	dev_get_stats(dev, sp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), sp);

	return 0;
}
static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct nlattr *vf, *vfstats, *vfvlanlist;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_vlan_info vf_vlan_info;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_info ivi;

	memset(&ivi, 0, sizeof(ivi));

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query.  Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	/* VLAN Protocol by default is 802.1Q */
	ivi.vlan_proto = htons(ETH_P_8021Q);
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));

	vf_mac.vf =
		vf_vlan.vf =
		vf_vlan_info.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_vlan_info.vlan = ivi.vlan;
	vf_vlan_info.qos = ivi.qos;
	vf_vlan_info.vlan_proto = ivi.vlan_proto;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start(skb, IFLA_VF_INFO);
	if (!vf)
		goto nla_put_vfinfo_failure;
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		goto nla_put_vf_failure;
	vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
	if (!vfvlanlist)
		goto nla_put_vf_failure;
	if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
		    &vf_vlan_info)) {
		nla_nest_cancel(skb, vfvlanlist);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfvlanlist);
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						  &vf_stats);
	vfstats = nla_nest_start(skb, IFLA_VF_STATS);
	if (!vfstats)
		goto nla_put_vf_failure;
	if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
			      vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
			      vf_stats.tx_packets, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES,
			      vf_stats.rx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES,
			      vf_stats.tx_bytes, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
			      vf_stats.multicast, IFLA_VF_STATS_PAD)) {
		nla_nest_cancel(skb, vfstats);
		goto nla_put_vf_failure;
	}
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;

nla_put_vf_failure:
	nla_nest_cancel(skb, vf);
nla_put_vfinfo_failure:
	nla_nest_cancel(skb, vfinfo);
	return -EMSGSIZE;
}
static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map;

	memset(&map, 0, sizeof(map));
	map.mem_start = dev->mem_start;
	map.mem_end = dev->mem_end;
	map.base_addr = dev->base_addr;
	map.irq = dev->irq;
	map.dma = dev->dma;
	map.port = dev->if_port;

	if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD))
		return -EMSGSIZE;

	return 0;
}
static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	const struct bpf_prog *generic_xdp_prog;

	ASSERT_RTNL();

	*prog_id = 0;
	generic_xdp_prog = rtnl_dereference(dev->xdp_prog);
	if (generic_xdp_prog) {
		*prog_id = generic_xdp_prog->aux->id;
		return XDP_ATTACHED_SKB;
	}
	if (!ops->ndo_xdp)
		return XDP_ATTACHED_NONE;

	return __dev_xdp_attached(dev, ops->ndo_xdp, prog_id);
}
static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *xdp;
	u32 prog_id;
	int err;

	xdp = nla_nest_start(skb, IFLA_XDP);
	if (!xdp)
		return -EMSGSIZE;

	err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
			 rtnl_xdp_attached_mode(dev, &prog_id));
	if (err)
		goto err_cancel;

	if (prog_id) {
		err = nla_put_u32(skb, IFLA_XDP_PROG_ID, prog_id);
		if (err)
			goto err_cancel;
	}

	nla_nest_end(skb, xdp);
	return 0;

err_cancel:
	nla_nest_cancel(skb, xdp);
	return err;
}
static u32 rtnl_get_event(unsigned long event)
{
	u32 rtnl_event_type = IFLA_EVENT_NONE;

	switch (event) {
	case NETDEV_REBOOT:
		rtnl_event_type = IFLA_EVENT_REBOOT;
		break;
	case NETDEV_FEAT_CHANGE:
		rtnl_event_type = IFLA_EVENT_FEATURES;
		break;
	case NETDEV_BONDING_FAILOVER:
		rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER;
		break;
	case NETDEV_NOTIFY_PEERS:
		rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS;
		break;
	case NETDEV_RESEND_IGMP:
		rtnl_event_type = IFLA_EVENT_IGMP_RESEND;
		break;
	case NETDEV_CHANGEINFODATA:
		rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS;
		break;
	default:
		break;
	}

	return rtnl_event_type;
}
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask,
			    u32 event)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct nlattr *af_spec;
	struct rtnl_af_ops *af_ops;
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
	    nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
	    (upper_dev &&
	     nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (dev->qdisc &&
	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
	    (dev->ifalias &&
	     nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_changes)) ||
	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	if (event != IFLA_EVENT_NONE) {
		if (nla_put_u32(skb, IFLA_EVENT, event))
			goto nla_put_failure;
	}

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
	    nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
	    ext_filter_mask & RTEXT_FILTER_VF) {
		int i;
		struct nlattr *vfinfo;
		int num_vfs = dev_num_vf(dev->dev.parent);

		vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
		if (!vfinfo)
			goto nla_put_failure;
		for (i = 0; i < num_vfs; i++) {
			if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
				goto nla_put_failure;
		}

		nla_nest_end(skb, vfinfo);
	}

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (rtnl_xdp_fill(skb, dev))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (dev->rtnl_link_ops &&
	    dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(dev_net(dev), link_net);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				goto nla_put_failure;
		}
	}

	if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
		goto nla_put_failure;

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->fill_link_af) {
			struct nlattr *af;
			int err;

			if (!(af = nla_nest_start(skb, af_ops->family)))
				goto nla_put_failure;

			err = af_ops->fill_link_af(skb, dev, ext_filter_mask);

			/*
			 * Caller may return ENODATA to indicate that there
			 * was no data to be dumped. This is not an error, it
			 * means we should trim the attribute header and
			 * continue.
			 */
			if (err == -ENODATA)
				nla_nest_cancel(skb, af);
			else if (err < 0)
				goto nla_put_failure;

			nla_nest_end(skb, af);
		}
	}

	nla_nest_end(skb, af_spec);

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },
	[IFLA_NET_NS_FD]	= { .type = NLA_U32 },
	[IFLA_IFALIAS]		= { .type = NLA_STRING, .len = IFALIASZ-1 },
	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },	/* ignored */
	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
	[IFLA_XDP]		= { .type = NLA_NESTED },
	[IFLA_EVENT]		= { .type = NLA_U32 },
	[IFLA_GROUP]		= { .type = NLA_U32 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
	[IFLA_INFO_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_DATA]	= { .type = NLA_NESTED },
	[IFLA_INFO_SLAVE_KIND]	= { .type = NLA_STRING },
	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
};
static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
	[IFLA_VF_VLAN_LIST]	= { .type = NLA_NESTED },
	[IFLA_VF_TX_RATE]	= { .len = sizeof(struct ifla_vf_tx_rate) },
	[IFLA_VF_SPOOFCHK]	= { .len = sizeof(struct ifla_vf_spoofchk) },
	[IFLA_VF_RATE]		= { .len = sizeof(struct ifla_vf_rate) },
	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
	[IFLA_VF_IB_NODE_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
	[IFLA_VF_IB_PORT_GUID]	= { .len = sizeof(struct ifla_vf_guid) },
};
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
	[IFLA_PORT_VF]		= { .type = NLA_U32 },
	[IFLA_PORT_PROFILE]	= { .type = NLA_STRING,
				    .len = PORT_PROFILE_MAX },
	[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
				      .len = PORT_UUID_MAX },
	[IFLA_PORT_HOST_UUID]	= { .type = NLA_STRING,
				    .len = PORT_UUID_MAX },
	[IFLA_PORT_REQUEST]	= { .type = NLA_U8, },
	[IFLA_PORT_RESPONSE]	= { .type = NLA_U16, },

	/* Unused, but we need to keep it here since user space could
	 * fill it. It's also broken with regard to NLA_BINARY use in
	 * combination with structs.
	 */
	[IFLA_PORT_VSI_TYPE]	= { .type = NLA_BINARY,
				    .len = sizeof(struct ifla_port_vsi) },
};
static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
	[IFLA_XDP_FD]		= { .type = NLA_S32 },
	[IFLA_XDP_ATTACHED]	= { .type = NLA_U8 },
	[IFLA_XDP_FLAGS]	= { .type = NLA_U32 },
	[IFLA_XDP_PROG_ID]	= { .type = NLA_U32 },
};
static const struct rtnl_link_ops *linkinfo_to_kind_ops(const struct nlattr *nla)
{
	const struct rtnl_link_ops *ops = NULL;
	struct nlattr *linfo[IFLA_INFO_MAX + 1];

	if (nla_parse_nested(linfo, IFLA_INFO_MAX, nla,
			     ifla_info_policy, NULL) < 0)
		return NULL;

	if (linfo[IFLA_INFO_KIND]) {
		char kind[MODULE_NAME_LEN];

		nla_strlcpy(kind, linfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	}

	return ops;
}
static bool link_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = netdev_master_upper_dev_get(dev);
	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}
static bool link_kind_filtered(const struct net_device *dev,
			       const struct rtnl_link_ops *kind_ops)
{
	if (kind_ops && dev->rtnl_link_ops != kind_ops)
		return true;

	return false;
}
static bool link_dump_filtered(struct net_device *dev,
			       int master_idx,
			       const struct rtnl_link_ops *kind_ops)
{
	if (link_master_filtered(dev, master_idx) ||
	    link_kind_filtered(dev, kind_ops))
		return true;

	return false;
}
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int h, s_h;
	int idx = 0, s_idx;
	struct net_device *dev;
	struct hlist_head *head;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	const struct rtnl_link_ops *kind_ops = NULL;
	unsigned int flags = NLM_F_MULTI;
	int master_idx = 0;
	int err;
	int hdrlen;

	s_h = cb->args[0];
	s_idx = cb->args[1];

	cb->seq = net->dev_base_seq;

	/* A hack to preserve kernel<->userspace interface.
	 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
	 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
	 * what iproute2 < v3.9.0 used.
	 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
	 * attribute, its netlink message is shorter than struct ifinfomsg.
	 */
	hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX,
			ifla_policy, NULL) >= 0) {
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

		if (tb[IFLA_MASTER])
			master_idx = nla_get_u32(tb[IFLA_MASTER]);

		if (tb[IFLA_LINKINFO])
			kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);

		if (master_idx || kind_ops)
			flags |= NLM_F_DUMP_FILTERED;
	}

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (link_dump_filtered(dev, master_idx, kind_ops))
				goto cont;
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
					       NETLINK_CB(cb->skb).portid,
					       cb->nlh->nlmsg_seq, 0,
					       flags,
					       ext_filter_mask, 0);
			if (err < 0) {
				if (likely(skb->len))
					goto out;

				goto out_err;
			}

			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	err = skb->len;
out_err:
	cb->args[1] = idx;
	cb->args[0] = h;

	return err;
}
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
			struct netlink_ext_ack *exterr)
{
	return nla_parse(tb, IFLA_MAX, head, len, ifla_policy, exterr);
}
EXPORT_SYMBOL(rtnl_nla_parse_ifla);
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
	struct net *net;
	/* Examine the link attributes and figure out which
	 * network namespace we are talking about.
	 */
	if (tb[IFLA_NET_NS_PID])
		net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
	else if (tb[IFLA_NET_NS_FD])
		net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
	else
		net = get_net(src_net);
	return net;
}
EXPORT_SYMBOL(rtnl_link_get_net);
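/* Illustrative note: userspace selects the target namespace with at most one
 * of these attributes, e.g. "ip link add ... netns NAME" makes iproute2
 * attach an IFLA_NET_NS_FD (or IFLA_NET_NS_PID) attribute; when neither is
 * present the source namespace is simply reference-counted and reused.
 */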
static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
	if (dev) {
		if (tb[IFLA_ADDRESS] &&
		    nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
			return -EINVAL;

		if (tb[IFLA_BROADCAST] &&
		    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
			return -EINVAL;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem, err;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
				return -EAFNOSUPPORT;

			if (!af_ops->set_link_af)
				return -EOPNOTSUPP;

			if (af_ops->validate_link_af) {
				err = af_ops->validate_link_af(dev, af);
				if (err < 0)
					return err;
			}
		}
	}

	return 0;
}
static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
				  int guid_type)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
}
static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
{
	if (dev->type != ARPHRD_INFINIBAND)
		return -EOPNOTSUPP;

	return handle_infiniband_guid(dev, ivt, guid_type);
}
static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err = -EINVAL;

	if (tb[IFLA_VF_MAC]) {
		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_mac)
			err = ops->ndo_set_vf_mac(dev, ivm->vf,
						  ivm->mac);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN]) {
		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_vlan)
			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
						   ivv->qos,
						   htons(ETH_P_8021Q));
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_VLAN_LIST]) {
		struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
		struct nlattr *attr;
		int rem, len = 0;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_vlan)
			return err;

		nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				return -EINVAL;
			}
			if (len >= MAX_VLAN_LIST_LEN)
				return -EOPNOTSUPP;
			ivvl[len] = nla_data(attr);

			len++;
		}
		if (len == 0)
			return -EINVAL;

		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
					   ivvl[0]->qos, ivvl[0]->vlan_proto);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TX_RATE]) {
		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
		struct ifla_vf_info ivf;

		err = -EOPNOTSUPP;
		if (ops->ndo_get_vf_config)
			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
		if (err < 0)
			return err;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivf.min_tx_rate,
						   ivt->rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RATE]) {
		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_rate)
			err = ops->ndo_set_vf_rate(dev, ivt->vf,
						   ivt->min_tx_rate,
						   ivt->max_tx_rate);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_SPOOFCHK]) {
		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_spoofchk)
			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
						       ivs->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_LINK_STATE]) {
		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_link_state)
			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
							 ivl->link_state);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_RSS_QUERY_EN]) {
		struct ifla_vf_rss_query_en *ivrssq_en;

		err = -EOPNOTSUPP;
		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
		if (ops->ndo_set_vf_rss_query_en)
			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
							   ivrssq_en->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_TRUST]) {
		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_trust)
			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
		if (err < 0)
			return err;
	}

	if (tb[IFLA_VF_IB_NODE_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);

		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
	}

	if (tb[IFLA_VF_IB_PORT_GUID]) {
		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);

		if (!ops->ndo_set_vf_guid)
			return -EOPNOTSUPP;

		return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
	}

	return err;
}
static int do_set_master(struct net_device *dev, int ifindex)
{
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
	const struct net_device_ops *ops;
	int err;

	if (upper_dev) {
		if (upper_dev->ifindex == ifindex)
			return 0;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_del_slave) {
			err = ops->ndo_del_slave(upper_dev, dev);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}

	if (ifindex) {
		upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
		if (!upper_dev)
			return -EINVAL;
		ops = upper_dev->netdev_ops;
		if (ops->ndo_add_slave) {
			err = ops->ndo_add_slave(upper_dev, dev);
			if (err)
				return err;
		} else {
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
#define DO_SETLINK_MODIFIED	0x01
/* notify flag means notify + modified. */
#define DO_SETLINK_NOTIFY	0x03
static int do_setlink(const struct sk_buff *skb,
		      struct net_device *dev, struct ifinfomsg *ifm,
		      struct netlink_ext_ack *extack,
		      struct nlattr **tb, char *ifname, int status)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
		struct net *net = rtnl_link_get_net(dev_net(dev), tb);
		if (IS_ERR(net)) {
			err = PTR_ERR(net);
			goto errout;
		}
		if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
			put_net(net);
			err = -EPERM;
			goto errout;
		}
		err = dev_change_net_namespace(dev, net, ifname);
		put_net(net);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MAP]) {
		struct rtnl_link_ifmap *u_map;
		struct ifmap k_map;

		if (!ops->ndo_set_config) {
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (!netif_device_present(dev)) {
			err = -ENODEV;
			goto errout;
		}

		u_map = nla_data(tb[IFLA_MAP]);
		k_map.mem_start = (unsigned long) u_map->mem_start;
		k_map.mem_end = (unsigned long) u_map->mem_end;
		k_map.base_addr = (unsigned short) u_map->base_addr;
		k_map.irq = (unsigned char) u_map->irq;
		k_map.dma = (unsigned char) u_map->dma;
		k_map.port = (unsigned char) u_map->port;

		err = ops->ndo_set_config(dev, &k_map);
		if (err < 0)
			goto errout;

		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_ADDRESS]) {
		struct sockaddr *sa;
		int len;

		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
						  sizeof(*sa));
		sa = kmalloc(len, GFP_KERNEL);
		if (!sa) {
			err = -ENOMEM;
			goto errout;
		}
		sa->sa_family = dev->type;
		memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
		       dev->addr_len);
		err = dev_set_mac_address(dev, sa);
		kfree(sa);
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_GROUP]) {
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
		status |= DO_SETLINK_NOTIFY;
	}

	/*
	 * Interface selected by interface index but interface
	 * name provided implies that a name change has been
	 * requested.
	 */
	if (ifm->ifi_index > 0 && ifname[0]) {
		err = dev_change_name(dev, ifname);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_IFALIAS]) {
		err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
				    nla_len(tb[IFLA_IFALIAS]));
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_BROADCAST]) {
		nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	}

	if (ifm->ifi_flags || ifm->ifi_change) {
		err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
		if (err < 0)
			goto errout;
	}

	if (tb[IFLA_MASTER]) {
		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_CARRIER]) {
		err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
		if (err)
			goto errout;
		status |= DO_SETLINK_MODIFIED;
	}

	if (tb[IFLA_TXQLEN]) {
		unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]);
		unsigned int orig_len = dev->tx_queue_len;

		if (dev->tx_queue_len ^ value) {
			dev->tx_queue_len = value;
			err = call_netdevice_notifiers(
			      NETDEV_CHANGE_TX_QUEUE_LEN, dev);
			err = notifier_to_errno(err);
			if (err) {
				dev->tx_queue_len = orig_len;
				goto errout;
			}
			status |= DO_SETLINK_NOTIFY;
		}
	}

	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));

	if (tb[IFLA_LINKMODE]) {
		unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);

		write_lock_bh(&dev_base_lock);
		if (dev->link_mode ^ value)
			status |= DO_SETLINK_NOTIFY;
		dev->link_mode = value;
		write_unlock_bh(&dev_base_lock);
	}

	if (tb[IFLA_VFINFO_LIST]) {
		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
			if (nla_type(attr) != IFLA_VF_INFO ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
					       ifla_vf_policy, NULL);
			if (err < 0)
				goto errout;
			err = do_setvfinfo(dev, vfinfo);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_VF_PORTS]) {
		struct nlattr *port[IFLA_PORT_MAX+1];
		struct nlattr *attr;
		int vf;
		int rem;

		err = -EOPNOTSUPP;
		if (!ops->ndo_set_vf_port)
			goto errout;

		nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
			if (nla_type(attr) != IFLA_VF_PORT ||
			    nla_len(attr) < NLA_HDRLEN) {
				err = -EINVAL;
				goto errout;
			}
			err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
					       ifla_port_policy, NULL);
			if (err < 0)
				goto errout;
			if (!port[IFLA_PORT_VF]) {
				err = -EOPNOTSUPP;
				goto errout;
			}
			vf = nla_get_u32(port[IFLA_PORT_VF]);
			err = ops->ndo_set_vf_port(dev, vf, port);
			if (err < 0)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PORT_SELF]) {
		struct nlattr *port[IFLA_PORT_MAX+1];

		err = nla_parse_nested(port, IFLA_PORT_MAX,
				       tb[IFLA_PORT_SELF], ifla_port_policy,
				       NULL);
		if (err < 0)
			goto errout;

		err = -EOPNOTSUPP;
		if (ops->ndo_set_vf_port)
			err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
		if (err < 0)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_AF_SPEC]) {
		struct nlattr *af;
		int rem;

		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
			const struct rtnl_af_ops *af_ops;

			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
				BUG();

			err = af_ops->set_link_af(dev, af);
			if (err < 0)
				goto errout;

			status |= DO_SETLINK_NOTIFY;
		}
	}
	err = 0;

	if (tb[IFLA_PROTO_DOWN]) {
		err = dev_change_proto_down(dev,
					    nla_get_u8(tb[IFLA_PROTO_DOWN]));
		if (err)
			goto errout;
		status |= DO_SETLINK_NOTIFY;
	}

	if (tb[IFLA_XDP]) {
		struct nlattr *xdp[IFLA_XDP_MAX + 1];
		u32 xdp_flags = 0;

		err = nla_parse_nested(xdp, IFLA_XDP_MAX, tb[IFLA_XDP],
				       ifla_xdp_policy, NULL);
		if (err < 0)
			goto errout;

		if (xdp[IFLA_XDP_ATTACHED] || xdp[IFLA_XDP_PROG_ID]) {
			err = -EINVAL;
			goto errout;
		}

		if (xdp[IFLA_XDP_FLAGS]) {
			xdp_flags = nla_get_u32(xdp[IFLA_XDP_FLAGS]);
			if (xdp_flags & ~XDP_FLAGS_MASK) {
				err = -EINVAL;
				goto errout;
			}
			if (hweight32(xdp_flags & XDP_FLAGS_MODES) > 1) {
				err = -EINVAL;
				goto errout;
			}
		}

		if (xdp[IFLA_XDP_FD]) {
			err = dev_change_xdp_fd(dev, extack,
						nla_get_s32(xdp[IFLA_XDP_FD]),
						xdp_flags);
			if (err)
				goto errout;
			status |= DO_SETLINK_NOTIFY;
		}
	}

errout:
	if (status & DO_SETLINK_MODIFIED) {
		if (status & DO_SETLINK_NOTIFY)
			netdev_state_change(dev);

		if (err < 0)
			net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
					     dev->name);
	}

	return err;
}
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	int err;
	struct nlattr *tb[IFLA_MAX+1];
	char ifname[IFNAMSIZ];

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
			  extack);
	if (err < 0)
		goto errout;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else
		goto errout;

	if (dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

	err = validate_linkmsg(dev, tb);
	if (err < 0)
		goto errout;

	err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
errout:
	return err;
}
static int rtnl_group_dellink(const struct net *net, int group)
{
	struct net_device *dev, *aux;
	LIST_HEAD(list_kill);
	bool found = false;

	if (!group)
		return -EPERM;

	for_each_netdev(net, dev) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			found = true;
			ops = dev->rtnl_link_ops;
			if (!ops || !ops->dellink)
				return -EOPNOTSUPP;
		}
	}

	if (!found)
		return -ENODEV;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			ops = dev->rtnl_link_ops;
			ops->dellink(dev, &list_kill);
		}
	}
	unregister_netdevice_many(&list_kill);

	return 0;
}
int rtnl_delete_link(struct net_device *dev)
{
	const struct rtnl_link_ops *ops;
	LIST_HEAD(list_kill);

	ops = dev->rtnl_link_ops;
	if (!ops || !ops->dellink)
		return -EOPNOTSUPP;

	ops->dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_delete_link);
static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else if (tb[IFLA_GROUP])
		return rtnl_group_dellink(net, nla_get_u32(tb[IFLA_GROUP]));
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	return rtnl_delete_link(dev);
}
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
{
	unsigned int old_flags;
	int err;

	old_flags = dev->flags;
	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
		if (err < 0)
			return err;
	}

	dev->rtnl_link_state = RTNL_LINK_INITIALIZED;

	__dev_notify_flags(dev, old_flags, ~0U);
	return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);
struct net_device *rtnl_create_link(struct net *net,
				    const char *ifname, unsigned char name_assign_type,
				    const struct rtnl_link_ops *ops, struct nlattr *tb[])
{
	struct net_device *dev;
	unsigned int num_tx_queues = 1;
	unsigned int num_rx_queues = 1;

	if (tb[IFLA_NUM_TX_QUEUES])
		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
	else if (ops->get_num_tx_queues)
		num_tx_queues = ops->get_num_tx_queues();

	if (tb[IFLA_NUM_RX_QUEUES])
		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
	else if (ops->get_num_rx_queues)
		num_rx_queues = ops->get_num_rx_queues();

	dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
			       ops->setup, num_tx_queues, num_rx_queues);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev_net_set(dev, net);
	dev->rtnl_link_ops = ops;
	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;

	if (tb[IFLA_MTU])
		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
	if (tb[IFLA_ADDRESS]) {
		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
		       nla_len(tb[IFLA_ADDRESS]));
		dev->addr_assign_type = NET_ADDR_SET;
	}
	if (tb[IFLA_BROADCAST])
		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
		       nla_len(tb[IFLA_BROADCAST]));
	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
	if (tb[IFLA_LINKMODE])
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
	if (tb[IFLA_GROUP])
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));

	return dev;
}
EXPORT_SYMBOL(rtnl_create_link);
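
/* Illustrative sketch (not part of this file): rtnl_create_link() is what
 * eventually services a request such as
 *
 *	ip link add name dummy0 numtxqueues 4 mtu 1400 type dummy
 *
 * where IFLA_NUM_TX_QUEUES, IFLA_MTU and friends arrive in tb[] and the
 * "dummy" kind selects the rtnl_link_ops whose ->setup and ->priv_size
 * are handed to alloc_netdev_mqs() above.
 */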
static int rtnl_group_changelink(const struct sk_buff *skb,
				 struct net *net, int group,
				 struct ifinfomsg *ifm,
				 struct netlink_ext_ack *extack,
				 struct nlattr **tb)
{
	struct net_device *dev, *aux;
	int err;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
			if (err < 0)
				return err;
		}
	}

	return 0;
}
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	const struct rtnl_link_ops *ops;
	const struct rtnl_link_ops *m_ops = NULL;
	struct net_device *dev;
	struct net_device *master_dev = NULL;
	struct ifinfomsg *ifm;
	char kind[MODULE_NAME_LEN];
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct nlattr *linkinfo[IFLA_INFO_MAX+1];
	unsigned char name_assign_type = NET_NAME_USER;
	int err;

#ifdef CONFIG_MODULES
replay:
#endif
	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (ifname[0])
		dev = __dev_get_by_name(net, ifname);
	else
		dev = NULL;

	if (dev) {
		master_dev = netdev_master_upper_dev_get(dev);
		if (master_dev)
			m_ops = master_dev->rtnl_link_ops;
	}

	err = validate_linkmsg(dev, tb);
	if (err < 0)
		return err;

	if (tb[IFLA_LINKINFO]) {
		err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
				       tb[IFLA_LINKINFO], ifla_info_policy,
				       NULL);
		if (err < 0)
			return err;
	} else
		memset(linkinfo, 0, sizeof(linkinfo));

	if (linkinfo[IFLA_INFO_KIND]) {
		nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	} else {
		kind[0] = '\0';
		ops = NULL;
	}

	{
		struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
		struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
		struct nlattr **data = NULL;
		struct nlattr **slave_data = NULL;
		struct net *dest_net, *link_net = NULL;

		if (ops) {
			if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
				err = nla_parse_nested(attr, ops->maxtype,
						       linkinfo[IFLA_INFO_DATA],
						       ops->policy, NULL);
				if (err < 0)
					return err;
				data = attr;
			}
			if (ops->validate) {
				err = ops->validate(tb, data, extack);
				if (err < 0)
					return err;
			}
		}

		if (m_ops) {
			if (m_ops->slave_maxtype &&
			    linkinfo[IFLA_INFO_SLAVE_DATA]) {
				err = nla_parse_nested(slave_attr,
						       m_ops->slave_maxtype,
						       linkinfo[IFLA_INFO_SLAVE_DATA],
						       m_ops->slave_policy,
						       NULL);
				if (err < 0)
					return err;
				slave_data = slave_attr;
			}
			if (m_ops->slave_validate) {
				err = m_ops->slave_validate(tb, slave_data,
							    extack);
				if (err < 0)
					return err;
			}
		}

		if (dev) {
			int status = 0;

			if (nlh->nlmsg_flags & NLM_F_EXCL)
				return -EEXIST;
			if (nlh->nlmsg_flags & NLM_F_REPLACE)
				return -EOPNOTSUPP;

			if (linkinfo[IFLA_INFO_DATA]) {
				if (!ops || ops != dev->rtnl_link_ops ||
				    !ops->changelink)
					return -EOPNOTSUPP;

				err = ops->changelink(dev, tb, data, extack);
				if (err < 0)
					return err;
				status |= DO_SETLINK_NOTIFY;
			}

			if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
				if (!m_ops || !m_ops->slave_changelink)
					return -EOPNOTSUPP;

				err = m_ops->slave_changelink(master_dev, dev,
							      tb, slave_data,
							      extack);
				if (err < 0)
					return err;
				status |= DO_SETLINK_NOTIFY;
			}

			return do_setlink(skb, dev, ifm, extack, tb, ifname,
					  status);
		}

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
				return rtnl_group_changelink(skb, net,
						nla_get_u32(tb[IFLA_GROUP]),
						ifm, extack, tb);
			return -ENODEV;
		}

		if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
			return -EOPNOTSUPP;

		if (!ops) {
#ifdef CONFIG_MODULES
			if (kind[0]) {
				__rtnl_unlock();
				request_module("rtnl-link-%s", kind);
				rtnl_lock();
				ops = rtnl_link_ops_get(kind);
				if (ops)
					goto replay;
			}
#endif
			return -EOPNOTSUPP;
		}

		if (!ifname[0]) {
			snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
			name_assign_type = NET_NAME_ENUM;
		}

		dest_net = rtnl_link_get_net(net, tb);
		if (IS_ERR(dest_net))
			return PTR_ERR(dest_net);

		err = -EPERM;
		if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
			goto out;

		if (tb[IFLA_LINK_NETNSID]) {
			int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);

			link_net = get_net_ns_by_id(dest_net, id);
			if (!link_net) {
				err = -EINVAL;
				goto out;
			}
			err = -EPERM;
			if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
				goto out;
		}

		dev = rtnl_create_link(link_net ? : dest_net, ifname,
				       name_assign_type, ops, tb);
		if (IS_ERR(dev)) {
			err = PTR_ERR(dev);
			goto out;
		}

		dev->ifindex = ifm->ifi_index;

		if (ops->newlink) {
			err = ops->newlink(link_net ? : net, dev, tb, data,
					   extack);
			/* Drivers should call free_netdev() in ->destructor
			 * and unregister it on failure after registration
			 * so that device could be finally freed in rtnl_unlock.
			 */
			if (err < 0) {
				/* If device is not registered at all, free it now */
				if (dev->reg_state == NETREG_UNINITIALIZED)
					free_netdev(dev);
				goto out;
			}
		} else {
			err = register_netdevice(dev);
			if (err < 0) {
				free_netdev(dev);
				goto out;
			}
		}
		err = rtnl_configure_link(dev, ifm);
		if (err < 0)
			goto out_unregister;
		if (link_net) {
			err = dev_change_net_namespace(dev, dest_net, ifname);
			if (err < 0)
				goto out_unregister;
		}
		if (tb[IFLA_MASTER]) {
			err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
			if (err)
				goto out_unregister;
		}
out:
		if (link_net)
			put_net(link_net);
		put_net(dest_net);
		return err;
out_unregister:
		if (ops->newlink) {
			LIST_HEAD(list_kill);

			ops->dellink(dev, &list_kill);
			unregister_netdevice_many(&list_kill);
		} else {
			unregister_netdevice(dev);
		}
		goto out;
	}
}
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev = NULL;
	struct sk_buff *nskb;
	int err;
	u32 ext_filter_mask = 0;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	if (tb[IFLA_EXT_MASK])
		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else
		return -EINVAL;

	if (dev == NULL)
		return -ENODEV;

	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
	if (nskb == NULL)
		return -ENOBUFS;

	err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
			       nlh->nlmsg_seq, 0, 0, ext_filter_mask, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);

	return err;
}
static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	u16 min_ifinfo_dump_size = 0;
	int hdrlen;

	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
	}

	if (!ext_filter_mask)
		return NLMSG_GOODSIZE;
	/*
	 * traverse the list of net devices and compute the minimum
	 * buffer size based upon the filter mask.
	 */
	list_for_each_entry(dev, &net->dev_base_head, dev_list) {
		min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
					     if_nlmsg_size(dev,
							   ext_filter_mask));
	}

	return nlmsg_total_size(min_ifinfo_dump_size);
}
static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->family;

	if (s_idx == 0)
		s_idx = 1;

	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
		int type = cb->nlh->nlmsg_type - RTM_BASE;

		if (idx < s_idx || idx == PF_PACKET)
			continue;
		if (rtnl_msg_handlers[idx] == NULL ||
		    rtnl_msg_handlers[idx][type].dumpit == NULL)
			continue;
		if (idx > s_idx) {
			memset(&cb->args[0], 0, sizeof(cb->args));
			cb->prev_seq = 0;
			cb->seq = 0;
		}
		if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
			break;
	}
	cb->family = idx;

	return skb->len;
}
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
				       unsigned int change,
				       u32 event, gfp_t flags)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	size_t if_info_size;

	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
	if (skb == NULL)
		goto errout;

	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0, event);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	return skb;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
	return NULL;
}
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
{
	struct net *net = dev_net(dev);

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
}
static void rtmsg_ifinfo_event(int type, struct net_device *dev,
			       unsigned int change, u32 event,
			       gfp_t flags)
{
	struct sk_buff *skb;

	if (dev->reg_state != NETREG_REGISTERED)
		return;

	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags);
	if (skb)
		rtmsg_ifinfo_send(skb, dev, flags);
}
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
		  gfp_t flags)
{
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags);
}
EXPORT_SYMBOL(rtmsg_ifinfo);
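
/* Illustrative sketch (not part of this file): drivers use the exported
 * helper above to push an unsolicited RTM_NEWLINK to RTNLGRP_LINK
 * listeners after changing state outside the usual netlink paths, e.g.
 *
 *	netif_carrier_on(dev);
 *	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
 *
 * The "change" mask is what userspace monitors such as "ip monitor link"
 * see as ifi_change.
 */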
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   u8 *addr, u16 vid, u32 pid, u32 seq,
				   int type, unsigned int flags,
				   int nlflags, u16 ndm_state)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1	 = 0;
	ndm->ndm_pad2	 = 0;
	ndm->ndm_flags	 = flags;
	ndm->ndm_type	 = 0;
	ndm->ndm_ifindex = dev->ifindex;
	ndm->ndm_state   = ndm_state;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;
	if (vid)
		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
			goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t rtnl_fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
	       nla_total_size(ETH_ALEN) +	/* NDA_LLADDR */
	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
	       0;
}
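
/* Illustrative sketch (not part of this file): with a 12-byte ndmsg, a
 * 6-byte NDA_LLADDR and a 2-byte NDA_VLAN, the helper above sizes one
 * FDB notification at roughly
 *
 *	NLMSG_ALIGN(12) + nla_total_size(6) + nla_total_size(2)
 *	= 12 + 12 + 8 = 32 bytes of payload,
 *
 * which is what rtnl_fdb_notify() below hands to nlmsg_new(); the netlink
 * header itself is added by the allocation.
 */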
static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
			    u16 ndm_state)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
				      0, 0, type, NTF_SELF, 0, ndm_state);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
/**
 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
 */
int ndo_dflt_fdb_add(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid,
		     u16 flags)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_add);
static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid)
{
	u16 vid = 0;

	if (vlan_attr) {
		if (nla_len(vlan_attr) != sizeof(u16)) {
			pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan\n");
			return -EINVAL;
		}

		vid = nla_get_u16(vlan_attr);

		if (!vid || vid >= VLAN_VID_MASK) {
			pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid vlan id %d\n",
				vid);
			return -EINVAL;
		}
	}
	*p_vid = vid;
	return 0;
}
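
/* Illustrative sketch (not part of this file): VLAN_VID_MASK is 0x0fff, so
 * the check above accepts IDs 1..4094 and rejects 0 and 4095, which is the
 * range a command such as
 *
 *	bridge fdb add 00:11:22:33:44:55 dev eth0 vlan 100
 *
 * may legitimately hand to rtnl_fdb_add()/rtnl_fdb_del() below.
 */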
static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	const unsigned char *addr;
	u16 vid;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
		return -ENODEV;
	}

	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
		pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);

	err = fdb_vid_parse(tb[NDA_VLAN], &vid);
	if (err)
		return err;

	err = -EOPNOTSUPP;

	/* Support fdb on master device the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;

		err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
				       nlh->nlmsg_flags);
		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
	if ((ndm->ndm_flags & NTF_SELF)) {
		if (dev->netdev_ops->ndo_fdb_add)
			err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
							   vid,
							   nlh->nlmsg_flags);
		else
			err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
					       nlh->nlmsg_flags);

		if (!err) {
			rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
					ndm->ndm_state);
			ndm->ndm_flags &= ~NTF_SELF;
		}
	}
out:
	return err;
}
/**
 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
 */
int ndo_dflt_fdb_del(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (!(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_del);
static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	int err = -EINVAL;
	__u8 *addr;
	u16 vid;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
		return -ENODEV;
	}

	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
		pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);

	err = fdb_vid_parse(tb[NDA_VLAN], &vid);
	if (err)
		return err;

	err = -EOPNOTSUPP;

	/* Support fdb on master device the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;

		if (ops->ndo_fdb_del)
			err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);

		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
	if (ndm->ndm_flags & NTF_SELF) {
		if (dev->netdev_ops->ndo_fdb_del)
			err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
							   vid);
		else
			err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);

		if (!err) {
			rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
					ndm->ndm_state);
			ndm->ndm_flags &= ~NTF_SELF;
		}
	}
out:
	return err;
}
static int nlmsg_populate_fdb(struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct net_device *dev,
			      int *idx,
			      struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int err;
	u32 portid, seq;

	portid = NETLINK_CB(cb->skb).portid;
	seq = cb->nlh->nlmsg_seq;

	list_for_each_entry(ha, &list->list, list) {
		if (*idx < cb->args[2])
			goto skip;

		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
					      portid, seq,
					      RTM_NEWNEIGH, NTF_SELF,
					      NLM_F_MULTI, NUD_PERMANENT);
		if (err < 0)
			return err;
skip:
		*idx += 1;
	}
	return 0;
}
/**
 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
 * @nlh: netlink message header
 *
 * Default netdevice operation to dump the existing unicast address list.
 * Returns number of addresses from list put in skb.
 */
int ndo_dflt_fdb_dump(struct sk_buff *skb,
		      struct netlink_callback *cb,
		      struct net_device *dev,
		      struct net_device *filter_dev,
		      int *idx)
{
	int err;

	netif_addr_lock_bh(dev);
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
	if (err)
		goto out;
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
out:
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);
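
/* Illustrative sketch (not part of this file): a driver without its own
 * forwarding database can point its netdev op straight at the exported
 * default, e.g.
 *
 *	static const struct net_device_ops foo_netdev_ops = {	// hypothetical
 *		.ndo_fdb_dump = ndo_dflt_fdb_dump,
 *	};
 *
 * so that "bridge fdb show dev foo0" simply walks dev->uc / dev->mc as
 * the helper above does.
 */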
static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *br_dev = NULL;
	const struct net_device_ops *ops = NULL;
	const struct net_device_ops *cops = NULL;
	struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct hlist_head *head;
	int brport_idx = 0;
	int br_idx = 0;
	int h, s_h;
	int idx = 0, s_idx;
	int err = 0;
	int fidx = 0;

	err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
			  IFLA_MAX, ifla_policy, NULL);
	if (err < 0) {
		return -EINVAL;
	} else if (err == 0) {
		if (tb[IFLA_MASTER])
			br_idx = nla_get_u32(tb[IFLA_MASTER]);
	}

	brport_idx = ifm->ifi_index;

	if (br_idx) {
		br_dev = __dev_get_by_index(net, br_idx);
		if (!br_dev)
			return -ENODEV;

		ops = br_dev->netdev_ops;
	}

	s_h = cb->args[0];
	s_idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {

			if (brport_idx && (dev->ifindex != brport_idx))
				continue;

			if (!br_idx) { /* user did not specify a specific bridge */
				if (dev->priv_flags & IFF_BRIDGE_PORT) {
					br_dev = netdev_master_upper_dev_get(dev);
					cops = br_dev->netdev_ops;
				}
			} else {
				if (dev != br_dev &&
				    !(dev->priv_flags & IFF_BRIDGE_PORT))
					continue;

				if (br_dev != netdev_master_upper_dev_get(dev) &&
				    !(dev->priv_flags & IFF_EBRIDGE))
					continue;
				cops = ops;
			}

			if (idx < s_idx)
				goto cont;

			if (dev->priv_flags & IFF_BRIDGE_PORT) {
				if (cops && cops->ndo_fdb_dump) {
					err = cops->ndo_fdb_dump(skb, cb,
								 br_dev, dev,
								 &fidx);
					if (err == -EMSGSIZE)
						goto out;
				}
			}

			if (dev->netdev_ops->ndo_fdb_dump)
				err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
								    dev, NULL,
								    &fidx);
			else
				err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
							&fidx);
			if (err == -EMSGSIZE)
				goto out;

			cops = NULL;

			/* reset fdb offset to 0 for rest of the interfaces */
			cb->args[2] = 0;
			fidx = 0;
cont:
			idx++;
		}
	}

out:
	cb->args[0] = h;
	cb->args[1] = idx;
	cb->args[2] = fidx;

	return skb->len;
}
static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
			       unsigned int attrnum, unsigned int flag)
{
	if (mask & flag)
		return nla_put_u8(skb, attrnum, !!(flags & flag));
	return 0;
}
int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			    struct net_device *dev, u16 mode,
			    u32 flags, u32 mask, int nlflags,
			    u32 filter_mask,
			    int (*vlan_fill)(struct sk_buff *skb,
					     struct net_device *dev,
					     u32 filter_mask))
{
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	struct nlattr *br_afspec;
	struct nlattr *protinfo;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	int err = 0;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_BRIDGE;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (br_dev &&
	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
	if (!br_afspec)
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
		nla_nest_cancel(skb, br_afspec);
		goto nla_put_failure;
	}

	if (mode != BRIDGE_MODE_UNDEF) {
		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	if (vlan_fill) {
		err = vlan_fill(skb, dev, filter_mask);
		if (err) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, br_afspec);

	protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
	if (!protinfo)
		goto nla_put_failure;

	if (brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_FAST_LEAVE,
				BR_MULTICAST_FAST_LEAVE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING, BR_LEARNING) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
		nla_nest_cancel(skb, protinfo);
		goto nla_put_failure;
	}

	nla_nest_end(skb, protinfo);

	nlmsg_end(skb, nlh);
	return 0;
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return err ? err : -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int idx = 0;
	u32 portid = NETLINK_CB(cb->skb).portid;
	u32 seq = cb->nlh->nlmsg_seq;
	u32 filter_mask = 0;
	int err;

	if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
		struct nlattr *extfilt;

		extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
					  IFLA_EXT_MASK);
		if (extfilt) {
			if (nla_len(extfilt) < sizeof(filter_mask))
				return -EINVAL;

			filter_mask = nla_get_u32(extfilt);
		}
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = br_dev->netdev_ops->ndo_bridge_getlink(
						skb, portid, seq, dev,
						filter_mask, NLM_F_MULTI);
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
			}
			idx++;
		}

		if (ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = ops->ndo_bridge_getlink(skb, portid,
							      seq, dev,
							      filter_mask,
							      NLM_F_MULTI);
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
			}
			idx++;
		}
	}
	err = skb->len;
out_err:
	rcu_read_unlock();
	cb->args[0] = idx;

	return err;
}
static inline size_t bridge_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ)		/* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN)		/* IFLA_ADDRESS */
		+ nla_total_size(sizeof(u32))		/* IFLA_MASTER */
		+ nla_total_size(sizeof(u32))		/* IFLA_MTU */
		+ nla_total_size(sizeof(u32))		/* IFLA_LINK */
		+ nla_total_size(sizeof(u32))		/* IFLA_OPERSTATE */
		+ nla_total_size(sizeof(u8))		/* IFLA_PROTINFO */
		+ nla_total_size(sizeof(struct nlattr))	/* IFLA_AF_SPEC */
		+ nla_total_size(sizeof(u16))		/* IFLA_BRIDGE_FLAGS */
		+ nla_total_size(sizeof(u16));		/* IFLA_BRIDGE_MODE */
}
static int rtnl_bridge_notify(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -EOPNOTSUPP;

	if (!dev->netdev_ops->ndo_bridge_getlink)
		return 0;

	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
	if (!skb) {
		err = -ENOMEM;
		goto errout;
	}

	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
	if (err < 0)
		goto errout;

	if (!skb->len)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return 0;

errout:
	WARN_ON(err == -EMSGSIZE);
	kfree_skb(skb);
	if (err)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
	return err;
}
static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
	u16 flags = 0;
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
		pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
		return -ENODEV;
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_setlink)
			err = -EOPNOTSUPP;
		else
			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
								  flags);
		if (!err) {
			flags &= ~BRIDGE_FLAGS_SELF;

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}
static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
	u16 flags = 0;
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
		pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
		return -ENODEV;
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_dellink)
			err = -EOPNOTSUPP;
		else
			err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
								  flags);

		if (!err) {
			flags &= ~BRIDGE_FLAGS_SELF;

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}
static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
{
	return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
	       (!idxattr || idxattr == attrid);
}
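
/* Illustrative sketch (not part of this file): IFLA_STATS_FILTER_BIT(attr)
 * is (1 << (attr - 1)), so an RTM_GETSTATS request whose filter_mask is
 *
 *	IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64)
 *
 * makes stats_attr_valid() true only for the 64-bit link counters, and
 * rtnl_fill_statsinfo() below skips every other nest.
 */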
#define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
static int rtnl_get_offload_stats_attr_size(int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return sizeof(struct rtnl_link_stats64);
	}

	return 0;
}
static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
				  int *prividx)
{
	struct nlattr *attr = NULL;
	int attr_id, size;
	void *attr_data;
	int err;

	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
	      dev->netdev_ops->ndo_get_offload_stats))
		return -ENODATA;

	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
		if (attr_id < *prividx)
			continue;

		size = rtnl_get_offload_stats_attr_size(attr_id);
		if (!size)
			continue;

		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
			continue;

		attr = nla_reserve_64bit(skb, attr_id, size,
					 IFLA_OFFLOAD_XSTATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		attr_data = nla_data(attr);
		memset(attr_data, 0, size);
		err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
							     attr_data);
		if (err)
			goto get_offload_stats_failure;
	}

	if (!attr)
		return -ENODATA;

	*prividx = 0;
	return 0;

nla_put_failure:
	err = -EMSGSIZE;
get_offload_stats_failure:
	*prividx = attr_id;
	return err;
}
static int rtnl_get_offload_stats_size(const struct net_device *dev)
{
	int nla_size = 0;
	int attr_id;
	int size;

	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
	      dev->netdev_ops->ndo_get_offload_stats))
		return 0;

	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
			continue;
		size = rtnl_get_offload_stats_attr_size(attr_id);
		nla_size += nla_total_size_64bit(size);
	}

	if (nla_size != 0)
		nla_size += nla_total_size(0);

	return nla_size;
}
static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
			       int type, u32 pid, u32 seq, u32 change,
			       unsigned int flags, unsigned int filter_mask,
			       int *idxattr, int *prividx)
{
	struct if_stats_msg *ifsm;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	int s_prividx = *prividx;
	int err;

	ASSERT_RTNL();

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifsm = nlmsg_data(nlh);
	ifsm->ifindex = dev->ifindex;
	ifsm->filter_mask = filter_mask;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
		struct rtnl_link_stats64 *sp;

		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
					 sizeof(struct rtnl_link_stats64),
					 IFLA_STATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		sp = nla_data(attr);
		dev_get_stats(dev, sp);
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
			     *idxattr)) {
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		master = netdev_master_upper_dev_get(dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS_SLAVE);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
			     *idxattr)) {
		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
		attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
		if (!attr)
			goto nla_put_failure;

		err = rtnl_get_offload_stats(skb, dev, prividx);
		if (err == -ENODATA)
			nla_nest_cancel(skb, attr);
		else
			nla_nest_end(skb, attr);

		if (err && err != -ENODATA)
			goto nla_put_failure;
		*idxattr = 0;
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
		struct rtnl_af_ops *af_ops;

		*idxattr = IFLA_STATS_AF_SPEC;
		attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
		if (!attr)
			goto nla_put_failure;

		list_for_each_entry(af_ops, &rtnl_af_ops, list) {
			if (af_ops->fill_stats_af) {
				struct nlattr *af;
				int err;

				af = nla_nest_start(skb, af_ops->family);
				if (!af)
					goto nla_put_failure;

				err = af_ops->fill_stats_af(skb, dev);

				if (err == -ENODATA)
					nla_nest_cancel(skb, af);
				else if (err < 0)
					goto nla_put_failure;

				nla_nest_end(skb, af);
			}
		}

		nla_nest_end(skb, attr);

		*idxattr = 0;
	}

	nlmsg_end(skb, nlh);

	return 0;

nla_put_failure:
	/* not a multi message or no progress mean a real error */
	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
		nlmsg_cancel(skb, nlh);
	else
		nlmsg_end(skb, nlh);

	return -EMSGSIZE;
}
static size_t if_nlmsg_stats_size(const struct net_device *dev,
				  u32 filter_mask)
{
	size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
		int attr = IFLA_STATS_LINK_XSTATS;

		if (ops && ops->get_linkxstats_size) {
			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
		struct net_device *_dev = (struct net_device *)dev;
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		/* netdev_master_upper_dev_get can't take const */
		master = netdev_master_upper_dev_get(_dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->get_linkxstats_size) {
			int attr = IFLA_STATS_LINK_XSTATS_SLAVE;

			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS_SLAVE */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
		size += rtnl_get_offload_stats_size(dev);

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
		struct rtnl_af_ops *af_ops;

		/* for IFLA_STATS_AF_SPEC */
		size += nla_total_size(0);

		list_for_each_entry(af_ops, &rtnl_af_ops, list) {
			if (af_ops->get_stats_af_size) {
				size += nla_total_size(
					af_ops->get_stats_af_size(dev));

				/* for AF_* */
				size += nla_total_size(0);
			}
		}
	}

	return size;
}
static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	int idxattr = 0, prividx = 0;
	struct if_stats_msg *ifsm;
	struct sk_buff *nskb;
	u32 filter_mask;
	int err;

	if (nlmsg_len(nlh) < sizeof(*ifsm))
		return -EINVAL;

	ifsm = nlmsg_data(nlh);
	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
	if (!nskb)
		return -ENOBUFS;

	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
				  0, filter_mask, &idxattr, &prividx);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else {
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
	}

	return err;
}
static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int h, s_h, err, s_idx, s_idxattr, s_prividx;
	struct net *net = sock_net(skb->sk);
	unsigned int flags = NLM_F_MULTI;
	struct if_stats_msg *ifsm;
	struct hlist_head *head;
	struct net_device *dev;
	u32 filter_mask = 0;
	int idx = 0;

	s_h = cb->args[0];
	s_idx = cb->args[1];
	s_idxattr = cb->args[2];
	s_prividx = cb->args[3];

	cb->seq = net->dev_base_seq;

	if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
		return -EINVAL;

	ifsm = nlmsg_data(cb->nlh);
	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, 0,
						  flags, filter_mask,
						  &s_idxattr, &s_prividx);
			/* If we ran out of room on the first message,
			 * we're in trouble
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;
			s_prividx = 0;
			s_idxattr = 0;
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	cb->args[3] = s_prividx;
	cb->args[2] = s_idxattr;
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}
/* Process one rtnetlink message. */

static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	rtnl_doit_func doit;
	int kind;
	int family;
	int type;
	int err;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = type & 3;

	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (kind == 2 && nlh->nlmsg_flags & NLM_F_DUMP) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		rtnl_calcit_func calcit;
		u16 min_dump_alloc = 0;

		dumpit = rtnl_get_dumpit(family, type);
		if (dumpit == NULL)
			return -EOPNOTSUPP;
		calcit = rtnl_get_calcit(family, type);
		if (calcit)
			min_dump_alloc = calcit(skb, nlh);

		__rtnl_unlock();
		rtnl = net->rtnl;
		{
			struct netlink_dump_control c = {
				.dump		= dumpit,
				.min_dump_alloc	= min_dump_alloc,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
		}
		rtnl_lock();
		return err;
	}

	doit = rtnl_get_doit(family, type);
	if (doit == NULL)
		return -EOPNOTSUPP;

	return doit(skb, nlh, extack);
}
static void rtnetlink_rcv(struct sk_buff *skb)
{
	rtnl_lock();
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
	rtnl_unlock();
}
static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call	= rtnetlink_event,
};
static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.cb_mutex	= &rtnl_mutex,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};
void __init rtnetlink_init(void)
{
	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, rtnl_calcit);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, NULL);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      NULL);
}
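
/* Illustrative sketch (not part of this file): address-family code elsewhere
 * in the tree hooks into the same handler table with the helper used above,
 * for example the IPv4 FIB dump registration
 *
 *	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
 *
 * after which rtnetlink_rcv_msg() dispatches matching messages to the
 * registered doit/dumpit callbacks.
 */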