2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Routing netlink socket interface: protocol independent part.
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
16 * Vitaly E. Lavrov RTA_OK arithmetics was wrong.
19 #include <linux/bitops.h>
20 #include <linux/errno.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/kernel.h>
25 #include <linux/timer.h>
26 #include <linux/string.h>
27 #include <linux/sockios.h>
28 #include <linux/net.h>
29 #include <linux/fcntl.h>
31 #include <linux/slab.h>
32 #include <linux/interrupt.h>
33 #include <linux/capability.h>
34 #include <linux/skbuff.h>
35 #include <linux/init.h>
36 #include <linux/security.h>
37 #include <linux/mutex.h>
38 #include <linux/if_addr.h>
39 #include <linux/if_bridge.h>
40 #include <linux/if_vlan.h>
41 #include <linux/pci.h>
42 #include <linux/etherdevice.h>
43 #include <linux/bpf.h>
45 #include <linux/uaccess.h>
47 #include <linux/inet.h>
48 #include <linux/netdevice.h>
49 #include <net/switchdev.h>
51 #include <net/protocol.h>
53 #include <net/route.h>
57 #include <net/pkt_sched.h>
58 #include <net/fib_rules.h>
59 #include <net/rtnetlink.h>
60 #include <net/net_namespace.h>
64 rtnl_dumpit_func dumpit
;
68 static DEFINE_MUTEX(rtnl_mutex
);
72 mutex_lock(&rtnl_mutex
);
74 EXPORT_SYMBOL(rtnl_lock
);
76 static struct sk_buff
*defer_kfree_skb_list
;
77 void rtnl_kfree_skbs(struct sk_buff
*head
, struct sk_buff
*tail
)
80 tail
->next
= defer_kfree_skb_list
;
81 defer_kfree_skb_list
= head
;
84 EXPORT_SYMBOL(rtnl_kfree_skbs
);
86 void __rtnl_unlock(void)
88 struct sk_buff
*head
= defer_kfree_skb_list
;
90 defer_kfree_skb_list
= NULL
;
92 mutex_unlock(&rtnl_mutex
);
95 struct sk_buff
*next
= head
->next
;
103 void rtnl_unlock(void)
105 /* This fellow will unlock it for us. */
108 EXPORT_SYMBOL(rtnl_unlock
);
110 int rtnl_trylock(void)
112 return mutex_trylock(&rtnl_mutex
);
114 EXPORT_SYMBOL(rtnl_trylock
);
116 int rtnl_is_locked(void)
118 return mutex_is_locked(&rtnl_mutex
);
120 EXPORT_SYMBOL(rtnl_is_locked
);
122 #ifdef CONFIG_PROVE_LOCKING
123 bool lockdep_rtnl_is_held(void)
125 return lockdep_is_held(&rtnl_mutex
);
127 EXPORT_SYMBOL(lockdep_rtnl_is_held
);
128 #endif /* #ifdef CONFIG_PROVE_LOCKING */
130 static struct rtnl_link __rcu
*rtnl_msg_handlers
[RTNL_FAMILY_MAX
+ 1];
131 static refcount_t rtnl_msg_handlers_ref
[RTNL_FAMILY_MAX
+ 1];
133 static inline int rtm_msgindex(int msgtype
)
135 int msgindex
= msgtype
- RTM_BASE
;
138 * msgindex < 0 implies someone tried to register a netlink
139 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
140 * the message type has not been added to linux/rtnetlink.h
142 BUG_ON(msgindex
< 0 || msgindex
>= RTM_NR_MSGTYPES
);
148 * __rtnl_register - Register a rtnetlink message type
149 * @protocol: Protocol family or PF_UNSPEC
150 * @msgtype: rtnetlink message type
151 * @doit: Function pointer called for each request message
152 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
153 * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions
155 * Registers the specified function pointers (at least one of them has
156 * to be non-NULL) to be called whenever a request message for the
157 * specified protocol family and message type is received.
159 * The special protocol family PF_UNSPEC may be used to define fallback
160 * function pointers for the case when no entry for the specific protocol
163 * Returns 0 on success or a negative error code.
165 int __rtnl_register(int protocol
, int msgtype
,
166 rtnl_doit_func doit
, rtnl_dumpit_func dumpit
,
169 struct rtnl_link
*tab
;
172 BUG_ON(protocol
< 0 || protocol
> RTNL_FAMILY_MAX
);
173 msgindex
= rtm_msgindex(msgtype
);
175 tab
= rcu_dereference_raw(rtnl_msg_handlers
[protocol
]);
177 tab
= kcalloc(RTM_NR_MSGTYPES
, sizeof(*tab
), GFP_KERNEL
);
181 rcu_assign_pointer(rtnl_msg_handlers
[protocol
], tab
);
185 tab
[msgindex
].doit
= doit
;
187 tab
[msgindex
].dumpit
= dumpit
;
188 tab
[msgindex
].flags
|= flags
;
192 EXPORT_SYMBOL_GPL(__rtnl_register
);
195 * rtnl_register - Register a rtnetlink message type
197 * Identical to __rtnl_register() but panics on failure. This is useful
198 * as failure of this function is very unlikely, it can only happen due
199 * to lack of memory when allocating the chain to store all message
200 * handlers for a protocol. Meant for use in init functions where lack
201 * of memory implies no sense in continuing.
203 void rtnl_register(int protocol
, int msgtype
,
204 rtnl_doit_func doit
, rtnl_dumpit_func dumpit
,
207 if (__rtnl_register(protocol
, msgtype
, doit
, dumpit
, flags
) < 0)
208 panic("Unable to register rtnetlink message handler, "
209 "protocol = %d, message type = %d\n",
212 EXPORT_SYMBOL_GPL(rtnl_register
);
215 * rtnl_unregister - Unregister a rtnetlink message type
216 * @protocol: Protocol family or PF_UNSPEC
217 * @msgtype: rtnetlink message type
219 * Returns 0 on success or a negative error code.
221 int rtnl_unregister(int protocol
, int msgtype
)
223 struct rtnl_link
*handlers
;
226 BUG_ON(protocol
< 0 || protocol
> RTNL_FAMILY_MAX
);
227 msgindex
= rtm_msgindex(msgtype
);
230 handlers
= rtnl_dereference(rtnl_msg_handlers
[protocol
]);
236 handlers
[msgindex
].doit
= NULL
;
237 handlers
[msgindex
].dumpit
= NULL
;
238 handlers
[msgindex
].flags
= 0;
243 EXPORT_SYMBOL_GPL(rtnl_unregister
);
246 * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
247 * @protocol : Protocol family or PF_UNSPEC
249 * Identical to calling rtnl_unregster() for all registered message types
250 * of a certain protocol family.
252 void rtnl_unregister_all(int protocol
)
254 struct rtnl_link
*handlers
;
256 BUG_ON(protocol
< 0 || protocol
> RTNL_FAMILY_MAX
);
259 handlers
= rtnl_dereference(rtnl_msg_handlers
[protocol
]);
260 RCU_INIT_POINTER(rtnl_msg_handlers
[protocol
], NULL
);
265 while (refcount_read(&rtnl_msg_handlers_ref
[protocol
]) > 1)
269 EXPORT_SYMBOL_GPL(rtnl_unregister_all
);
271 static LIST_HEAD(link_ops
);
273 static const struct rtnl_link_ops
*rtnl_link_ops_get(const char *kind
)
275 const struct rtnl_link_ops
*ops
;
277 list_for_each_entry(ops
, &link_ops
, list
) {
278 if (!strcmp(ops
->kind
, kind
))
285 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
286 * @ops: struct rtnl_link_ops * to register
288 * The caller must hold the rtnl_mutex. This function should be used
289 * by drivers that create devices during module initialization. It
290 * must be called before registering the devices.
292 * Returns 0 on success or a negative error code.
294 int __rtnl_link_register(struct rtnl_link_ops
*ops
)
296 if (rtnl_link_ops_get(ops
->kind
))
299 /* The check for setup is here because if ops
300 * does not have that filled up, it is not possible
301 * to use the ops for creating device. So do not
302 * fill up dellink as well. That disables rtnl_dellink.
304 if (ops
->setup
&& !ops
->dellink
)
305 ops
->dellink
= unregister_netdevice_queue
;
307 list_add_tail(&ops
->list
, &link_ops
);
310 EXPORT_SYMBOL_GPL(__rtnl_link_register
);
313 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
314 * @ops: struct rtnl_link_ops * to register
316 * Returns 0 on success or a negative error code.
318 int rtnl_link_register(struct rtnl_link_ops
*ops
)
323 err
= __rtnl_link_register(ops
);
327 EXPORT_SYMBOL_GPL(rtnl_link_register
);
329 static void __rtnl_kill_links(struct net
*net
, struct rtnl_link_ops
*ops
)
331 struct net_device
*dev
;
332 LIST_HEAD(list_kill
);
334 for_each_netdev(net
, dev
) {
335 if (dev
->rtnl_link_ops
== ops
)
336 ops
->dellink(dev
, &list_kill
);
338 unregister_netdevice_many(&list_kill
);
342 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
343 * @ops: struct rtnl_link_ops * to unregister
345 * The caller must hold the rtnl_mutex.
347 void __rtnl_link_unregister(struct rtnl_link_ops
*ops
)
352 __rtnl_kill_links(net
, ops
);
354 list_del(&ops
->list
);
356 EXPORT_SYMBOL_GPL(__rtnl_link_unregister
);
358 /* Return with the rtnl_lock held when there are no network
359 * devices unregistering in any network namespace.
361 static void rtnl_lock_unregistering_all(void)
365 DEFINE_WAIT_FUNC(wait
, woken_wake_function
);
367 add_wait_queue(&netdev_unregistering_wq
, &wait
);
369 unregistering
= false;
372 if (net
->dev_unreg_count
> 0) {
373 unregistering
= true;
381 wait_woken(&wait
, TASK_UNINTERRUPTIBLE
, MAX_SCHEDULE_TIMEOUT
);
383 remove_wait_queue(&netdev_unregistering_wq
, &wait
);
387 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
388 * @ops: struct rtnl_link_ops * to unregister
390 void rtnl_link_unregister(struct rtnl_link_ops
*ops
)
392 /* Close the race with cleanup_net() */
393 mutex_lock(&net_mutex
);
394 rtnl_lock_unregistering_all();
395 __rtnl_link_unregister(ops
);
397 mutex_unlock(&net_mutex
);
399 EXPORT_SYMBOL_GPL(rtnl_link_unregister
);
401 static size_t rtnl_link_get_slave_info_data_size(const struct net_device
*dev
)
403 struct net_device
*master_dev
;
404 const struct rtnl_link_ops
*ops
;
409 master_dev
= netdev_master_upper_dev_get_rcu((struct net_device
*)dev
);
413 ops
= master_dev
->rtnl_link_ops
;
414 if (!ops
|| !ops
->get_slave_size
)
416 /* IFLA_INFO_SLAVE_DATA + nested data */
417 size
= nla_total_size(sizeof(struct nlattr
)) +
418 ops
->get_slave_size(master_dev
, dev
);
425 static size_t rtnl_link_get_size(const struct net_device
*dev
)
427 const struct rtnl_link_ops
*ops
= dev
->rtnl_link_ops
;
433 size
= nla_total_size(sizeof(struct nlattr
)) + /* IFLA_LINKINFO */
434 nla_total_size(strlen(ops
->kind
) + 1); /* IFLA_INFO_KIND */
437 /* IFLA_INFO_DATA + nested data */
438 size
+= nla_total_size(sizeof(struct nlattr
)) +
441 if (ops
->get_xstats_size
)
442 /* IFLA_INFO_XSTATS */
443 size
+= nla_total_size(ops
->get_xstats_size(dev
));
445 size
+= rtnl_link_get_slave_info_data_size(dev
);
450 static LIST_HEAD(rtnl_af_ops
);
452 static const struct rtnl_af_ops
*rtnl_af_lookup(const int family
)
454 const struct rtnl_af_ops
*ops
;
456 list_for_each_entry_rcu(ops
, &rtnl_af_ops
, list
) {
457 if (ops
->family
== family
)
465 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
466 * @ops: struct rtnl_af_ops * to register
468 * Returns 0 on success or a negative error code.
470 void rtnl_af_register(struct rtnl_af_ops
*ops
)
473 list_add_tail_rcu(&ops
->list
, &rtnl_af_ops
);
476 EXPORT_SYMBOL_GPL(rtnl_af_register
);
479 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
480 * @ops: struct rtnl_af_ops * to unregister
482 void rtnl_af_unregister(struct rtnl_af_ops
*ops
)
485 list_del_rcu(&ops
->list
);
490 EXPORT_SYMBOL_GPL(rtnl_af_unregister
);
492 static size_t rtnl_link_get_af_size(const struct net_device
*dev
,
495 struct rtnl_af_ops
*af_ops
;
499 size
= nla_total_size(sizeof(struct nlattr
));
502 list_for_each_entry_rcu(af_ops
, &rtnl_af_ops
, list
) {
503 if (af_ops
->get_link_af_size
) {
504 /* AF_* + nested data */
505 size
+= nla_total_size(sizeof(struct nlattr
)) +
506 af_ops
->get_link_af_size(dev
, ext_filter_mask
);
514 static bool rtnl_have_link_slave_info(const struct net_device
*dev
)
516 struct net_device
*master_dev
;
521 master_dev
= netdev_master_upper_dev_get_rcu((struct net_device
*)dev
);
522 if (master_dev
&& master_dev
->rtnl_link_ops
)
528 static int rtnl_link_slave_info_fill(struct sk_buff
*skb
,
529 const struct net_device
*dev
)
531 struct net_device
*master_dev
;
532 const struct rtnl_link_ops
*ops
;
533 struct nlattr
*slave_data
;
536 master_dev
= netdev_master_upper_dev_get((struct net_device
*) dev
);
539 ops
= master_dev
->rtnl_link_ops
;
542 if (nla_put_string(skb
, IFLA_INFO_SLAVE_KIND
, ops
->kind
) < 0)
544 if (ops
->fill_slave_info
) {
545 slave_data
= nla_nest_start(skb
, IFLA_INFO_SLAVE_DATA
);
548 err
= ops
->fill_slave_info(skb
, master_dev
, dev
);
550 goto err_cancel_slave_data
;
551 nla_nest_end(skb
, slave_data
);
555 err_cancel_slave_data
:
556 nla_nest_cancel(skb
, slave_data
);
560 static int rtnl_link_info_fill(struct sk_buff
*skb
,
561 const struct net_device
*dev
)
563 const struct rtnl_link_ops
*ops
= dev
->rtnl_link_ops
;
569 if (nla_put_string(skb
, IFLA_INFO_KIND
, ops
->kind
) < 0)
571 if (ops
->fill_xstats
) {
572 err
= ops
->fill_xstats(skb
, dev
);
576 if (ops
->fill_info
) {
577 data
= nla_nest_start(skb
, IFLA_INFO_DATA
);
580 err
= ops
->fill_info(skb
, dev
);
582 goto err_cancel_data
;
583 nla_nest_end(skb
, data
);
588 nla_nest_cancel(skb
, data
);
592 static int rtnl_link_fill(struct sk_buff
*skb
, const struct net_device
*dev
)
594 struct nlattr
*linkinfo
;
597 linkinfo
= nla_nest_start(skb
, IFLA_LINKINFO
);
598 if (linkinfo
== NULL
)
601 err
= rtnl_link_info_fill(skb
, dev
);
603 goto err_cancel_link
;
605 err
= rtnl_link_slave_info_fill(skb
, dev
);
607 goto err_cancel_link
;
609 nla_nest_end(skb
, linkinfo
);
613 nla_nest_cancel(skb
, linkinfo
);
618 int rtnetlink_send(struct sk_buff
*skb
, struct net
*net
, u32 pid
, unsigned int group
, int echo
)
620 struct sock
*rtnl
= net
->rtnl
;
623 NETLINK_CB(skb
).dst_group
= group
;
625 refcount_inc(&skb
->users
);
626 netlink_broadcast(rtnl
, skb
, pid
, group
, GFP_KERNEL
);
628 err
= netlink_unicast(rtnl
, skb
, pid
, MSG_DONTWAIT
);
632 int rtnl_unicast(struct sk_buff
*skb
, struct net
*net
, u32 pid
)
634 struct sock
*rtnl
= net
->rtnl
;
636 return nlmsg_unicast(rtnl
, skb
, pid
);
638 EXPORT_SYMBOL(rtnl_unicast
);
640 void rtnl_notify(struct sk_buff
*skb
, struct net
*net
, u32 pid
, u32 group
,
641 struct nlmsghdr
*nlh
, gfp_t flags
)
643 struct sock
*rtnl
= net
->rtnl
;
647 report
= nlmsg_report(nlh
);
649 nlmsg_notify(rtnl
, skb
, pid
, group
, report
, flags
);
651 EXPORT_SYMBOL(rtnl_notify
);
653 void rtnl_set_sk_err(struct net
*net
, u32 group
, int error
)
655 struct sock
*rtnl
= net
->rtnl
;
657 netlink_set_err(rtnl
, 0, group
, error
);
659 EXPORT_SYMBOL(rtnl_set_sk_err
);
661 int rtnetlink_put_metrics(struct sk_buff
*skb
, u32
*metrics
)
666 mx
= nla_nest_start(skb
, RTA_METRICS
);
670 for (i
= 0; i
< RTAX_MAX
; i
++) {
672 if (i
== RTAX_CC_ALGO
- 1) {
673 char tmp
[TCP_CA_NAME_MAX
], *name
;
675 name
= tcp_ca_get_name_by_key(metrics
[i
], tmp
);
678 if (nla_put_string(skb
, i
+ 1, name
))
679 goto nla_put_failure
;
680 } else if (i
== RTAX_FEATURES
- 1) {
681 u32 user_features
= metrics
[i
] & RTAX_FEATURE_MASK
;
685 BUILD_BUG_ON(RTAX_FEATURE_MASK
& DST_FEATURE_MASK
);
686 if (nla_put_u32(skb
, i
+ 1, user_features
))
687 goto nla_put_failure
;
689 if (nla_put_u32(skb
, i
+ 1, metrics
[i
]))
690 goto nla_put_failure
;
697 nla_nest_cancel(skb
, mx
);
701 return nla_nest_end(skb
, mx
);
704 nla_nest_cancel(skb
, mx
);
707 EXPORT_SYMBOL(rtnetlink_put_metrics
);
709 int rtnl_put_cacheinfo(struct sk_buff
*skb
, struct dst_entry
*dst
, u32 id
,
710 long expires
, u32 error
)
712 struct rta_cacheinfo ci
= {
713 .rta_lastuse
= jiffies_delta_to_clock_t(jiffies
- dst
->lastuse
),
714 .rta_used
= dst
->__use
,
715 .rta_clntref
= atomic_read(&(dst
->__refcnt
)),
723 clock
= jiffies_to_clock_t(abs(expires
));
724 clock
= min_t(unsigned long, clock
, INT_MAX
);
725 ci
.rta_expires
= (expires
> 0) ? clock
: -clock
;
727 return nla_put(skb
, RTA_CACHEINFO
, sizeof(ci
), &ci
);
729 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo
);
731 static void set_operstate(struct net_device
*dev
, unsigned char transition
)
733 unsigned char operstate
= dev
->operstate
;
735 switch (transition
) {
737 if ((operstate
== IF_OPER_DORMANT
||
738 operstate
== IF_OPER_UNKNOWN
) &&
740 operstate
= IF_OPER_UP
;
743 case IF_OPER_DORMANT
:
744 if (operstate
== IF_OPER_UP
||
745 operstate
== IF_OPER_UNKNOWN
)
746 operstate
= IF_OPER_DORMANT
;
750 if (dev
->operstate
!= operstate
) {
751 write_lock_bh(&dev_base_lock
);
752 dev
->operstate
= operstate
;
753 write_unlock_bh(&dev_base_lock
);
754 netdev_state_change(dev
);
758 static unsigned int rtnl_dev_get_flags(const struct net_device
*dev
)
760 return (dev
->flags
& ~(IFF_PROMISC
| IFF_ALLMULTI
)) |
761 (dev
->gflags
& (IFF_PROMISC
| IFF_ALLMULTI
));
764 static unsigned int rtnl_dev_combine_flags(const struct net_device
*dev
,
765 const struct ifinfomsg
*ifm
)
767 unsigned int flags
= ifm
->ifi_flags
;
769 /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
771 flags
= (flags
& ifm
->ifi_change
) |
772 (rtnl_dev_get_flags(dev
) & ~ifm
->ifi_change
);
777 static void copy_rtnl_link_stats(struct rtnl_link_stats
*a
,
778 const struct rtnl_link_stats64
*b
)
780 a
->rx_packets
= b
->rx_packets
;
781 a
->tx_packets
= b
->tx_packets
;
782 a
->rx_bytes
= b
->rx_bytes
;
783 a
->tx_bytes
= b
->tx_bytes
;
784 a
->rx_errors
= b
->rx_errors
;
785 a
->tx_errors
= b
->tx_errors
;
786 a
->rx_dropped
= b
->rx_dropped
;
787 a
->tx_dropped
= b
->tx_dropped
;
789 a
->multicast
= b
->multicast
;
790 a
->collisions
= b
->collisions
;
792 a
->rx_length_errors
= b
->rx_length_errors
;
793 a
->rx_over_errors
= b
->rx_over_errors
;
794 a
->rx_crc_errors
= b
->rx_crc_errors
;
795 a
->rx_frame_errors
= b
->rx_frame_errors
;
796 a
->rx_fifo_errors
= b
->rx_fifo_errors
;
797 a
->rx_missed_errors
= b
->rx_missed_errors
;
799 a
->tx_aborted_errors
= b
->tx_aborted_errors
;
800 a
->tx_carrier_errors
= b
->tx_carrier_errors
;
801 a
->tx_fifo_errors
= b
->tx_fifo_errors
;
802 a
->tx_heartbeat_errors
= b
->tx_heartbeat_errors
;
803 a
->tx_window_errors
= b
->tx_window_errors
;
805 a
->rx_compressed
= b
->rx_compressed
;
806 a
->tx_compressed
= b
->tx_compressed
;
808 a
->rx_nohandler
= b
->rx_nohandler
;
812 static inline int rtnl_vfinfo_size(const struct net_device
*dev
,
815 if (dev
->dev
.parent
&& (ext_filter_mask
& RTEXT_FILTER_VF
)) {
816 int num_vfs
= dev_num_vf(dev
->dev
.parent
);
817 size_t size
= nla_total_size(0);
820 nla_total_size(sizeof(struct ifla_vf_mac
)) +
821 nla_total_size(sizeof(struct ifla_vf_vlan
)) +
822 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
823 nla_total_size(MAX_VLAN_LIST_LEN
*
824 sizeof(struct ifla_vf_vlan_info
)) +
825 nla_total_size(sizeof(struct ifla_vf_spoofchk
)) +
826 nla_total_size(sizeof(struct ifla_vf_tx_rate
)) +
827 nla_total_size(sizeof(struct ifla_vf_rate
)) +
828 nla_total_size(sizeof(struct ifla_vf_link_state
)) +
829 nla_total_size(sizeof(struct ifla_vf_rss_query_en
)) +
830 nla_total_size(0) + /* nest IFLA_VF_STATS */
831 /* IFLA_VF_STATS_RX_PACKETS */
832 nla_total_size_64bit(sizeof(__u64
)) +
833 /* IFLA_VF_STATS_TX_PACKETS */
834 nla_total_size_64bit(sizeof(__u64
)) +
835 /* IFLA_VF_STATS_RX_BYTES */
836 nla_total_size_64bit(sizeof(__u64
)) +
837 /* IFLA_VF_STATS_TX_BYTES */
838 nla_total_size_64bit(sizeof(__u64
)) +
839 /* IFLA_VF_STATS_BROADCAST */
840 nla_total_size_64bit(sizeof(__u64
)) +
841 /* IFLA_VF_STATS_MULTICAST */
842 nla_total_size_64bit(sizeof(__u64
)) +
843 nla_total_size(sizeof(struct ifla_vf_trust
)));
849 static size_t rtnl_port_size(const struct net_device
*dev
,
852 size_t port_size
= nla_total_size(4) /* PORT_VF */
853 + nla_total_size(PORT_PROFILE_MAX
) /* PORT_PROFILE */
854 + nla_total_size(PORT_UUID_MAX
) /* PORT_INSTANCE_UUID */
855 + nla_total_size(PORT_UUID_MAX
) /* PORT_HOST_UUID */
856 + nla_total_size(1) /* PROT_VDP_REQUEST */
857 + nla_total_size(2); /* PORT_VDP_RESPONSE */
858 size_t vf_ports_size
= nla_total_size(sizeof(struct nlattr
));
859 size_t vf_port_size
= nla_total_size(sizeof(struct nlattr
))
861 size_t port_self_size
= nla_total_size(sizeof(struct nlattr
))
864 if (!dev
->netdev_ops
->ndo_get_vf_port
|| !dev
->dev
.parent
||
865 !(ext_filter_mask
& RTEXT_FILTER_VF
))
867 if (dev_num_vf(dev
->dev
.parent
))
868 return port_self_size
+ vf_ports_size
+
869 vf_port_size
* dev_num_vf(dev
->dev
.parent
);
871 return port_self_size
;
874 static size_t rtnl_xdp_size(void)
876 size_t xdp_size
= nla_total_size(0) + /* nest IFLA_XDP */
877 nla_total_size(1) + /* XDP_ATTACHED */
878 nla_total_size(4); /* XDP_PROG_ID */
883 static noinline
size_t if_nlmsg_size(const struct net_device
*dev
,
886 return NLMSG_ALIGN(sizeof(struct ifinfomsg
))
887 + nla_total_size(IFNAMSIZ
) /* IFLA_IFNAME */
888 + nla_total_size(IFALIASZ
) /* IFLA_IFALIAS */
889 + nla_total_size(IFNAMSIZ
) /* IFLA_QDISC */
890 + nla_total_size_64bit(sizeof(struct rtnl_link_ifmap
))
891 + nla_total_size(sizeof(struct rtnl_link_stats
))
892 + nla_total_size_64bit(sizeof(struct rtnl_link_stats64
))
893 + nla_total_size(MAX_ADDR_LEN
) /* IFLA_ADDRESS */
894 + nla_total_size(MAX_ADDR_LEN
) /* IFLA_BROADCAST */
895 + nla_total_size(4) /* IFLA_TXQLEN */
896 + nla_total_size(4) /* IFLA_WEIGHT */
897 + nla_total_size(4) /* IFLA_MTU */
898 + nla_total_size(4) /* IFLA_LINK */
899 + nla_total_size(4) /* IFLA_MASTER */
900 + nla_total_size(1) /* IFLA_CARRIER */
901 + nla_total_size(4) /* IFLA_PROMISCUITY */
902 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
903 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
904 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
905 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
906 + nla_total_size(1) /* IFLA_OPERSTATE */
907 + nla_total_size(1) /* IFLA_LINKMODE */
908 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
909 + nla_total_size(4) /* IFLA_LINK_NETNSID */
910 + nla_total_size(4) /* IFLA_GROUP */
911 + nla_total_size(ext_filter_mask
912 & RTEXT_FILTER_VF
? 4 : 0) /* IFLA_NUM_VF */
913 + rtnl_vfinfo_size(dev
, ext_filter_mask
) /* IFLA_VFINFO_LIST */
914 + rtnl_port_size(dev
, ext_filter_mask
) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
915 + rtnl_link_get_size(dev
) /* IFLA_LINKINFO */
916 + rtnl_link_get_af_size(dev
, ext_filter_mask
) /* IFLA_AF_SPEC */
917 + nla_total_size(MAX_PHYS_ITEM_ID_LEN
) /* IFLA_PHYS_PORT_ID */
918 + nla_total_size(MAX_PHYS_ITEM_ID_LEN
) /* IFLA_PHYS_SWITCH_ID */
919 + nla_total_size(IFNAMSIZ
) /* IFLA_PHYS_PORT_NAME */
920 + rtnl_xdp_size() /* IFLA_XDP */
921 + nla_total_size(4) /* IFLA_EVENT */
922 + nla_total_size(4) /* IFLA_NEW_NETNSID */
923 + nla_total_size(4) /* IFLA_NEW_IFINDEX */
924 + nla_total_size(1) /* IFLA_PROTO_DOWN */
925 + nla_total_size(4) /* IFLA_IF_NETNSID */
926 + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */
927 + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */
931 static int rtnl_vf_ports_fill(struct sk_buff
*skb
, struct net_device
*dev
)
933 struct nlattr
*vf_ports
;
934 struct nlattr
*vf_port
;
938 vf_ports
= nla_nest_start(skb
, IFLA_VF_PORTS
);
942 for (vf
= 0; vf
< dev_num_vf(dev
->dev
.parent
); vf
++) {
943 vf_port
= nla_nest_start(skb
, IFLA_VF_PORT
);
945 goto nla_put_failure
;
946 if (nla_put_u32(skb
, IFLA_PORT_VF
, vf
))
947 goto nla_put_failure
;
948 err
= dev
->netdev_ops
->ndo_get_vf_port(dev
, vf
, skb
);
949 if (err
== -EMSGSIZE
)
950 goto nla_put_failure
;
952 nla_nest_cancel(skb
, vf_port
);
955 nla_nest_end(skb
, vf_port
);
958 nla_nest_end(skb
, vf_ports
);
963 nla_nest_cancel(skb
, vf_ports
);
967 static int rtnl_port_self_fill(struct sk_buff
*skb
, struct net_device
*dev
)
969 struct nlattr
*port_self
;
972 port_self
= nla_nest_start(skb
, IFLA_PORT_SELF
);
976 err
= dev
->netdev_ops
->ndo_get_vf_port(dev
, PORT_SELF_VF
, skb
);
978 nla_nest_cancel(skb
, port_self
);
979 return (err
== -EMSGSIZE
) ? err
: 0;
982 nla_nest_end(skb
, port_self
);
987 static int rtnl_port_fill(struct sk_buff
*skb
, struct net_device
*dev
,
992 if (!dev
->netdev_ops
->ndo_get_vf_port
|| !dev
->dev
.parent
||
993 !(ext_filter_mask
& RTEXT_FILTER_VF
))
996 err
= rtnl_port_self_fill(skb
, dev
);
1000 if (dev_num_vf(dev
->dev
.parent
)) {
1001 err
= rtnl_vf_ports_fill(skb
, dev
);
1009 static int rtnl_phys_port_id_fill(struct sk_buff
*skb
, struct net_device
*dev
)
1012 struct netdev_phys_item_id ppid
;
1014 err
= dev_get_phys_port_id(dev
, &ppid
);
1016 if (err
== -EOPNOTSUPP
)
1021 if (nla_put(skb
, IFLA_PHYS_PORT_ID
, ppid
.id_len
, ppid
.id
))
1027 static int rtnl_phys_port_name_fill(struct sk_buff
*skb
, struct net_device
*dev
)
1029 char name
[IFNAMSIZ
];
1032 err
= dev_get_phys_port_name(dev
, name
, sizeof(name
));
1034 if (err
== -EOPNOTSUPP
)
1039 if (nla_put_string(skb
, IFLA_PHYS_PORT_NAME
, name
))
1045 static int rtnl_phys_switch_id_fill(struct sk_buff
*skb
, struct net_device
*dev
)
1048 struct switchdev_attr attr
= {
1050 .id
= SWITCHDEV_ATTR_ID_PORT_PARENT_ID
,
1051 .flags
= SWITCHDEV_F_NO_RECURSE
,
1054 err
= switchdev_port_attr_get(dev
, &attr
);
1056 if (err
== -EOPNOTSUPP
)
1061 if (nla_put(skb
, IFLA_PHYS_SWITCH_ID
, attr
.u
.ppid
.id_len
,
1068 static noinline_for_stack
int rtnl_fill_stats(struct sk_buff
*skb
,
1069 struct net_device
*dev
)
1071 struct rtnl_link_stats64
*sp
;
1072 struct nlattr
*attr
;
1074 attr
= nla_reserve_64bit(skb
, IFLA_STATS64
,
1075 sizeof(struct rtnl_link_stats64
), IFLA_PAD
);
1079 sp
= nla_data(attr
);
1080 dev_get_stats(dev
, sp
);
1082 attr
= nla_reserve(skb
, IFLA_STATS
,
1083 sizeof(struct rtnl_link_stats
));
1087 copy_rtnl_link_stats(nla_data(attr
), sp
);
1092 static noinline_for_stack
int rtnl_fill_vfinfo(struct sk_buff
*skb
,
1093 struct net_device
*dev
,
1095 struct nlattr
*vfinfo
)
1097 struct ifla_vf_rss_query_en vf_rss_query_en
;
1098 struct nlattr
*vf
, *vfstats
, *vfvlanlist
;
1099 struct ifla_vf_link_state vf_linkstate
;
1100 struct ifla_vf_vlan_info vf_vlan_info
;
1101 struct ifla_vf_spoofchk vf_spoofchk
;
1102 struct ifla_vf_tx_rate vf_tx_rate
;
1103 struct ifla_vf_stats vf_stats
;
1104 struct ifla_vf_trust vf_trust
;
1105 struct ifla_vf_vlan vf_vlan
;
1106 struct ifla_vf_rate vf_rate
;
1107 struct ifla_vf_mac vf_mac
;
1108 struct ifla_vf_info ivi
;
1110 memset(&ivi
, 0, sizeof(ivi
));
1112 /* Not all SR-IOV capable drivers support the
1113 * spoofcheck and "RSS query enable" query. Preset to
1114 * -1 so the user space tool can detect that the driver
1115 * didn't report anything.
1118 ivi
.rss_query_en
= -1;
1120 /* The default value for VF link state is "auto"
1121 * IFLA_VF_LINK_STATE_AUTO which equals zero
1124 /* VLAN Protocol by default is 802.1Q */
1125 ivi
.vlan_proto
= htons(ETH_P_8021Q
);
1126 if (dev
->netdev_ops
->ndo_get_vf_config(dev
, vfs_num
, &ivi
))
1129 memset(&vf_vlan_info
, 0, sizeof(vf_vlan_info
));
1138 vf_rss_query_en
.vf
=
1139 vf_trust
.vf
= ivi
.vf
;
1141 memcpy(vf_mac
.mac
, ivi
.mac
, sizeof(ivi
.mac
));
1142 vf_vlan
.vlan
= ivi
.vlan
;
1143 vf_vlan
.qos
= ivi
.qos
;
1144 vf_vlan_info
.vlan
= ivi
.vlan
;
1145 vf_vlan_info
.qos
= ivi
.qos
;
1146 vf_vlan_info
.vlan_proto
= ivi
.vlan_proto
;
1147 vf_tx_rate
.rate
= ivi
.max_tx_rate
;
1148 vf_rate
.min_tx_rate
= ivi
.min_tx_rate
;
1149 vf_rate
.max_tx_rate
= ivi
.max_tx_rate
;
1150 vf_spoofchk
.setting
= ivi
.spoofchk
;
1151 vf_linkstate
.link_state
= ivi
.linkstate
;
1152 vf_rss_query_en
.setting
= ivi
.rss_query_en
;
1153 vf_trust
.setting
= ivi
.trusted
;
1154 vf
= nla_nest_start(skb
, IFLA_VF_INFO
);
1156 goto nla_put_vfinfo_failure
;
1157 if (nla_put(skb
, IFLA_VF_MAC
, sizeof(vf_mac
), &vf_mac
) ||
1158 nla_put(skb
, IFLA_VF_VLAN
, sizeof(vf_vlan
), &vf_vlan
) ||
1159 nla_put(skb
, IFLA_VF_RATE
, sizeof(vf_rate
),
1161 nla_put(skb
, IFLA_VF_TX_RATE
, sizeof(vf_tx_rate
),
1163 nla_put(skb
, IFLA_VF_SPOOFCHK
, sizeof(vf_spoofchk
),
1165 nla_put(skb
, IFLA_VF_LINK_STATE
, sizeof(vf_linkstate
),
1167 nla_put(skb
, IFLA_VF_RSS_QUERY_EN
,
1168 sizeof(vf_rss_query_en
),
1169 &vf_rss_query_en
) ||
1170 nla_put(skb
, IFLA_VF_TRUST
,
1171 sizeof(vf_trust
), &vf_trust
))
1172 goto nla_put_vf_failure
;
1173 vfvlanlist
= nla_nest_start(skb
, IFLA_VF_VLAN_LIST
);
1175 goto nla_put_vf_failure
;
1176 if (nla_put(skb
, IFLA_VF_VLAN_INFO
, sizeof(vf_vlan_info
),
1178 nla_nest_cancel(skb
, vfvlanlist
);
1179 goto nla_put_vf_failure
;
1181 nla_nest_end(skb
, vfvlanlist
);
1182 memset(&vf_stats
, 0, sizeof(vf_stats
));
1183 if (dev
->netdev_ops
->ndo_get_vf_stats
)
1184 dev
->netdev_ops
->ndo_get_vf_stats(dev
, vfs_num
,
1186 vfstats
= nla_nest_start(skb
, IFLA_VF_STATS
);
1188 goto nla_put_vf_failure
;
1189 if (nla_put_u64_64bit(skb
, IFLA_VF_STATS_RX_PACKETS
,
1190 vf_stats
.rx_packets
, IFLA_VF_STATS_PAD
) ||
1191 nla_put_u64_64bit(skb
, IFLA_VF_STATS_TX_PACKETS
,
1192 vf_stats
.tx_packets
, IFLA_VF_STATS_PAD
) ||
1193 nla_put_u64_64bit(skb
, IFLA_VF_STATS_RX_BYTES
,
1194 vf_stats
.rx_bytes
, IFLA_VF_STATS_PAD
) ||
1195 nla_put_u64_64bit(skb
, IFLA_VF_STATS_TX_BYTES
,
1196 vf_stats
.tx_bytes
, IFLA_VF_STATS_PAD
) ||
1197 nla_put_u64_64bit(skb
, IFLA_VF_STATS_BROADCAST
,
1198 vf_stats
.broadcast
, IFLA_VF_STATS_PAD
) ||
1199 nla_put_u64_64bit(skb
, IFLA_VF_STATS_MULTICAST
,
1200 vf_stats
.multicast
, IFLA_VF_STATS_PAD
)) {
1201 nla_nest_cancel(skb
, vfstats
);
1202 goto nla_put_vf_failure
;
1204 nla_nest_end(skb
, vfstats
);
1205 nla_nest_end(skb
, vf
);
1209 nla_nest_cancel(skb
, vf
);
1210 nla_put_vfinfo_failure
:
1211 nla_nest_cancel(skb
, vfinfo
);
1215 static noinline_for_stack
int rtnl_fill_vf(struct sk_buff
*skb
,
1216 struct net_device
*dev
,
1217 u32 ext_filter_mask
)
1219 struct nlattr
*vfinfo
;
1222 if (!dev
->dev
.parent
|| ((ext_filter_mask
& RTEXT_FILTER_VF
) == 0))
1225 num_vfs
= dev_num_vf(dev
->dev
.parent
);
1226 if (nla_put_u32(skb
, IFLA_NUM_VF
, num_vfs
))
1229 if (!dev
->netdev_ops
->ndo_get_vf_config
)
1232 vfinfo
= nla_nest_start(skb
, IFLA_VFINFO_LIST
);
1236 for (i
= 0; i
< num_vfs
; i
++) {
1237 if (rtnl_fill_vfinfo(skb
, dev
, i
, vfinfo
))
1241 nla_nest_end(skb
, vfinfo
);
1245 static int rtnl_fill_link_ifmap(struct sk_buff
*skb
, struct net_device
*dev
)
1247 struct rtnl_link_ifmap map
;
1249 memset(&map
, 0, sizeof(map
));
1250 map
.mem_start
= dev
->mem_start
;
1251 map
.mem_end
= dev
->mem_end
;
1252 map
.base_addr
= dev
->base_addr
;
1255 map
.port
= dev
->if_port
;
1257 if (nla_put_64bit(skb
, IFLA_MAP
, sizeof(map
), &map
, IFLA_PAD
))
1263 static u8
rtnl_xdp_attached_mode(struct net_device
*dev
, u32
*prog_id
)
1265 const struct net_device_ops
*ops
= dev
->netdev_ops
;
1266 const struct bpf_prog
*generic_xdp_prog
;
1271 generic_xdp_prog
= rtnl_dereference(dev
->xdp_prog
);
1272 if (generic_xdp_prog
) {
1273 *prog_id
= generic_xdp_prog
->aux
->id
;
1274 return XDP_ATTACHED_SKB
;
1277 return XDP_ATTACHED_NONE
;
1279 return __dev_xdp_attached(dev
, ops
->ndo_bpf
, prog_id
);
1282 static int rtnl_xdp_fill(struct sk_buff
*skb
, struct net_device
*dev
)
1288 xdp
= nla_nest_start(skb
, IFLA_XDP
);
1292 err
= nla_put_u8(skb
, IFLA_XDP_ATTACHED
,
1293 rtnl_xdp_attached_mode(dev
, &prog_id
));
1298 err
= nla_put_u32(skb
, IFLA_XDP_PROG_ID
, prog_id
);
1303 nla_nest_end(skb
, xdp
);
1307 nla_nest_cancel(skb
, xdp
);
1311 static u32
rtnl_get_event(unsigned long event
)
1313 u32 rtnl_event_type
= IFLA_EVENT_NONE
;
1317 rtnl_event_type
= IFLA_EVENT_REBOOT
;
1319 case NETDEV_FEAT_CHANGE
:
1320 rtnl_event_type
= IFLA_EVENT_FEATURES
;
1322 case NETDEV_BONDING_FAILOVER
:
1323 rtnl_event_type
= IFLA_EVENT_BONDING_FAILOVER
;
1325 case NETDEV_NOTIFY_PEERS
:
1326 rtnl_event_type
= IFLA_EVENT_NOTIFY_PEERS
;
1328 case NETDEV_RESEND_IGMP
:
1329 rtnl_event_type
= IFLA_EVENT_IGMP_RESEND
;
1331 case NETDEV_CHANGEINFODATA
:
1332 rtnl_event_type
= IFLA_EVENT_BONDING_OPTIONS
;
1338 return rtnl_event_type
;
1341 static int put_master_ifindex(struct sk_buff
*skb
, struct net_device
*dev
)
1343 const struct net_device
*upper_dev
;
1348 upper_dev
= netdev_master_upper_dev_get_rcu(dev
);
1350 ret
= nla_put_u32(skb
, IFLA_MASTER
, upper_dev
->ifindex
);
1356 static int nla_put_iflink(struct sk_buff
*skb
, const struct net_device
*dev
,
1359 int ifindex
= dev_get_iflink(dev
);
1361 if (force
|| dev
->ifindex
!= ifindex
)
1362 return nla_put_u32(skb
, IFLA_LINK
, ifindex
);
1367 static noinline_for_stack
int nla_put_ifalias(struct sk_buff
*skb
,
1368 struct net_device
*dev
)
1373 ret
= dev_get_alias(dev
, buf
, sizeof(buf
));
1374 return ret
> 0 ? nla_put_string(skb
, IFLA_IFALIAS
, buf
) : 0;
1377 static int rtnl_fill_link_netnsid(struct sk_buff
*skb
,
1378 const struct net_device
*dev
,
1379 struct net
*src_net
)
1381 bool put_iflink
= false;
1383 if (dev
->rtnl_link_ops
&& dev
->rtnl_link_ops
->get_link_net
) {
1384 struct net
*link_net
= dev
->rtnl_link_ops
->get_link_net(dev
);
1386 if (!net_eq(dev_net(dev
), link_net
)) {
1387 int id
= peernet2id_alloc(src_net
, link_net
);
1389 if (nla_put_s32(skb
, IFLA_LINK_NETNSID
, id
))
1396 return nla_put_iflink(skb
, dev
, put_iflink
);
1399 static int rtnl_fill_link_af(struct sk_buff
*skb
,
1400 const struct net_device
*dev
,
1401 u32 ext_filter_mask
)
1403 const struct rtnl_af_ops
*af_ops
;
1404 struct nlattr
*af_spec
;
1406 af_spec
= nla_nest_start(skb
, IFLA_AF_SPEC
);
1410 list_for_each_entry_rcu(af_ops
, &rtnl_af_ops
, list
) {
1414 if (!af_ops
->fill_link_af
)
1417 af
= nla_nest_start(skb
, af_ops
->family
);
1421 err
= af_ops
->fill_link_af(skb
, dev
, ext_filter_mask
);
1423 * Caller may return ENODATA to indicate that there
1424 * was no data to be dumped. This is not an error, it
1425 * means we should trim the attribute header and
1428 if (err
== -ENODATA
)
1429 nla_nest_cancel(skb
, af
);
1433 nla_nest_end(skb
, af
);
1436 nla_nest_end(skb
, af_spec
);
1440 static int rtnl_fill_ifinfo(struct sk_buff
*skb
,
1441 struct net_device
*dev
, struct net
*src_net
,
1442 int type
, u32 pid
, u32 seq
, u32 change
,
1443 unsigned int flags
, u32 ext_filter_mask
,
1444 u32 event
, int *new_nsid
, int new_ifindex
,
1447 struct ifinfomsg
*ifm
;
1448 struct nlmsghdr
*nlh
;
1451 nlh
= nlmsg_put(skb
, pid
, seq
, type
, sizeof(*ifm
), flags
);
1455 ifm
= nlmsg_data(nlh
);
1456 ifm
->ifi_family
= AF_UNSPEC
;
1458 ifm
->ifi_type
= dev
->type
;
1459 ifm
->ifi_index
= dev
->ifindex
;
1460 ifm
->ifi_flags
= dev_get_flags(dev
);
1461 ifm
->ifi_change
= change
;
1463 if (tgt_netnsid
>= 0 && nla_put_s32(skb
, IFLA_IF_NETNSID
, tgt_netnsid
))
1464 goto nla_put_failure
;
1466 if (nla_put_string(skb
, IFLA_IFNAME
, dev
->name
) ||
1467 nla_put_u32(skb
, IFLA_TXQLEN
, dev
->tx_queue_len
) ||
1468 nla_put_u8(skb
, IFLA_OPERSTATE
,
1469 netif_running(dev
) ? dev
->operstate
: IF_OPER_DOWN
) ||
1470 nla_put_u8(skb
, IFLA_LINKMODE
, dev
->link_mode
) ||
1471 nla_put_u32(skb
, IFLA_MTU
, dev
->mtu
) ||
1472 nla_put_u32(skb
, IFLA_GROUP
, dev
->group
) ||
1473 nla_put_u32(skb
, IFLA_PROMISCUITY
, dev
->promiscuity
) ||
1474 nla_put_u32(skb
, IFLA_NUM_TX_QUEUES
, dev
->num_tx_queues
) ||
1475 nla_put_u32(skb
, IFLA_GSO_MAX_SEGS
, dev
->gso_max_segs
) ||
1476 nla_put_u32(skb
, IFLA_GSO_MAX_SIZE
, dev
->gso_max_size
) ||
1478 nla_put_u32(skb
, IFLA_NUM_RX_QUEUES
, dev
->num_rx_queues
) ||
1480 put_master_ifindex(skb
, dev
) ||
1481 nla_put_u8(skb
, IFLA_CARRIER
, netif_carrier_ok(dev
)) ||
1483 nla_put_string(skb
, IFLA_QDISC
, dev
->qdisc
->ops
->id
)) ||
1484 nla_put_ifalias(skb
, dev
) ||
1485 nla_put_u32(skb
, IFLA_CARRIER_CHANGES
,
1486 atomic_read(&dev
->carrier_up_count
) +
1487 atomic_read(&dev
->carrier_down_count
)) ||
1488 nla_put_u8(skb
, IFLA_PROTO_DOWN
, dev
->proto_down
) ||
1489 nla_put_u32(skb
, IFLA_CARRIER_UP_COUNT
,
1490 atomic_read(&dev
->carrier_up_count
)) ||
1491 nla_put_u32(skb
, IFLA_CARRIER_DOWN_COUNT
,
1492 atomic_read(&dev
->carrier_down_count
)))
1493 goto nla_put_failure
;
1495 if (event
!= IFLA_EVENT_NONE
) {
1496 if (nla_put_u32(skb
, IFLA_EVENT
, event
))
1497 goto nla_put_failure
;
1500 if (rtnl_fill_link_ifmap(skb
, dev
))
1501 goto nla_put_failure
;
1503 if (dev
->addr_len
) {
1504 if (nla_put(skb
, IFLA_ADDRESS
, dev
->addr_len
, dev
->dev_addr
) ||
1505 nla_put(skb
, IFLA_BROADCAST
, dev
->addr_len
, dev
->broadcast
))
1506 goto nla_put_failure
;
1509 if (rtnl_phys_port_id_fill(skb
, dev
))
1510 goto nla_put_failure
;
1512 if (rtnl_phys_port_name_fill(skb
, dev
))
1513 goto nla_put_failure
;
1515 if (rtnl_phys_switch_id_fill(skb
, dev
))
1516 goto nla_put_failure
;
1518 if (rtnl_fill_stats(skb
, dev
))
1519 goto nla_put_failure
;
1521 if (rtnl_fill_vf(skb
, dev
, ext_filter_mask
))
1522 goto nla_put_failure
;
1524 if (rtnl_port_fill(skb
, dev
, ext_filter_mask
))
1525 goto nla_put_failure
;
1527 if (rtnl_xdp_fill(skb
, dev
))
1528 goto nla_put_failure
;
1530 if (dev
->rtnl_link_ops
|| rtnl_have_link_slave_info(dev
)) {
1531 if (rtnl_link_fill(skb
, dev
) < 0)
1532 goto nla_put_failure
;
1535 if (rtnl_fill_link_netnsid(skb
, dev
, src_net
))
1536 goto nla_put_failure
;
1539 nla_put_s32(skb
, IFLA_NEW_NETNSID
, *new_nsid
) < 0)
1540 goto nla_put_failure
;
1542 nla_put_s32(skb
, IFLA_NEW_IFINDEX
, new_ifindex
) < 0)
1543 goto nla_put_failure
;
1547 if (rtnl_fill_link_af(skb
, dev
, ext_filter_mask
))
1548 goto nla_put_failure_rcu
;
1551 nlmsg_end(skb
, nlh
);
1554 nla_put_failure_rcu
:
1557 nlmsg_cancel(skb
, nlh
);
1561 static const struct nla_policy ifla_policy
[IFLA_MAX
+1] = {
1562 [IFLA_IFNAME
] = { .type
= NLA_STRING
, .len
= IFNAMSIZ
-1 },
1563 [IFLA_ADDRESS
] = { .type
= NLA_BINARY
, .len
= MAX_ADDR_LEN
},
1564 [IFLA_BROADCAST
] = { .type
= NLA_BINARY
, .len
= MAX_ADDR_LEN
},
1565 [IFLA_MAP
] = { .len
= sizeof(struct rtnl_link_ifmap
) },
1566 [IFLA_MTU
] = { .type
= NLA_U32
},
1567 [IFLA_LINK
] = { .type
= NLA_U32
},
1568 [IFLA_MASTER
] = { .type
= NLA_U32
},
1569 [IFLA_CARRIER
] = { .type
= NLA_U8
},
1570 [IFLA_TXQLEN
] = { .type
= NLA_U32
},
1571 [IFLA_WEIGHT
] = { .type
= NLA_U32
},
1572 [IFLA_OPERSTATE
] = { .type
= NLA_U8
},
1573 [IFLA_LINKMODE
] = { .type
= NLA_U8
},
1574 [IFLA_LINKINFO
] = { .type
= NLA_NESTED
},
1575 [IFLA_NET_NS_PID
] = { .type
= NLA_U32
},
1576 [IFLA_NET_NS_FD
] = { .type
= NLA_U32
},
1577 /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
1578 * allow 0-length string (needed to remove an alias).
1580 [IFLA_IFALIAS
] = { .type
= NLA_BINARY
, .len
= IFALIASZ
- 1 },
1581 [IFLA_VFINFO_LIST
] = {. type
= NLA_NESTED
},
1582 [IFLA_VF_PORTS
] = { .type
= NLA_NESTED
},
1583 [IFLA_PORT_SELF
] = { .type
= NLA_NESTED
},
1584 [IFLA_AF_SPEC
] = { .type
= NLA_NESTED
},
1585 [IFLA_EXT_MASK
] = { .type
= NLA_U32
},
1586 [IFLA_PROMISCUITY
] = { .type
= NLA_U32
},
1587 [IFLA_NUM_TX_QUEUES
] = { .type
= NLA_U32
},
1588 [IFLA_NUM_RX_QUEUES
] = { .type
= NLA_U32
},
1589 [IFLA_PHYS_PORT_ID
] = { .type
= NLA_BINARY
, .len
= MAX_PHYS_ITEM_ID_LEN
},
1590 [IFLA_CARRIER_CHANGES
] = { .type
= NLA_U32
}, /* ignored */
1591 [IFLA_PHYS_SWITCH_ID
] = { .type
= NLA_BINARY
, .len
= MAX_PHYS_ITEM_ID_LEN
},
1592 [IFLA_LINK_NETNSID
] = { .type
= NLA_S32
},
1593 [IFLA_PROTO_DOWN
] = { .type
= NLA_U8
},
1594 [IFLA_XDP
] = { .type
= NLA_NESTED
},
1595 [IFLA_EVENT
] = { .type
= NLA_U32
},
1596 [IFLA_GROUP
] = { .type
= NLA_U32
},
1597 [IFLA_IF_NETNSID
] = { .type
= NLA_S32
},
1598 [IFLA_CARRIER_UP_COUNT
] = { .type
= NLA_U32
},
1599 [IFLA_CARRIER_DOWN_COUNT
] = { .type
= NLA_U32
},
1602 static const struct nla_policy ifla_info_policy
[IFLA_INFO_MAX
+1] = {
1603 [IFLA_INFO_KIND
] = { .type
= NLA_STRING
},
1604 [IFLA_INFO_DATA
] = { .type
= NLA_NESTED
},
1605 [IFLA_INFO_SLAVE_KIND
] = { .type
= NLA_STRING
},
1606 [IFLA_INFO_SLAVE_DATA
] = { .type
= NLA_NESTED
},
1609 static const struct nla_policy ifla_vf_policy
[IFLA_VF_MAX
+1] = {
1610 [IFLA_VF_MAC
] = { .len
= sizeof(struct ifla_vf_mac
) },
1611 [IFLA_VF_VLAN
] = { .len
= sizeof(struct ifla_vf_vlan
) },
1612 [IFLA_VF_VLAN_LIST
] = { .type
= NLA_NESTED
},
1613 [IFLA_VF_TX_RATE
] = { .len
= sizeof(struct ifla_vf_tx_rate
) },
1614 [IFLA_VF_SPOOFCHK
] = { .len
= sizeof(struct ifla_vf_spoofchk
) },
1615 [IFLA_VF_RATE
] = { .len
= sizeof(struct ifla_vf_rate
) },
1616 [IFLA_VF_LINK_STATE
] = { .len
= sizeof(struct ifla_vf_link_state
) },
1617 [IFLA_VF_RSS_QUERY_EN
] = { .len
= sizeof(struct ifla_vf_rss_query_en
) },
1618 [IFLA_VF_STATS
] = { .type
= NLA_NESTED
},
1619 [IFLA_VF_TRUST
] = { .len
= sizeof(struct ifla_vf_trust
) },
1620 [IFLA_VF_IB_NODE_GUID
] = { .len
= sizeof(struct ifla_vf_guid
) },
1621 [IFLA_VF_IB_PORT_GUID
] = { .len
= sizeof(struct ifla_vf_guid
) },
1624 static const struct nla_policy ifla_port_policy
[IFLA_PORT_MAX
+1] = {
1625 [IFLA_PORT_VF
] = { .type
= NLA_U32
},
1626 [IFLA_PORT_PROFILE
] = { .type
= NLA_STRING
,
1627 .len
= PORT_PROFILE_MAX
},
1628 [IFLA_PORT_INSTANCE_UUID
] = { .type
= NLA_BINARY
,
1629 .len
= PORT_UUID_MAX
},
1630 [IFLA_PORT_HOST_UUID
] = { .type
= NLA_STRING
,
1631 .len
= PORT_UUID_MAX
},
1632 [IFLA_PORT_REQUEST
] = { .type
= NLA_U8
, },
1633 [IFLA_PORT_RESPONSE
] = { .type
= NLA_U16
, },
1635 /* Unused, but we need to keep it here since user space could
1636 * fill it. It's also broken with regard to NLA_BINARY use in
1637 * combination with structs.
1639 [IFLA_PORT_VSI_TYPE
] = { .type
= NLA_BINARY
,
1640 .len
= sizeof(struct ifla_port_vsi
) },
1643 static const struct nla_policy ifla_xdp_policy
[IFLA_XDP_MAX
+ 1] = {
1644 [IFLA_XDP_FD
] = { .type
= NLA_S32
},
1645 [IFLA_XDP_ATTACHED
] = { .type
= NLA_U8
},
1646 [IFLA_XDP_FLAGS
] = { .type
= NLA_U32
},
1647 [IFLA_XDP_PROG_ID
] = { .type
= NLA_U32
},
1650 static const struct rtnl_link_ops
*linkinfo_to_kind_ops(const struct nlattr
*nla
)
1652 const struct rtnl_link_ops
*ops
= NULL
;
1653 struct nlattr
*linfo
[IFLA_INFO_MAX
+ 1];
1655 if (nla_parse_nested(linfo
, IFLA_INFO_MAX
, nla
,
1656 ifla_info_policy
, NULL
) < 0)
1659 if (linfo
[IFLA_INFO_KIND
]) {
1660 char kind
[MODULE_NAME_LEN
];
1662 nla_strlcpy(kind
, linfo
[IFLA_INFO_KIND
], sizeof(kind
));
1663 ops
= rtnl_link_ops_get(kind
);
1669 static bool link_master_filtered(struct net_device
*dev
, int master_idx
)
1671 struct net_device
*master
;
1676 master
= netdev_master_upper_dev_get(dev
);
1677 if (!master
|| master
->ifindex
!= master_idx
)
1683 static bool link_kind_filtered(const struct net_device
*dev
,
1684 const struct rtnl_link_ops
*kind_ops
)
1686 if (kind_ops
&& dev
->rtnl_link_ops
!= kind_ops
)
1692 static bool link_dump_filtered(struct net_device
*dev
,
1694 const struct rtnl_link_ops
*kind_ops
)
1696 if (link_master_filtered(dev
, master_idx
) ||
1697 link_kind_filtered(dev
, kind_ops
))
1703 static struct net
*get_target_net(struct sock
*sk
, int netnsid
)
1707 net
= get_net_ns_by_id(sock_net(sk
), netnsid
);
1709 return ERR_PTR(-EINVAL
);
1711 /* For now, the caller is required to have CAP_NET_ADMIN in
1712 * the user namespace owning the target net ns.
1714 if (!sk_ns_capable(sk
, net
->user_ns
, CAP_NET_ADMIN
)) {
1716 return ERR_PTR(-EACCES
);
1721 static int rtnl_dump_ifinfo(struct sk_buff
*skb
, struct netlink_callback
*cb
)
1723 struct net
*net
= sock_net(skb
->sk
);
1724 struct net
*tgt_net
= net
;
1727 struct net_device
*dev
;
1728 struct hlist_head
*head
;
1729 struct nlattr
*tb
[IFLA_MAX
+1];
1730 u32 ext_filter_mask
= 0;
1731 const struct rtnl_link_ops
*kind_ops
= NULL
;
1732 unsigned int flags
= NLM_F_MULTI
;
1739 s_idx
= cb
->args
[1];
1741 /* A hack to preserve kernel<->userspace interface.
1742 * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
1743 * However, before Linux v3.9 the code here assumed rtgenmsg and that's
1744 * what iproute2 < v3.9.0 used.
1745 * We can detect the old iproute2. Even including the IFLA_EXT_MASK
1746 * attribute, its netlink message is shorter than struct ifinfomsg.
1748 hdrlen
= nlmsg_len(cb
->nlh
) < sizeof(struct ifinfomsg
) ?
1749 sizeof(struct rtgenmsg
) : sizeof(struct ifinfomsg
);
1751 if (nlmsg_parse(cb
->nlh
, hdrlen
, tb
, IFLA_MAX
,
1752 ifla_policy
, NULL
) >= 0) {
1753 if (tb
[IFLA_IF_NETNSID
]) {
1754 netnsid
= nla_get_s32(tb
[IFLA_IF_NETNSID
]);
1755 tgt_net
= get_target_net(skb
->sk
, netnsid
);
1756 if (IS_ERR(tgt_net
))
1757 return PTR_ERR(tgt_net
);
1760 if (tb
[IFLA_EXT_MASK
])
1761 ext_filter_mask
= nla_get_u32(tb
[IFLA_EXT_MASK
]);
1763 if (tb
[IFLA_MASTER
])
1764 master_idx
= nla_get_u32(tb
[IFLA_MASTER
]);
1766 if (tb
[IFLA_LINKINFO
])
1767 kind_ops
= linkinfo_to_kind_ops(tb
[IFLA_LINKINFO
]);
1769 if (master_idx
|| kind_ops
)
1770 flags
|= NLM_F_DUMP_FILTERED
;
1773 for (h
= s_h
; h
< NETDEV_HASHENTRIES
; h
++, s_idx
= 0) {
1775 head
= &tgt_net
->dev_index_head
[h
];
1776 hlist_for_each_entry(dev
, head
, index_hlist
) {
1777 if (link_dump_filtered(dev
, master_idx
, kind_ops
))
1781 err
= rtnl_fill_ifinfo(skb
, dev
, net
,
1783 NETLINK_CB(cb
->skb
).portid
,
1784 cb
->nlh
->nlmsg_seq
, 0,
1786 ext_filter_mask
, 0, NULL
, 0,
1790 if (likely(skb
->len
))
1804 cb
->seq
= net
->dev_base_seq
;
1805 nl_dump_check_consistent(cb
, nlmsg_hdr(skb
));
1812 int rtnl_nla_parse_ifla(struct nlattr
**tb
, const struct nlattr
*head
, int len
,
1813 struct netlink_ext_ack
*exterr
)
1815 return nla_parse(tb
, IFLA_MAX
, head
, len
, ifla_policy
, exterr
);
1817 EXPORT_SYMBOL(rtnl_nla_parse_ifla
);
1819 struct net
*rtnl_link_get_net(struct net
*src_net
, struct nlattr
*tb
[])
1822 /* Examine the link attributes and figure out which
1823 * network namespace we are talking about.
1825 if (tb
[IFLA_NET_NS_PID
])
1826 net
= get_net_ns_by_pid(nla_get_u32(tb
[IFLA_NET_NS_PID
]));
1827 else if (tb
[IFLA_NET_NS_FD
])
1828 net
= get_net_ns_by_fd(nla_get_u32(tb
[IFLA_NET_NS_FD
]));
1830 net
= get_net(src_net
);
1833 EXPORT_SYMBOL(rtnl_link_get_net
);
1835 /* Figure out which network namespace we are talking about by
1836 * examining the link attributes in the following order:
1838 * 1. IFLA_NET_NS_PID
1840 * 3. IFLA_IF_NETNSID
1842 static struct net
*rtnl_link_get_net_by_nlattr(struct net
*src_net
,
1843 struct nlattr
*tb
[])
1847 if (tb
[IFLA_NET_NS_PID
] || tb
[IFLA_NET_NS_FD
])
1848 return rtnl_link_get_net(src_net
, tb
);
1850 if (!tb
[IFLA_IF_NETNSID
])
1851 return get_net(src_net
);
1853 net
= get_net_ns_by_id(src_net
, nla_get_u32(tb
[IFLA_IF_NETNSID
]));
1855 return ERR_PTR(-EINVAL
);
1860 static struct net
*rtnl_link_get_net_capable(const struct sk_buff
*skb
,
1861 struct net
*src_net
,
1862 struct nlattr
*tb
[], int cap
)
1866 net
= rtnl_link_get_net_by_nlattr(src_net
, tb
);
1870 if (!netlink_ns_capable(skb
, net
->user_ns
, cap
)) {
1872 return ERR_PTR(-EPERM
);
1878 /* Verify that rtnetlink requests do not pass additional properties
1879 * potentially referring to different network namespaces.
1881 static int rtnl_ensure_unique_netns(struct nlattr
*tb
[],
1882 struct netlink_ext_ack
*extack
,
1886 if (netns_id_only
) {
1887 if (!tb
[IFLA_NET_NS_PID
] && !tb
[IFLA_NET_NS_FD
])
1890 NL_SET_ERR_MSG(extack
, "specified netns attribute not supported");
1894 if (tb
[IFLA_IF_NETNSID
] && (tb
[IFLA_NET_NS_PID
] || tb
[IFLA_NET_NS_FD
]))
1897 if (tb
[IFLA_NET_NS_PID
] && (tb
[IFLA_IF_NETNSID
] || tb
[IFLA_NET_NS_FD
]))
1900 if (tb
[IFLA_NET_NS_FD
] && (tb
[IFLA_IF_NETNSID
] || tb
[IFLA_NET_NS_PID
]))
1906 NL_SET_ERR_MSG(extack
, "multiple netns identifying attributes specified");
1910 static int validate_linkmsg(struct net_device
*dev
, struct nlattr
*tb
[])
1913 if (tb
[IFLA_ADDRESS
] &&
1914 nla_len(tb
[IFLA_ADDRESS
]) < dev
->addr_len
)
1917 if (tb
[IFLA_BROADCAST
] &&
1918 nla_len(tb
[IFLA_BROADCAST
]) < dev
->addr_len
)
1922 if (tb
[IFLA_AF_SPEC
]) {
1926 nla_for_each_nested(af
, tb
[IFLA_AF_SPEC
], rem
) {
1927 const struct rtnl_af_ops
*af_ops
;
1930 af_ops
= rtnl_af_lookup(nla_type(af
));
1933 return -EAFNOSUPPORT
;
1936 if (!af_ops
->set_link_af
) {
1941 if (af_ops
->validate_link_af
) {
1942 err
= af_ops
->validate_link_af(dev
, af
);
1956 static int handle_infiniband_guid(struct net_device
*dev
, struct ifla_vf_guid
*ivt
,
1959 const struct net_device_ops
*ops
= dev
->netdev_ops
;
1961 return ops
->ndo_set_vf_guid(dev
, ivt
->vf
, ivt
->guid
, guid_type
);
1964 static int handle_vf_guid(struct net_device
*dev
, struct ifla_vf_guid
*ivt
, int guid_type
)
1966 if (dev
->type
!= ARPHRD_INFINIBAND
)
1969 return handle_infiniband_guid(dev
, ivt
, guid_type
);
1972 static int do_setvfinfo(struct net_device
*dev
, struct nlattr
**tb
)
1974 const struct net_device_ops
*ops
= dev
->netdev_ops
;
1977 if (tb
[IFLA_VF_MAC
]) {
1978 struct ifla_vf_mac
*ivm
= nla_data(tb
[IFLA_VF_MAC
]);
1981 if (ops
->ndo_set_vf_mac
)
1982 err
= ops
->ndo_set_vf_mac(dev
, ivm
->vf
,
1988 if (tb
[IFLA_VF_VLAN
]) {
1989 struct ifla_vf_vlan
*ivv
= nla_data(tb
[IFLA_VF_VLAN
]);
1992 if (ops
->ndo_set_vf_vlan
)
1993 err
= ops
->ndo_set_vf_vlan(dev
, ivv
->vf
, ivv
->vlan
,
1995 htons(ETH_P_8021Q
));
2000 if (tb
[IFLA_VF_VLAN_LIST
]) {
2001 struct ifla_vf_vlan_info
*ivvl
[MAX_VLAN_LIST_LEN
];
2002 struct nlattr
*attr
;
2006 if (!ops
->ndo_set_vf_vlan
)
2009 nla_for_each_nested(attr
, tb
[IFLA_VF_VLAN_LIST
], rem
) {
2010 if (nla_type(attr
) != IFLA_VF_VLAN_INFO
||
2011 nla_len(attr
) < NLA_HDRLEN
) {
2014 if (len
>= MAX_VLAN_LIST_LEN
)
2016 ivvl
[len
] = nla_data(attr
);
2023 err
= ops
->ndo_set_vf_vlan(dev
, ivvl
[0]->vf
, ivvl
[0]->vlan
,
2024 ivvl
[0]->qos
, ivvl
[0]->vlan_proto
);
2029 if (tb
[IFLA_VF_TX_RATE
]) {
2030 struct ifla_vf_tx_rate
*ivt
= nla_data(tb
[IFLA_VF_TX_RATE
]);
2031 struct ifla_vf_info ivf
;
2034 if (ops
->ndo_get_vf_config
)
2035 err
= ops
->ndo_get_vf_config(dev
, ivt
->vf
, &ivf
);
2040 if (ops
->ndo_set_vf_rate
)
2041 err
= ops
->ndo_set_vf_rate(dev
, ivt
->vf
,
2048 if (tb
[IFLA_VF_RATE
]) {
2049 struct ifla_vf_rate
*ivt
= nla_data(tb
[IFLA_VF_RATE
]);
2052 if (ops
->ndo_set_vf_rate
)
2053 err
= ops
->ndo_set_vf_rate(dev
, ivt
->vf
,
2060 if (tb
[IFLA_VF_SPOOFCHK
]) {
2061 struct ifla_vf_spoofchk
*ivs
= nla_data(tb
[IFLA_VF_SPOOFCHK
]);
2064 if (ops
->ndo_set_vf_spoofchk
)
2065 err
= ops
->ndo_set_vf_spoofchk(dev
, ivs
->vf
,
2071 if (tb
[IFLA_VF_LINK_STATE
]) {
2072 struct ifla_vf_link_state
*ivl
= nla_data(tb
[IFLA_VF_LINK_STATE
]);
2075 if (ops
->ndo_set_vf_link_state
)
2076 err
= ops
->ndo_set_vf_link_state(dev
, ivl
->vf
,
2082 if (tb
[IFLA_VF_RSS_QUERY_EN
]) {
2083 struct ifla_vf_rss_query_en
*ivrssq_en
;
2086 ivrssq_en
= nla_data(tb
[IFLA_VF_RSS_QUERY_EN
]);
2087 if (ops
->ndo_set_vf_rss_query_en
)
2088 err
= ops
->ndo_set_vf_rss_query_en(dev
, ivrssq_en
->vf
,
2089 ivrssq_en
->setting
);
2094 if (tb
[IFLA_VF_TRUST
]) {
2095 struct ifla_vf_trust
*ivt
= nla_data(tb
[IFLA_VF_TRUST
]);
2098 if (ops
->ndo_set_vf_trust
)
2099 err
= ops
->ndo_set_vf_trust(dev
, ivt
->vf
, ivt
->setting
);
2104 if (tb
[IFLA_VF_IB_NODE_GUID
]) {
2105 struct ifla_vf_guid
*ivt
= nla_data(tb
[IFLA_VF_IB_NODE_GUID
]);
2107 if (!ops
->ndo_set_vf_guid
)
2110 return handle_vf_guid(dev
, ivt
, IFLA_VF_IB_NODE_GUID
);
2113 if (tb
[IFLA_VF_IB_PORT_GUID
]) {
2114 struct ifla_vf_guid
*ivt
= nla_data(tb
[IFLA_VF_IB_PORT_GUID
]);
2116 if (!ops
->ndo_set_vf_guid
)
2119 return handle_vf_guid(dev
, ivt
, IFLA_VF_IB_PORT_GUID
);
2125 static int do_set_master(struct net_device
*dev
, int ifindex
,
2126 struct netlink_ext_ack
*extack
)
2128 struct net_device
*upper_dev
= netdev_master_upper_dev_get(dev
);
2129 const struct net_device_ops
*ops
;
2133 if (upper_dev
->ifindex
== ifindex
)
2135 ops
= upper_dev
->netdev_ops
;
2136 if (ops
->ndo_del_slave
) {
2137 err
= ops
->ndo_del_slave(upper_dev
, dev
);
2146 upper_dev
= __dev_get_by_index(dev_net(dev
), ifindex
);
2149 ops
= upper_dev
->netdev_ops
;
2150 if (ops
->ndo_add_slave
) {
2151 err
= ops
->ndo_add_slave(upper_dev
, dev
, extack
);
2161 #define DO_SETLINK_MODIFIED 0x01
2162 /* notify flag means notify + modified. */
2163 #define DO_SETLINK_NOTIFY 0x03
2164 static int do_setlink(const struct sk_buff
*skb
,
2165 struct net_device
*dev
, struct ifinfomsg
*ifm
,
2166 struct netlink_ext_ack
*extack
,
2167 struct nlattr
**tb
, char *ifname
, int status
)
2169 const struct net_device_ops
*ops
= dev
->netdev_ops
;
2172 err
= validate_linkmsg(dev
, tb
);
2176 if (tb
[IFLA_NET_NS_PID
] || tb
[IFLA_NET_NS_FD
] || tb
[IFLA_IF_NETNSID
]) {
2177 struct net
*net
= rtnl_link_get_net_capable(skb
, dev_net(dev
),
2184 err
= dev_change_net_namespace(dev
, net
, ifname
);
2188 status
|= DO_SETLINK_MODIFIED
;
2192 struct rtnl_link_ifmap
*u_map
;
2195 if (!ops
->ndo_set_config
) {
2200 if (!netif_device_present(dev
)) {
2205 u_map
= nla_data(tb
[IFLA_MAP
]);
2206 k_map
.mem_start
= (unsigned long) u_map
->mem_start
;
2207 k_map
.mem_end
= (unsigned long) u_map
->mem_end
;
2208 k_map
.base_addr
= (unsigned short) u_map
->base_addr
;
2209 k_map
.irq
= (unsigned char) u_map
->irq
;
2210 k_map
.dma
= (unsigned char) u_map
->dma
;
2211 k_map
.port
= (unsigned char) u_map
->port
;
2213 err
= ops
->ndo_set_config(dev
, &k_map
);
2217 status
|= DO_SETLINK_NOTIFY
;
2220 if (tb
[IFLA_ADDRESS
]) {
2221 struct sockaddr
*sa
;
2224 len
= sizeof(sa_family_t
) + max_t(size_t, dev
->addr_len
,
2226 sa
= kmalloc(len
, GFP_KERNEL
);
2231 sa
->sa_family
= dev
->type
;
2232 memcpy(sa
->sa_data
, nla_data(tb
[IFLA_ADDRESS
]),
2234 err
= dev_set_mac_address(dev
, sa
);
2238 status
|= DO_SETLINK_MODIFIED
;
2242 err
= dev_set_mtu(dev
, nla_get_u32(tb
[IFLA_MTU
]));
2245 status
|= DO_SETLINK_MODIFIED
;
2248 if (tb
[IFLA_GROUP
]) {
2249 dev_set_group(dev
, nla_get_u32(tb
[IFLA_GROUP
]));
2250 status
|= DO_SETLINK_NOTIFY
;
2254 * Interface selected by interface index but interface
2255 * name provided implies that a name change has been
2258 if (ifm
->ifi_index
> 0 && ifname
[0]) {
2259 err
= dev_change_name(dev
, ifname
);
2262 status
|= DO_SETLINK_MODIFIED
;
2265 if (tb
[IFLA_IFALIAS
]) {
2266 err
= dev_set_alias(dev
, nla_data(tb
[IFLA_IFALIAS
]),
2267 nla_len(tb
[IFLA_IFALIAS
]));
2270 status
|= DO_SETLINK_NOTIFY
;
2273 if (tb
[IFLA_BROADCAST
]) {
2274 nla_memcpy(dev
->broadcast
, tb
[IFLA_BROADCAST
], dev
->addr_len
);
2275 call_netdevice_notifiers(NETDEV_CHANGEADDR
, dev
);
2278 if (ifm
->ifi_flags
|| ifm
->ifi_change
) {
2279 err
= dev_change_flags(dev
, rtnl_dev_combine_flags(dev
, ifm
));
2284 if (tb
[IFLA_MASTER
]) {
2285 err
= do_set_master(dev
, nla_get_u32(tb
[IFLA_MASTER
]), extack
);
2288 status
|= DO_SETLINK_MODIFIED
;
2291 if (tb
[IFLA_CARRIER
]) {
2292 err
= dev_change_carrier(dev
, nla_get_u8(tb
[IFLA_CARRIER
]));
2295 status
|= DO_SETLINK_MODIFIED
;
2298 if (tb
[IFLA_TXQLEN
]) {
2299 unsigned int value
= nla_get_u32(tb
[IFLA_TXQLEN
]);
2300 unsigned int orig_len
= dev
->tx_queue_len
;
2302 if (dev
->tx_queue_len
^ value
) {
2303 dev
->tx_queue_len
= value
;
2304 err
= call_netdevice_notifiers(
2305 NETDEV_CHANGE_TX_QUEUE_LEN
, dev
);
2306 err
= notifier_to_errno(err
);
2308 dev
->tx_queue_len
= orig_len
;
2311 status
|= DO_SETLINK_MODIFIED
;
2315 if (tb
[IFLA_OPERSTATE
])
2316 set_operstate(dev
, nla_get_u8(tb
[IFLA_OPERSTATE
]));
2318 if (tb
[IFLA_LINKMODE
]) {
2319 unsigned char value
= nla_get_u8(tb
[IFLA_LINKMODE
]);
2321 write_lock_bh(&dev_base_lock
);
2322 if (dev
->link_mode
^ value
)
2323 status
|= DO_SETLINK_NOTIFY
;
2324 dev
->link_mode
= value
;
2325 write_unlock_bh(&dev_base_lock
);
2328 if (tb
[IFLA_VFINFO_LIST
]) {
2329 struct nlattr
*vfinfo
[IFLA_VF_MAX
+ 1];
2330 struct nlattr
*attr
;
2333 nla_for_each_nested(attr
, tb
[IFLA_VFINFO_LIST
], rem
) {
2334 if (nla_type(attr
) != IFLA_VF_INFO
||
2335 nla_len(attr
) < NLA_HDRLEN
) {
2339 err
= nla_parse_nested(vfinfo
, IFLA_VF_MAX
, attr
,
2340 ifla_vf_policy
, NULL
);
2343 err
= do_setvfinfo(dev
, vfinfo
);
2346 status
|= DO_SETLINK_NOTIFY
;
2351 if (tb
[IFLA_VF_PORTS
]) {
2352 struct nlattr
*port
[IFLA_PORT_MAX
+1];
2353 struct nlattr
*attr
;
2358 if (!ops
->ndo_set_vf_port
)
2361 nla_for_each_nested(attr
, tb
[IFLA_VF_PORTS
], rem
) {
2362 if (nla_type(attr
) != IFLA_VF_PORT
||
2363 nla_len(attr
) < NLA_HDRLEN
) {
2367 err
= nla_parse_nested(port
, IFLA_PORT_MAX
, attr
,
2368 ifla_port_policy
, NULL
);
2371 if (!port
[IFLA_PORT_VF
]) {
2375 vf
= nla_get_u32(port
[IFLA_PORT_VF
]);
2376 err
= ops
->ndo_set_vf_port(dev
, vf
, port
);
2379 status
|= DO_SETLINK_NOTIFY
;
2384 if (tb
[IFLA_PORT_SELF
]) {
2385 struct nlattr
*port
[IFLA_PORT_MAX
+1];
2387 err
= nla_parse_nested(port
, IFLA_PORT_MAX
,
2388 tb
[IFLA_PORT_SELF
], ifla_port_policy
,
2394 if (ops
->ndo_set_vf_port
)
2395 err
= ops
->ndo_set_vf_port(dev
, PORT_SELF_VF
, port
);
2398 status
|= DO_SETLINK_NOTIFY
;
2401 if (tb
[IFLA_AF_SPEC
]) {
2405 nla_for_each_nested(af
, tb
[IFLA_AF_SPEC
], rem
) {
2406 const struct rtnl_af_ops
*af_ops
;
2410 BUG_ON(!(af_ops
= rtnl_af_lookup(nla_type(af
))));
2412 err
= af_ops
->set_link_af(dev
, af
);
2419 status
|= DO_SETLINK_NOTIFY
;
2424 if (tb
[IFLA_PROTO_DOWN
]) {
2425 err
= dev_change_proto_down(dev
,
2426 nla_get_u8(tb
[IFLA_PROTO_DOWN
]));
2429 status
|= DO_SETLINK_NOTIFY
;
2433 struct nlattr
*xdp
[IFLA_XDP_MAX
+ 1];
2436 err
= nla_parse_nested(xdp
, IFLA_XDP_MAX
, tb
[IFLA_XDP
],
2437 ifla_xdp_policy
, NULL
);
2441 if (xdp
[IFLA_XDP_ATTACHED
] || xdp
[IFLA_XDP_PROG_ID
]) {
2446 if (xdp
[IFLA_XDP_FLAGS
]) {
2447 xdp_flags
= nla_get_u32(xdp
[IFLA_XDP_FLAGS
]);
2448 if (xdp_flags
& ~XDP_FLAGS_MASK
) {
2452 if (hweight32(xdp_flags
& XDP_FLAGS_MODES
) > 1) {
2458 if (xdp
[IFLA_XDP_FD
]) {
2459 err
= dev_change_xdp_fd(dev
, extack
,
2460 nla_get_s32(xdp
[IFLA_XDP_FD
]),
2464 status
|= DO_SETLINK_NOTIFY
;
2469 if (status
& DO_SETLINK_MODIFIED
) {
2470 if ((status
& DO_SETLINK_NOTIFY
) == DO_SETLINK_NOTIFY
)
2471 netdev_state_change(dev
);
2474 net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	int err;
	struct nlattr *tb[IFLA_MAX+1];
	char ifname[IFNAMSIZ];

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy,
			  extack);
	if (err < 0)
		goto errout;

	err = rtnl_ensure_unique_netns(tb, extack, false);
	if (err < 0)
		goto errout;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	err = -EINVAL;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else
		goto errout;

	if (dev == NULL) {
		err = -ENODEV;
		goto errout;
	}

	err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
errout:
	return err;
}
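/* Delete every device that belongs to the given numeric group, provided all
 * of them expose rtnl_link_ops with a ->dellink() callback.
 */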
static int rtnl_group_dellink(const struct net *net, int group)
{
	struct net_device *dev, *aux;
	LIST_HEAD(list_kill);
	bool found = false;

	if (!group)
		return -EPERM;

	for_each_netdev(net, dev) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			found = true;
			ops = dev->rtnl_link_ops;
			if (!ops || !ops->dellink)
				return -EOPNOTSUPP;
		}
	}

	if (!found)
		return -ENODEV;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			const struct rtnl_link_ops *ops;

			ops = dev->rtnl_link_ops;
			ops->dellink(dev, &list_kill);
		}
	}
	unregister_netdevice_many(&list_kill);

	return 0;
}
int rtnl_delete_link(struct net_device *dev)
{
	const struct rtnl_link_ops *ops;
	LIST_HEAD(list_kill);

	ops = dev->rtnl_link_ops;
	if (!ops || !ops->dellink)
		return -EOPNOTSUPP;

	ops->dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_delete_link);
static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net *tgt_net = net;
	struct net_device *dev = NULL;
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	int netnsid = -1;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	if (tb[IFLA_IF_NETNSID]) {
		netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
		tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	err = -ENODEV;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(tgt_net, ifname);
	else if (tb[IFLA_GROUP])
		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
	else
		goto out;

	if (!dev) {
		if (tb[IFLA_IFNAME] || ifm->ifi_index > 0)
			err = -ENODEV;

		goto out;
	}

	err = rtnl_delete_link(dev);

out:
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
{
	unsigned int old_flags;
	int err;

	old_flags = dev->flags;
	if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
		err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
		if (err < 0)
			return err;
	}

	if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
		__dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
	} else {
		dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
		__dev_notify_flags(dev, old_flags, ~0U);
	}
	return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);
struct net_device *rtnl_create_link(struct net *net,
				    const char *ifname, unsigned char name_assign_type,
				    const struct rtnl_link_ops *ops, struct nlattr *tb[])
{
	struct net_device *dev;
	unsigned int num_tx_queues = 1;
	unsigned int num_rx_queues = 1;

	if (tb[IFLA_NUM_TX_QUEUES])
		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
	else if (ops->get_num_tx_queues)
		num_tx_queues = ops->get_num_tx_queues();

	if (tb[IFLA_NUM_RX_QUEUES])
		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
	else if (ops->get_num_rx_queues)
		num_rx_queues = ops->get_num_rx_queues();

	if (num_tx_queues < 1 || num_tx_queues > 4096)
		return ERR_PTR(-EINVAL);

	if (num_rx_queues < 1 || num_rx_queues > 4096)
		return ERR_PTR(-EINVAL);

	dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
			       ops->setup, num_tx_queues, num_rx_queues);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev_net_set(dev, net);
	dev->rtnl_link_ops = ops;
	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;

	if (tb[IFLA_MTU])
		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
	if (tb[IFLA_ADDRESS]) {
		memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
		       nla_len(tb[IFLA_ADDRESS]));
		dev->addr_assign_type = NET_ADDR_SET;
	}
	if (tb[IFLA_BROADCAST])
		memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
		       nla_len(tb[IFLA_BROADCAST]));
	if (tb[IFLA_TXQLEN])
		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
	if (tb[IFLA_OPERSTATE])
		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
	if (tb[IFLA_LINKMODE])
		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
	if (tb[IFLA_GROUP])
		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));

	return dev;
}
EXPORT_SYMBOL(rtnl_create_link);
static int rtnl_group_changelink(const struct sk_buff *skb,
		struct net *net, int group,
		struct ifinfomsg *ifm,
		struct netlink_ext_ack *extack,
		struct nlattr **tb)
{
	struct net_device *dev, *aux;
	int err;

	for_each_netdev_safe(net, dev, aux) {
		if (dev->group == group) {
			err = do_setlink(skb, dev, ifm, extack, tb, NULL, 0);
			if (err < 0)
				return err;
		}
	}

	return 0;
}
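/* RTM_NEWLINK handler: either modify an existing device (changelink /
 * slave_changelink / do_setlink) or, with NLM_F_CREATE, create a device of
 * the requested kind and register it.
 */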
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	const struct rtnl_link_ops *ops;
	const struct rtnl_link_ops *m_ops = NULL;
	struct net_device *dev;
	struct net_device *master_dev = NULL;
	struct ifinfomsg *ifm;
	char kind[MODULE_NAME_LEN];
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct nlattr *linkinfo[IFLA_INFO_MAX+1];
	unsigned char name_assign_type = NET_NAME_USER;
	int err;

#ifdef CONFIG_MODULES
replay:
#endif
	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, false);
	if (err < 0)
		return err;

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		ifname[0] = '\0';

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(net, ifname);
	else
		dev = NULL;

	if (dev) {
		master_dev = netdev_master_upper_dev_get(dev);
		if (master_dev)
			m_ops = master_dev->rtnl_link_ops;
	}

	err = validate_linkmsg(dev, tb);
	if (err < 0)
		return err;

	if (tb[IFLA_LINKINFO]) {
		err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
				       tb[IFLA_LINKINFO], ifla_info_policy,
				       NULL);
		if (err < 0)
			return err;
	} else
		memset(linkinfo, 0, sizeof(linkinfo));

	if (linkinfo[IFLA_INFO_KIND]) {
		nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
		ops = rtnl_link_ops_get(kind);
	} else {
		kind[0] = '\0';
		ops = NULL;
	}

	{
		struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
		struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
		struct nlattr **data = NULL;
		struct nlattr **slave_data = NULL;
		struct net *dest_net, *link_net = NULL;

		if (ops) {
			if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
				err = nla_parse_nested(attr, ops->maxtype,
						       linkinfo[IFLA_INFO_DATA],
						       ops->policy, NULL);
				if (err < 0)
					return err;
				data = attr;
			}
			if (ops->validate) {
				err = ops->validate(tb, data, extack);
				if (err < 0)
					return err;
			}
		}

		if (m_ops) {
			if (m_ops->slave_maxtype &&
			    linkinfo[IFLA_INFO_SLAVE_DATA]) {
				err = nla_parse_nested(slave_attr,
						       m_ops->slave_maxtype,
						       linkinfo[IFLA_INFO_SLAVE_DATA],
						       m_ops->slave_policy,
						       NULL);
				if (err < 0)
					return err;
				slave_data = slave_attr;
			}
		}

		if (dev) {
			int status = 0;

			if (nlh->nlmsg_flags & NLM_F_EXCL)
				return -EEXIST;
			if (nlh->nlmsg_flags & NLM_F_REPLACE)
				return -EOPNOTSUPP;

			if (linkinfo[IFLA_INFO_DATA]) {
				if (!ops || ops != dev->rtnl_link_ops ||
				    !ops->changelink)
					return -EOPNOTSUPP;

				err = ops->changelink(dev, tb, data, extack);
				if (err < 0)
					return err;
				status |= DO_SETLINK_NOTIFY;
			}

			if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
				if (!m_ops || !m_ops->slave_changelink)
					return -EOPNOTSUPP;

				err = m_ops->slave_changelink(master_dev, dev,
							      tb, slave_data,
							      extack);
				if (err < 0)
					return err;
				status |= DO_SETLINK_NOTIFY;
			}

			return do_setlink(skb, dev, ifm, extack, tb, ifname,
					  status);
		}

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
				return rtnl_group_changelink(skb, net,
						nla_get_u32(tb[IFLA_GROUP]),
						ifm, extack, tb);
			return -ENODEV;
		}

		if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
			return -EOPNOTSUPP;

		if (!ops) {
#ifdef CONFIG_MODULES
			if (kind[0]) {
				__rtnl_unlock();
				request_module("rtnl-link-%s", kind);
				rtnl_lock();
				ops = rtnl_link_ops_get(kind);
				if (ops)
					goto replay;
			}
#endif
			return -EOPNOTSUPP;
		}

		if (!ifname[0]) {
			snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
			name_assign_type = NET_NAME_ENUM;
		}

		dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
		if (IS_ERR(dest_net))
			return PTR_ERR(dest_net);

		if (tb[IFLA_LINK_NETNSID]) {
			int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);

			link_net = get_net_ns_by_id(dest_net, id);
			if (!link_net) {
				err = -EINVAL;
				goto out;
			}
			err = -EPERM;
			if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
				goto out;
		}

		dev = rtnl_create_link(link_net ? : dest_net, ifname,
				       name_assign_type, ops, tb);
		if (IS_ERR(dev)) {
			err = PTR_ERR(dev);
			goto out;
		}

		dev->ifindex = ifm->ifi_index;

		if (ops->newlink) {
			err = ops->newlink(link_net ? : net, dev, tb, data,
					   extack);
			/* Drivers should call free_netdev() in ->destructor
			 * and unregister it on failure after registration
			 * so that device could be finally freed in rtnl_unlock.
			 */
			if (err < 0) {
				/* If device is not registered at all, free it now */
				if (dev->reg_state == NETREG_UNINITIALIZED)
					free_netdev(dev);
				goto out;
			}
		} else {
			err = register_netdevice(dev);
			if (err < 0) {
				free_netdev(dev);
				goto out;
			}
		}
		err = rtnl_configure_link(dev, ifm);
		if (err < 0)
			goto out_unregister;
		if (link_net) {
			err = dev_change_net_namespace(dev, dest_net, ifname);
			if (err < 0)
				goto out_unregister;
		}
		if (tb[IFLA_MASTER]) {
			err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]),
					    extack);
			if (err)
				goto out_unregister;
		}
out:
		if (link_net)
			put_net(link_net);
		put_net(dest_net);
		return err;
out_unregister:
		if (ops->newlink) {
			LIST_HEAD(list_kill);

			ops->dellink(dev, &list_kill);
			unregister_netdevice_many(&list_kill);
		} else {
			unregister_netdevice(dev);
		}
		goto out;
	}
}
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net *tgt_net = net;
	struct ifinfomsg *ifm;
	char ifname[IFNAMSIZ];
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *dev = NULL;
	struct sk_buff *nskb;
	int netnsid = -1;
	int err;
	u32 ext_filter_mask = 0;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack);
	if (err < 0)
		return err;

	err = rtnl_ensure_unique_netns(tb, extack, true);
	if (err < 0)
		return err;

	if (tb[IFLA_IF_NETNSID]) {
		netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
		tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
		if (IS_ERR(tgt_net))
			return PTR_ERR(tgt_net);
	}

	if (tb[IFLA_IFNAME])
		nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);

	if (tb[IFLA_EXT_MASK])
		ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);

	err = -ENODEV;
	ifm = nlmsg_data(nlh);
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME])
		dev = __dev_get_by_name(tgt_net, ifname);
	if (dev == NULL)
		goto out;

	err = -ENOBUFS;
	nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
	if (nskb == NULL)
		goto out;

	err = rtnl_fill_ifinfo(nskb, dev, net,
			       RTM_NEWLINK, NETLINK_CB(skb).portid,
			       nlh->nlmsg_seq, 0, 0, ext_filter_mask,
			       0, NULL, 0, netnsid);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
out:
	if (netnsid >= 0)
		put_net(tgt_net);

	return err;
}
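/* Work out the per-message allocation size a GETLINK dump needs, taking the
 * IFLA_EXT_MASK filter into account.
 */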
static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	struct nlattr *tb[IFLA_MAX+1];
	u32 ext_filter_mask = 0;
	u16 min_ifinfo_dump_size = 0;
	int hdrlen;

	/* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
	hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
		 sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);

	if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
		if (tb[IFLA_EXT_MASK])
			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
	}

	if (!ext_filter_mask)
		return NLMSG_GOODSIZE;
	/*
	 * traverse the list of net devices and compute the minimum
	 * buffer size based upon the filter mask.
	 */
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
					     if_nlmsg_size(dev,
							   ext_filter_mask));
	}
	rcu_read_unlock();

	return nlmsg_total_size(min_ifinfo_dump_size);
}
static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;
	int s_idx = cb->family;

	if (s_idx == 0)
		s_idx = 1;

	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
		int type = cb->nlh->nlmsg_type-RTM_BASE;
		struct rtnl_link *handlers;
		rtnl_dumpit_func dumpit;

		if (idx < s_idx || idx == PF_PACKET)
			continue;

		handlers = rtnl_dereference(rtnl_msg_handlers[idx]);
		if (!handlers)
			continue;

		dumpit = READ_ONCE(handlers[type].dumpit);
		if (!dumpit)
			continue;

		if (idx > s_idx)
			memset(&cb->args[0], 0, sizeof(cb->args));

		if (dumpit(skb, cb))
			break;
	}
	cb->family = idx;

	return skb->len;
}
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
				       unsigned int change,
				       u32 event, gfp_t flags, int *new_nsid,
				       int new_ifindex)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	size_t if_info_size;

	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), flags);
	if (skb == NULL)
		goto errout;

	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
			       type, 0, 0, change, 0, 0, event,
			       new_nsid, new_ifindex, -1);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	return skb;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
	return NULL;
}
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, gfp_t flags)
{
	struct net *net = dev_net(dev);

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags);
}
static void rtmsg_ifinfo_event(int type, struct net_device *dev,
			       unsigned int change, u32 event,
			       gfp_t flags, int *new_nsid, int new_ifindex)
{
	struct sk_buff *skb;

	if (dev->reg_state != NETREG_REGISTERED)
		return;

	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
				     new_ifindex);
	if (skb)
		rtmsg_ifinfo_send(skb, dev, flags);
}

void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
		  gfp_t flags)
{
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
			   NULL, 0);
}

void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
			 gfp_t flags, int *new_nsid, int new_ifindex)
{
	rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags,
			   new_nsid, new_ifindex);
}
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   u8 *addr, u16 vid, u32 pid, u32 seq,
				   int type, unsigned int flags,
				   int nlflags, u16 ndm_state)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1	 = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = flags;
	ndm->ndm_type	 = 0;
	ndm->ndm_ifindex = dev->ifindex;
	ndm->ndm_state   = ndm_state;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;
	if (vid)
		if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid))
			goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t rtnl_fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
	       nla_total_size(ETH_ALEN) +	/* NDA_LLADDR */
	       nla_total_size(sizeof(u16)) +	/* NDA_VLAN */
	       0;
}
static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
			    u16 ndm_state)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_fdb_fill(skb, dev, addr, vid,
				      0, 0, type, NTF_SELF, 0, ndm_state);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
/**
 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
 */
int ndo_dflt_fdb_add(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid,
		     u16 flags)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_add);
static int fdb_vid_parse(struct nlattr *vlan_attr, u16 *p_vid,
			 struct netlink_ext_ack *extack)
{
	u16 vid = 0;

	if (vlan_attr) {
		if (nla_len(vlan_attr) != sizeof(u16)) {
			NL_SET_ERR_MSG(extack, "invalid vlan attribute size");
			return -EINVAL;
		}

		vid = nla_get_u16(vlan_attr);

		if (!vid || vid >= VLAN_VID_MASK) {
			NL_SET_ERR_MSG(extack, "invalid vlan id");
			return -EINVAL;
		}
	}
	*p_vid = vid;
	return 0;
}
static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	u8 *addr;
	u16 vid;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
		NL_SET_ERR_MSG(extack, "invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
		NL_SET_ERR_MSG(extack, "invalid address");
		return -EINVAL;
	}

	if (dev->type != ARPHRD_ETHER) {
		NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);

	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
	if (err)
		return err;

	err = -EOPNOTSUPP;

	/* Support fdb on master device the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;

		err = ops->ndo_fdb_add(ndm, tb, dev, addr, vid,
				       nlh->nlmsg_flags);
		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
	if ((ndm->ndm_flags & NTF_SELF)) {
		if (dev->netdev_ops->ndo_fdb_add)
			err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
							   vid,
							   nlh->nlmsg_flags);
		else
			err = ndo_dflt_fdb_add(ndm, tb, dev, addr, vid,
					       nlh->nlmsg_flags);

		if (!err) {
			rtnl_fdb_notify(dev, addr, vid, RTM_NEWNEIGH,
					ndm->ndm_state);
			ndm->ndm_flags &= ~NTF_SELF;
		}
	}
out:
	return err;
}
/**
 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
 */
int ndo_dflt_fdb_del(struct ndmsg *ndm,
		     struct nlattr *tb[],
		     struct net_device *dev,
		     const unsigned char *addr, u16 vid)
{
	int err = -EINVAL;

	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
	if (!(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return err;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);

	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_del);
static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct net_device *dev;
	int err = -EINVAL;
	u8 *addr;
	u16 vid;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
	if (err < 0)
		return err;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex == 0) {
		NL_SET_ERR_MSG(extack, "invalid ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
		NL_SET_ERR_MSG(extack, "invalid address");
		return -EINVAL;
	}

	if (dev->type != ARPHRD_ETHER) {
		NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
		return -EINVAL;
	}

	addr = nla_data(tb[NDA_LLADDR]);

	err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
	if (err)
		return err;

	err = -EOPNOTSUPP;

	/* Support fdb on master device the net/bridge default case */
	if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
	    (dev->priv_flags & IFF_BRIDGE_PORT)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);
		const struct net_device_ops *ops = br_dev->netdev_ops;

		if (ops->ndo_fdb_del)
			err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);

		if (err)
			goto out;
		else
			ndm->ndm_flags &= ~NTF_MASTER;
	}

	/* Embedded bridge, macvlan, and any other device support */
	if (ndm->ndm_flags & NTF_SELF) {
		if (dev->netdev_ops->ndo_fdb_del)
			err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr,
							   vid);
		else
			err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);

		if (!err) {
			rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH,
					ndm->ndm_state);
			ndm->ndm_flags &= ~NTF_SELF;
		}
	}
out:
	return err;
}
static int nlmsg_populate_fdb(struct sk_buff *skb,
			      struct netlink_callback *cb,
			      struct net_device *dev,
			      int *idx,
			      struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int err;
	u32 portid, seq;

	portid = NETLINK_CB(cb->skb).portid;
	seq = cb->nlh->nlmsg_seq;

	list_for_each_entry(ha, &list->list, list) {
		if (*idx < cb->args[2])
			goto skip;

		err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 0,
					      portid, seq,
					      RTM_NEWNEIGH, NTF_SELF,
					      NLM_F_MULTI, NUD_PERMANENT);
		if (err < 0)
			return err;
skip:
		*idx += 1;
	}
	return 0;
}
/**
 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
 * @nlh: netlink message header
 *
 * Default netdevice operation to dump the existing unicast address list.
 * Returns number of addresses from list put in skb.
 */
int ndo_dflt_fdb_dump(struct sk_buff *skb,
		      struct netlink_callback *cb,
		      struct net_device *dev,
		      struct net_device *filter_dev,
		      int *idx)
{
	int err;

	if (dev->type != ARPHRD_ETHER)
		return -EINVAL;

	netif_addr_lock_bh(dev);
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
	if (err)
		goto out;
	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->mc);
out:
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);
static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct nlattr *tb[IFLA_MAX+1];
	struct net_device *br_dev = NULL;
	const struct net_device_ops *ops = NULL;
	const struct net_device_ops *cops = NULL;
	struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct hlist_head *head;
	int brport_idx = 0;
	int br_idx = 0;
	int h, s_h;
	int idx = 0, s_idx;
	int err = 0;
	int fidx = 0;

	/* A hack to preserve kernel<->userspace interface.
	 * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
	 * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
	 * So, check for ndmsg with an optional u32 attribute (not used here).
	 * Fortunately these sizes don't conflict with the size of ifinfomsg
	 * with an optional attribute.
	 */
	if (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) &&
	    (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) +
	     nla_attr_size(sizeof(u32)))) {
		err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
				  IFLA_MAX, ifla_policy, NULL);
		if (err < 0) {
			return -EINVAL;
		} else if (err == 0) {
			if (tb[IFLA_MASTER])
				br_idx = nla_get_u32(tb[IFLA_MASTER]);
		}

		brport_idx = ifm->ifi_index;
	}

	if (br_idx) {
		br_dev = __dev_get_by_index(net, br_idx);
		if (!br_dev)
			return -ENODEV;

		ops = br_dev->netdev_ops;
	}

	s_h = cb->args[0];
	s_idx = cb->args[1];

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {

			if (brport_idx && (dev->ifindex != brport_idx))
				continue;

			if (!br_idx) { /* user did not specify a specific bridge */
				if (dev->priv_flags & IFF_BRIDGE_PORT) {
					br_dev = netdev_master_upper_dev_get(dev);
					cops = br_dev->netdev_ops;
				}
			} else {
				if (dev != br_dev &&
				    !(dev->priv_flags & IFF_BRIDGE_PORT))
					continue;

				if (br_dev != netdev_master_upper_dev_get(dev) &&
				    !(dev->priv_flags & IFF_EBRIDGE))
					continue;
				cops = ops;
			}

			if (idx < s_idx)
				goto cont;

			if (dev->priv_flags & IFF_BRIDGE_PORT) {
				if (cops && cops->ndo_fdb_dump) {
					err = cops->ndo_fdb_dump(skb, cb,
								br_dev, dev,
								&fidx);
					if (err == -EMSGSIZE)
						goto out;
				}
			}

			if (dev->netdev_ops->ndo_fdb_dump)
				err = dev->netdev_ops->ndo_fdb_dump(skb, cb,
								    dev, NULL,
								    &fidx);
			else
				err = ndo_dflt_fdb_dump(skb, cb, dev, NULL,
							&fidx);
			if (err == -EMSGSIZE)
				goto out;

			cops = NULL;

			/* reset fdb offset to 0 for rest of the interfaces */
			cb->args[2] = 0;
			fidx = 0;
cont:
			idx++;
		}
	}

out:
	cb->args[0] = h;
	cb->args[1] = idx;
	cb->args[2] = fidx;

	return skb->len;
}
static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
			       unsigned int attrnum, unsigned int flag)
{
	if (mask & flag)
		return nla_put_u8(skb, attrnum, !!(flags & flag));
	return 0;
}
int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			    struct net_device *dev, u16 mode,
			    u32 flags, u32 mask, int nlflags,
			    u32 filter_mask,
			    int (*vlan_fill)(struct sk_buff *skb,
					     struct net_device *dev,
					     u32 filter_mask))
{
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	struct nlattr *br_afspec;
	struct nlattr *protinfo;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	int err = 0;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_BRIDGE;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (br_dev &&
	     nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
	if (!br_afspec)
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF)) {
		nla_nest_cancel(skb, br_afspec);
		goto nla_put_failure;
	}

	if (mode != BRIDGE_MODE_UNDEF) {
		if (nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	if (vlan_fill) {
		err = vlan_fill(skb, dev, filter_mask);
		if (err) {
			nla_nest_cancel(skb, br_afspec);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, br_afspec);

	protinfo = nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);
	if (!protinfo)
		goto nla_put_failure;

	if (brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_MODE, BR_HAIRPIN_MODE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_GUARD, BR_BPDU_GUARD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_FAST_LEAVE,
				BR_MULTICAST_FAST_LEAVE) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING, BR_LEARNING) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_LEARNING_SYNC, BR_LEARNING_SYNC) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD) ||
	    brport_nla_put_flag(skb, flags, mask,
				IFLA_BRPORT_PROXYARP, BR_PROXYARP)) {
		nla_nest_cancel(skb, protinfo);
		goto nla_put_failure;
	}

	nla_nest_end(skb, protinfo);

	nlmsg_end(skb, nlh);
	return 0;
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return err ? err : -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(ndo_dflt_bridge_getlink);
static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int idx = 0;
	u32 portid = NETLINK_CB(cb->skb).portid;
	u32 seq = cb->nlh->nlmsg_seq;
	u32 filter_mask = 0;
	int err;

	if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
		struct nlattr *extfilt;

		extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
					  IFLA_EXT_MASK);
		if (extfilt) {
			if (nla_len(extfilt) < sizeof(filter_mask))
				return -EINVAL;

			filter_mask = nla_get_u32(extfilt);
		}
	}

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = br_dev->netdev_ops->ndo_bridge_getlink(
						skb, portid, seq, dev,
						filter_mask, NLM_F_MULTI);
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
			}
			idx++;
		}

		if (ops->ndo_bridge_getlink) {
			if (idx >= cb->args[0]) {
				err = ops->ndo_bridge_getlink(skb, portid,
							      seq, dev,
							      filter_mask,
							      NLM_F_MULTI);
				if (err < 0 && err != -EOPNOTSUPP) {
					if (likely(skb->len))
						break;

					goto out_err;
				}
			}
			idx++;
		}
	}
	err = skb->len;
out_err:
	rcu_read_unlock();
	cb->args[0] = idx;

	return err;
}
static inline size_t bridge_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ)		/* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN)		/* IFLA_ADDRESS */
		+ nla_total_size(sizeof(u32))		/* IFLA_MASTER */
		+ nla_total_size(sizeof(u32))		/* IFLA_MTU */
		+ nla_total_size(sizeof(u32))		/* IFLA_LINK */
		+ nla_total_size(sizeof(u32))		/* IFLA_OPERSTATE */
		+ nla_total_size(sizeof(u8))		/* IFLA_PROTINFO */
		+ nla_total_size(sizeof(struct nlattr))	/* IFLA_AF_SPEC */
		+ nla_total_size(sizeof(u16))		/* IFLA_BRIDGE_FLAGS */
		+ nla_total_size(sizeof(u16));		/* IFLA_BRIDGE_MODE */
}
static int rtnl_bridge_notify(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -EOPNOTSUPP;

	if (!dev->netdev_ops->ndo_bridge_getlink)
		return 0;

	skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
	if (!skb) {
		err = -ENOMEM;
		goto errout;
	}

	err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
	if (err < 0)
		goto errout;

	if (!skb->len)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return 0;

errout:
	WARN_ON(err == -EMSGSIZE);
	kfree_skb(skb);
	if (err)
		rtnl_set_sk_err(net, RTNLGRP_LINK, err);
	return err;
}
static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
	u16 flags = 0;
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_setlink)
			err = -EOPNOTSUPP;
		else
			err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
								  flags);
		if (!err) {
			flags &= ~BRIDGE_FLAGS_SELF;

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}
static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ifinfomsg *ifm;
	struct net_device *dev;
	struct nlattr *br_spec, *attr = NULL;
	int rem, err = -EOPNOTSUPP;
	u16 flags = 0;
	bool have_flags = false;

	if (nlmsg_len(nlh) < sizeof(*ifm))
		return -EINVAL;

	ifm = nlmsg_data(nlh);
	if (ifm->ifi_family != AF_BRIDGE)
		return -EPFNOSUPPORT;

	dev = __dev_get_by_index(net, ifm->ifi_index);
	if (!dev) {
		NL_SET_ERR_MSG(extack, "unknown ifindex");
		return -ENODEV;
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (br_spec) {
		nla_for_each_nested(attr, br_spec, rem) {
			if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
				if (nla_len(attr) < sizeof(flags))
					return -EINVAL;

				have_flags = true;
				flags = nla_get_u16(attr);
				break;
			}
		}
	}

	if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
		struct net_device *br_dev = netdev_master_upper_dev_get(dev);

		if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh, flags);
		if (err)
			goto out;

		flags &= ~BRIDGE_FLAGS_MASTER;
	}

	if ((flags & BRIDGE_FLAGS_SELF)) {
		if (!dev->netdev_ops->ndo_bridge_dellink)
			err = -EOPNOTSUPP;
		else
			err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh,
								  flags);

		if (!err) {
			flags &= ~BRIDGE_FLAGS_SELF;

			/* Generate event to notify upper layer of bridge
			 * change
			 */
			err = rtnl_bridge_notify(dev);
		}
	}

	if (have_flags)
		memcpy(nla_data(attr), &flags, sizeof(flags));
out:
	return err;
}
static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
{
	return (mask & IFLA_STATS_FILTER_BIT(attrid)) &&
	       (!idxattr || idxattr == attrid);
}

#define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
static int rtnl_get_offload_stats_attr_size(int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return sizeof(struct rtnl_link_stats64);
	}

	return 0;
}
static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
				  int *prividx)
{
	struct nlattr *attr = NULL;
	int attr_id, size;
	void *attr_data;
	int err;

	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
	      dev->netdev_ops->ndo_get_offload_stats))
		return -ENODATA;

	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
		if (attr_id < *prividx)
			continue;

		size = rtnl_get_offload_stats_attr_size(attr_id);
		if (!size)
			continue;

		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
			continue;

		attr = nla_reserve_64bit(skb, attr_id, size,
					 IFLA_OFFLOAD_XSTATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		attr_data = nla_data(attr);
		memset(attr_data, 0, size);
		err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
							     attr_data);
		if (err)
			goto get_offload_stats_failure;
	}

	if (!attr)
		return -ENODATA;

	*prividx = 0;
	return 0;

nla_put_failure:
	err = -EMSGSIZE;
get_offload_stats_failure:
	*prividx = attr_id;
	return err;
}
static int rtnl_get_offload_stats_size(const struct net_device *dev)
{
	int nla_size = 0;
	int attr_id;
	int size;

	if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
	      dev->netdev_ops->ndo_get_offload_stats))
		return 0;

	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
			continue;
		size = rtnl_get_offload_stats_attr_size(attr_id);
		nla_size += nla_total_size_64bit(size);
	}

	if (nla_size != 0)
		nla_size += nla_total_size(0);

	return nla_size;
}
static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
			       int type, u32 pid, u32 seq, u32 change,
			       unsigned int flags, unsigned int filter_mask,
			       int *idxattr, int *prividx)
{
	struct if_stats_msg *ifsm;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	int s_prividx = *prividx;
	int err;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifsm), flags);
	if (!nlh)
		return -EMSGSIZE;

	ifsm = nlmsg_data(nlh);
	ifsm->family = PF_UNSPEC;
	ifsm->pad1 = 0;
	ifsm->pad2 = 0;
	ifsm->ifindex = dev->ifindex;
	ifsm->filter_mask = filter_mask;

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, *idxattr)) {
		struct rtnl_link_stats64 *sp;

		attr = nla_reserve_64bit(skb, IFLA_STATS_LINK_64,
					 sizeof(struct rtnl_link_stats64),
					 IFLA_STATS_UNSPEC);
		if (!attr)
			goto nla_put_failure;

		sp = nla_data(attr);
		dev_get_stats(dev, sp);
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, *idxattr)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;

		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE,
			     *idxattr)) {
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		master = netdev_master_upper_dev_get(dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->fill_linkxstats) {
			*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
			attr = nla_nest_start(skb,
					      IFLA_STATS_LINK_XSTATS_SLAVE);
			if (!attr)
				goto nla_put_failure;

			err = ops->fill_linkxstats(skb, dev, prividx, *idxattr);
			nla_nest_end(skb, attr);
			if (err)
				goto nla_put_failure;
			*idxattr = 0;
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
			     *idxattr)) {
		*idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
		attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
		if (!attr)
			goto nla_put_failure;

		err = rtnl_get_offload_stats(skb, dev, prividx);
		if (err == -ENODATA)
			nla_nest_cancel(skb, attr);
		else
			nla_nest_end(skb, attr);

		if (err && err != -ENODATA)
			goto nla_put_failure;
		*idxattr = 0;
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
		struct rtnl_af_ops *af_ops;

		*idxattr = IFLA_STATS_AF_SPEC;
		attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
		if (!attr)
			goto nla_put_failure;

		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
			if (af_ops->fill_stats_af) {
				struct nlattr *af;
				int err;

				af = nla_nest_start(skb, af_ops->family);
				if (!af)
					goto nla_put_failure;

				err = af_ops->fill_stats_af(skb, dev);

				if (err == -ENODATA) {
					nla_nest_cancel(skb, af);
				} else if (err < 0) {
					goto nla_put_failure;
				}

				nla_nest_end(skb, af);
			}
		}

		nla_nest_end(skb, attr);

		*idxattr = 0;
	}

	nlmsg_end(skb, nlh);

	return 0;

nla_put_failure:
	/* not a multi message or no progress mean a real error */
	if (!(flags & NLM_F_MULTI) || s_prividx == *prividx)
		nlmsg_cancel(skb, nlh);
	else
		nlmsg_end(skb, nlh);

	return -EMSGSIZE;
}
static size_t if_nlmsg_stats_size(const struct net_device *dev,
				  u32 filter_mask)
{
	size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg));

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0))
		size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS, 0)) {
		const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
		int attr = IFLA_STATS_LINK_XSTATS;

		if (ops && ops->get_linkxstats_size) {
			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XSTATS_SLAVE, 0)) {
		struct net_device *_dev = (struct net_device *)dev;
		const struct rtnl_link_ops *ops = NULL;
		const struct net_device *master;

		/* netdev_master_upper_dev_get can't take const */
		master = netdev_master_upper_dev_get(_dev);
		if (master)
			ops = master->rtnl_link_ops;
		if (ops && ops->get_linkxstats_size) {
			int attr = IFLA_STATS_LINK_XSTATS_SLAVE;

			size += nla_total_size(ops->get_linkxstats_size(dev,
									attr));
			/* for IFLA_STATS_LINK_XSTATS_SLAVE */
			size += nla_total_size(0);
		}
	}

	if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
		size += rtnl_get_offload_stats_size(dev);

	if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
		struct rtnl_af_ops *af_ops;

		/* for IFLA_STATS_AF_SPEC */
		size += nla_total_size(0);

		list_for_each_entry_rcu(af_ops, &rtnl_af_ops, list) {
			if (af_ops->get_stats_af_size) {
				size += nla_total_size(
					af_ops->get_stats_af_size(dev));

				/* for AF_* */
				size += nla_total_size(0);
			}
		}
	}

	return size;
}
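/* RTM_GETSTATS doit handler: build one RTM_NEWSTATS reply for a single
 * device according to the requested filter_mask.
 */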
static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *dev = NULL;
	int idxattr = 0, prividx = 0;
	struct if_stats_msg *ifsm;
	struct sk_buff *nskb;
	u32 filter_mask;
	int err;

	if (nlmsg_len(nlh) < sizeof(*ifsm))
		return -EINVAL;

	ifsm = nlmsg_data(nlh);
	if (ifsm->ifindex > 0)
		dev = __dev_get_by_index(net, ifsm->ifindex);
	else
		return -EINVAL;

	if (!dev)
		return -ENODEV;

	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
	if (!nskb)
		return -ENOBUFS;

	err = rtnl_fill_statsinfo(nskb, dev, RTM_NEWSTATS,
				  NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
				  0, filter_mask, &idxattr, &prividx);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in if_nlmsg_stats_size */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(nskb);
	} else {
		err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
	}

	return err;
}
static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int h, s_h, err, s_idx, s_idxattr, s_prividx;
	struct net *net = sock_net(skb->sk);
	unsigned int flags = NLM_F_MULTI;
	struct if_stats_msg *ifsm;
	struct hlist_head *head;
	struct net_device *dev;
	u32 filter_mask = 0;
	int idx = 0;

	s_h = cb->args[0];
	s_idx = cb->args[1];
	s_idxattr = cb->args[2];
	s_prividx = cb->args[3];

	cb->seq = net->dev_base_seq;

	if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
		return -EINVAL;

	ifsm = nlmsg_data(cb->nlh);
	filter_mask = ifsm->filter_mask;
	if (!filter_mask)
		return -EINVAL;

	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		idx = 0;
		head = &net->dev_index_head[h];
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;
			err = rtnl_fill_statsinfo(skb, dev, RTM_NEWSTATS,
						  NETLINK_CB(cb->skb).portid,
						  cb->nlh->nlmsg_seq, 0,
						  flags, filter_mask,
						  &s_idxattr, &s_prividx);
			/* If we ran out of room on the first message,
			 * we're in trouble
			 */
			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));

			if (err < 0)
				goto out;
			s_prividx = 0;
			s_idxattr = 0;
			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
			idx++;
		}
	}
out:
	cb->args[3] = s_prividx;
	cb->args[2] = s_idxattr;
	cb->args[1] = idx;
	cb->args[0] = h;

	return skb->len;
}
/* Process one rtnetlink message. */

static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_link *handlers;
	int err = -EOPNOTSUPP;
	rtnl_doit_func doit;
	unsigned int flags;
	int kind;
	int family;
	int type;

	type = nlh->nlmsg_type;
	if (type > RTM_MAX)
		return -EOPNOTSUPP;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
		return 0;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
	kind = type&3;

	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (family >= ARRAY_SIZE(rtnl_msg_handlers))
		family = PF_UNSPEC;

	rcu_read_lock();
	handlers = rcu_dereference(rtnl_msg_handlers[family]);
	if (!handlers) {
		family = PF_UNSPEC;
		handlers = rcu_dereference(rtnl_msg_handlers[family]);
	}

	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
		struct sock *rtnl;
		rtnl_dumpit_func dumpit;
		u16 min_dump_alloc = 0;

		dumpit = READ_ONCE(handlers[type].dumpit);
		if (!dumpit) {
			family = PF_UNSPEC;
			handlers = rcu_dereference(rtnl_msg_handlers[PF_UNSPEC]);
			if (!handlers)
				goto err_unlock;

			dumpit = READ_ONCE(handlers[type].dumpit);
			if (!dumpit)
				goto err_unlock;
		}

		refcount_inc(&rtnl_msg_handlers_ref[family]);

		if (type == RTM_GETLINK - RTM_BASE)
			min_dump_alloc = rtnl_calcit(skb, nlh);

		rcu_read_unlock();

		rtnl = net->rtnl;
		{
			struct netlink_dump_control c = {
				.dump		= dumpit,
				.min_dump_alloc	= min_dump_alloc,
			};
			err = netlink_dump_start(rtnl, skb, nlh, &c);
		}
		refcount_dec(&rtnl_msg_handlers_ref[family]);
		return err;
	}

	doit = READ_ONCE(handlers[type].doit);
	if (!doit) {
		family = PF_UNSPEC;
		handlers = rcu_dereference(rtnl_msg_handlers[family]);
	}

	flags = READ_ONCE(handlers[type].flags);
	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
		refcount_inc(&rtnl_msg_handlers_ref[family]);
		doit = READ_ONCE(handlers[type].doit);
		rcu_read_unlock();
		if (doit)
			err = doit(skb, nlh, extack);
		refcount_dec(&rtnl_msg_handlers_ref[family]);
		return err;
	}

	rcu_read_unlock();

	rtnl_lock();
	handlers = rtnl_dereference(rtnl_msg_handlers[family]);
	if (handlers) {
		doit = READ_ONCE(handlers[type].doit);
		if (doit)
			err = doit(skb, nlh, extack);
	}
	rtnl_unlock();
	return err;

err_unlock:
	rcu_read_unlock();
	return -EOPNOTSUPP;
}
static void rtnetlink_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
}

static int rtnetlink_bind(struct net *net, int group)
{
	switch (group) {
	case RTNLGRP_IPV4_MROUTE_R:
	case RTNLGRP_IPV6_MROUTE_R:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		break;
	}
	return 0;
}
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_POST_TYPE_CHANGE:
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_CHANGEUPPER:
	case NETDEV_RESEND_IGMP:
	case NETDEV_CHANGEINFODATA:
	case NETDEV_CHANGELOWERSTATE:
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
				   GFP_KERNEL, NULL, 0);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block rtnetlink_dev_notifier = {
	.notifier_call	= rtnetlink_event,
};
static int __net_init rtnetlink_net_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.groups		= RTNLGRP_MAX,
		.input		= rtnetlink_rcv,
		.cb_mutex	= &rtnl_mutex,
		.flags		= NL_CFG_F_NONROOT_RECV,
		.bind		= rtnetlink_bind,
	};

	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
	if (!sk)
		return -ENOMEM;
	net->rtnl = sk;
	return 0;
}

static void __net_exit rtnetlink_net_exit(struct net *net)
{
	netlink_kernel_release(net->rtnl);
	net->rtnl = NULL;
}

static struct pernet_operations rtnetlink_net_ops = {
	.init = rtnetlink_net_init,
	.exit = rtnetlink_net_exit,
};
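/* Register the rtnetlink pernet socket, the netdevice notifier and all the
 * protocol-independent message handlers.
 */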
void __init rtnetlink_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rtnl_msg_handlers_ref); i++)
		refcount_set(&rtnl_msg_handlers_ref[i], 1);

	if (register_pernet_subsys(&rtnetlink_net_ops))
		panic("rtnetlink_init: cannot initialize rtnetlink\n");

	register_netdevice_notifier(&rtnetlink_dev_notifier);

	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
		      rtnl_dump_ifinfo, 0);
	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0);

	rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, 0);

	rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0);

	rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump,
		      0);
}