// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);
typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};
static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}
/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}
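/* Illustrative sketch (not part of the upstream file): a caller that cannot
 * take rtnl_lock tags the operation with SWITCHDEV_F_DEFER, which routes it
 * through switchdev_deferred_enqueue() above. The op is copied by value, so
 * a stack-allocated attr is fine; this is modeled on the bridge's STP-state
 * setter:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	switchdev_port_attr_set(brport_dev, &attr, NULL);
 *
 * The scheduled worker later replays the op under rtnl_lock via
 * switchdev_deferred_process_work() above.
 */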
static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}
static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}
static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}
static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}
/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
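/* Example (sketch): the synchronous path, with extack so drivers can report
 * why a request was refused. The bridge probes with PORT_PRE_BRIDGE_FLAGS
 * before committing PORT_BRIDGE_FLAGS; -EOPNOTSUPP from the probe simply
 * means no driver claims the port:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS,
 *		.u.brport_flags = { .val = BR_LEARNING, .mask = BR_LEARNING },
 *	};
 *
 *	err = switchdev_port_attr_set(brport_dev, &attr, extack);
 */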
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}
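/* switchdev_obj_size() lets the deferral code deep-copy objects with a plain
 * memcpy() because every object type embeds struct switchdev_obj as its
 * first member. The container_of() wrappers in include/net/switchdev.h
 * recover the full object on the consumer side, e.g.:
 *
 *	const struct switchdev_obj_port_vlan *vlan =
 *		SWITCHDEV_OBJ_PORT_VLAN(obj);
 */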
static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}
static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}
static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}
/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
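/* Example (sketch): adding a PVID VLAN on a switchdev port, roughly what the
 * bridge does for a "bridge vlan add" request. Since deferred objects are
 * deep-copied by switchdev_deferred_enqueue(), stack allocation is fine:
 *
 *	struct switchdev_obj_port_vlan v = {
 *		.obj.orig_dev = brport_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid = 100,
 *	};
 *
 *	err = switchdev_port_obj_add(brport_dev, &v.obj, extack);
 */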
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}
static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}
static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}
/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *	Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
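/* Example (sketch): a driver subscribing to the atomic chain, e.g. for FDB
 * events. Handlers may run in atomic context, so real drivers copy the event
 * and defer the hardware access to a workqueue ("foo_" names hypothetical):
 *
 *	static int foo_switchdev_event(struct notifier_block *nb,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *		case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *			// copy *ptr and schedule a work item for dev
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 */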
int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};
static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}
static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}
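/* Sketch of the callback pair the helpers below expect. check_cb() answers
 * "is this netdev one of my ports?"; foreign_dev_check_cb() answers "does
 * this port of mine consider foreign_dev to live on some other switch?"
 * ("foo_" names hypothetical):
 *
 *	static bool foo_port_dev_check(const struct net_device *dev)
 *	{
 *		return dev->netdev_ops == &foo_port_netdev_ops;
 *	}
 *
 * With that, switchdev_lower_dev_find(dev, ...) returns a lower device that
 * is ours and on the same switch tree as dev, i.e. proof that we offload
 * something beneath dev.
 */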
static int __switchdev_handle_fdb_add_to_device(struct net_device *dev,
		const struct net_device *orig_dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*add_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_add_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return add_cb(dev, orig_dev, info->ctx, fdb_info);

	if (netif_is_lag_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			goto maybe_bridged_with_us;

		/* This is a LAG interface that we offload */
		if (!lag_add_cb)
			goto maybe_bridged_with_us;

		return lag_add_cb(dev, orig_dev, info->ctx, fdb_info);
	}

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge device.
	 */
	if (netif_is_bridge_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			return 0;

		/* This is a bridge interface that we offload */
		netdev_for_each_lower_dev(dev, lower_dev, iter) {
			/* Do not propagate FDB entries across bridges */
			if (netif_is_bridge_master(lower_dev))
				continue;

			/* Bridge ports might be either us, or LAG interfaces
			 * that we offload.
			 */
			if (!check_cb(lower_dev) &&
			    !switchdev_lower_dev_find(lower_dev, check_cb,
						      foreign_dev_check_cb))
				continue;

			err = __switchdev_handle_fdb_add_to_device(lower_dev, orig_dev,
								   fdb_info, check_cb,
								   foreign_dev_check_cb,
								   add_cb, lag_add_cb);
			if (err && err != -EOPNOTSUPP)
				return err;
		}

		return 0;
	}

maybe_bridged_with_us:
	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
		return 0;

	return __switchdev_handle_fdb_add_to_device(br, orig_dev, fdb_info,
						    check_cb, foreign_dev_check_cb,
						    add_cb, lag_add_cb);
}
int switchdev_handle_fdb_add_to_device(struct net_device *dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*add_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_add_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_add_to_device(dev, dev, fdb_info,
						   check_cb,
						   foreign_dev_check_cb,
						   add_cb, lag_add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_add_to_device);
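/* Example (sketch): dispatching the atomic FDB notification through this
 * helper from a driver's switchdev notifier ("foo_" names hypothetical):
 *
 *	case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *		err = switchdev_handle_fdb_add_to_device(dev, ptr,
 *							 foo_port_dev_check,
 *							 foo_foreign_dev_check,
 *							 foo_fdb_add_cb,
 *							 foo_lag_fdb_add_cb);
 *		return notifier_from_errno(err);
 *
 * A NULL lag_add_cb is accepted; LAG events then fall through to the
 * bridged-with-us check above.
 */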
static int __switchdev_handle_fdb_del_to_device(struct net_device *dev,
		const struct net_device *orig_dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*del_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_del_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return del_cb(dev, orig_dev, info->ctx, fdb_info);

	if (netif_is_lag_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			goto maybe_bridged_with_us;

		/* This is a LAG interface that we offload */
		if (!lag_del_cb)
			goto maybe_bridged_with_us;

		return lag_del_cb(dev, orig_dev, info->ctx, fdb_info);
	}

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge device.
	 */
	if (netif_is_bridge_master(dev)) {
		if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
			return 0;

		/* This is a bridge interface that we offload */
		netdev_for_each_lower_dev(dev, lower_dev, iter) {
			/* Do not propagate FDB entries across bridges */
			if (netif_is_bridge_master(lower_dev))
				continue;

			/* Bridge ports might be either us, or LAG interfaces
			 * that we offload.
			 */
			if (!check_cb(lower_dev) &&
			    !switchdev_lower_dev_find(lower_dev, check_cb,
						      foreign_dev_check_cb))
				continue;

			err = __switchdev_handle_fdb_del_to_device(lower_dev, orig_dev,
								   fdb_info, check_cb,
								   foreign_dev_check_cb,
								   del_cb, lag_del_cb);
			if (err && err != -EOPNOTSUPP)
				return err;
		}

		return 0;
	}

maybe_bridged_with_us:
	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
		return 0;

	return __switchdev_handle_fdb_del_to_device(br, orig_dev, fdb_info,
						    check_cb, foreign_dev_check_cb,
						    del_cb, lag_del_cb);
}
int switchdev_handle_fdb_del_to_device(struct net_device *dev,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*del_cb)(struct net_device *dev,
			      const struct net_device *orig_dev, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info),
		int (*lag_del_cb)(struct net_device *dev,
				  const struct net_device *orig_dev, const void *ctx,
				  const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_del_to_device(dev, dev, fdb_info,
						   check_cb,
						   foreign_dev_check_cb,
						   del_cb, lag_del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_del_to_device);
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
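/* Example (sketch): the blocking-notifier side. Resolving
 * SWITCHDEV_PORT_OBJ_ADD through this helper lets objects targeting a bridge
 * or LAG above our ports still reach us ("foo_" names hypothetical):
 *
 *	static int foo_port_obj_add(struct net_device *dev, const void *ctx,
 *				    const struct switchdev_obj *obj,
 *				    struct netlink_ext_ack *extack)
 *	{
 *		switch (obj->id) {
 *		case SWITCHDEV_OBJ_ID_PORT_VLAN:
 *			return foo_vlan_add(dev, SWITCHDEV_OBJ_PORT_VLAN(obj),
 *					    extack);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 *	case SWITCHDEV_PORT_OBJ_ADD:
 *		err = switchdev_handle_port_obj_add(dev, ptr,
 *						    foo_port_dev_check,
 *						    foo_port_obj_add);
 *		return notifier_from_errno(err);
 */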
static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
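/* Example (sketch) of a matching set_cb. Returning -EOPNOTSUPP for unknown
 * attributes preserves the "maybe another driver handles it" semantics of
 * the recursion above ("foo_" names hypothetical):
 *
 *	static int foo_port_attr_set(struct net_device *dev, const void *ctx,
 *				     const struct switchdev_attr *attr,
 *				     struct netlink_ext_ack *extack)
 *	{
 *		switch (attr->id) {
 *		case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
 *			return foo_set_stp_state(dev, attr->u.stp_state);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */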
int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);
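/* Example (sketch): a driver typically calls this when its port joins a
 * bridge, e.g. from NETDEV_CHANGEUPPER handling ("foo_"/"port" names
 * hypothetical):
 *
 *	err = switchdev_bridge_port_offload(brport_dev, port->netdev, port,
 *					    &foo_switchdev_nb,
 *					    &foo_switchdev_blocking_nb,
 *					    false, extack);
 *
 * The bridge responds through the notifiers passed in, replaying existing
 * VLAN/MDB/FDB state against the given ctx. The matching teardown call is
 * switchdev_bridge_port_unoffload() below.
 */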
void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);