// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */
9 #include <linux/if_bridge.h>
10 #include <linux/notifier.h>
11 #include <linux/of_mdio.h>
12 #include <linux/of_net.h>
17 * dsa_port_notify - Notify the switching fabric of changes to a port
18 * @dp: port on which change occurred
19 * @e: event, must be of type DSA_NOTIFIER_*
20 * @v: event-specific value.
22 * Notify all switches in the DSA tree that this port's switch belongs to,
23 * including this switch itself, of an event. Allows the other switches to
24 * reconfigure themselves for cross-chip operations. Can also be used to
25 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
26 * a user port's state changes.
28 static int dsa_port_notify(const struct dsa_port
*dp
, unsigned long e
, void *v
)
30 return dsa_tree_notify(dp
->ds
->dst
, e
, v
);
33 int dsa_port_set_state(struct dsa_port
*dp
, u8 state
, bool do_fast_age
)
35 struct dsa_switch
*ds
= dp
->ds
;
38 if (!ds
->ops
->port_stp_state_set
)
41 ds
->ops
->port_stp_state_set(ds
, port
, state
);
43 if (do_fast_age
&& ds
->ops
->port_fast_age
) {
44 /* Fast age FDB entries or flush appropriate forwarding database
45 * for the given port, if we are moving it from Learning or
46 * Forwarding state, to Disabled or Blocking or Listening state.
47 * Ports that were standalone before the STP state change don't
48 * need to fast age the FDB, since address learning is off in
52 if ((dp
->stp_state
== BR_STATE_LEARNING
||
53 dp
->stp_state
== BR_STATE_FORWARDING
) &&
54 (state
== BR_STATE_DISABLED
||
55 state
== BR_STATE_BLOCKING
||
56 state
== BR_STATE_LISTENING
))
57 ds
->ops
->port_fast_age(ds
, port
);
60 dp
->stp_state
= state
;
65 static void dsa_port_set_state_now(struct dsa_port
*dp
, u8 state
,
70 err
= dsa_port_set_state(dp
, state
, do_fast_age
);
72 pr_err("DSA: failed to set STP state %u (%d)\n", state
, err
);
75 int dsa_port_enable_rt(struct dsa_port
*dp
, struct phy_device
*phy
)
77 struct dsa_switch
*ds
= dp
->ds
;
81 if (ds
->ops
->port_enable
) {
82 err
= ds
->ops
->port_enable(ds
, port
, phy
);
88 dsa_port_set_state_now(dp
, BR_STATE_FORWARDING
, false);
91 phylink_start(dp
->pl
);
/* rtnl-acquiring wrapper around dsa_port_enable_rt() */
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}
107 void dsa_port_disable_rt(struct dsa_port
*dp
)
109 struct dsa_switch
*ds
= dp
->ds
;
110 int port
= dp
->index
;
113 phylink_stop(dp
->pl
);
116 dsa_port_set_state_now(dp
, BR_STATE_DISABLED
, false);
118 if (ds
->ops
->port_disable
)
119 ds
->ops
->port_disable(ds
, port
);
/* rtnl-acquiring wrapper around dsa_port_disable_rt() */
void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}
129 static int dsa_port_inherit_brport_flags(struct dsa_port
*dp
,
130 struct netlink_ext_ack
*extack
)
132 const unsigned long mask
= BR_LEARNING
| BR_FLOOD
| BR_MCAST_FLOOD
|
134 struct net_device
*brport_dev
= dsa_port_to_bridge_port(dp
);
137 for_each_set_bit(flag
, &mask
, 32) {
138 struct switchdev_brport_flags flags
= {0};
140 flags
.mask
= BIT(flag
);
142 if (br_port_flag_is_set(brport_dev
, BIT(flag
)))
143 flags
.val
= BIT(flag
);
145 err
= dsa_port_bridge_flags(dp
, flags
, extack
);
146 if (err
&& err
!= -EOPNOTSUPP
)
153 static void dsa_port_clear_brport_flags(struct dsa_port
*dp
)
155 const unsigned long val
= BR_FLOOD
| BR_MCAST_FLOOD
| BR_BCAST_FLOOD
;
156 const unsigned long mask
= BR_LEARNING
| BR_FLOOD
| BR_MCAST_FLOOD
|
160 for_each_set_bit(flag
, &mask
, 32) {
161 struct switchdev_brport_flags flags
= {0};
163 flags
.mask
= BIT(flag
);
164 flags
.val
= val
& BIT(flag
);
166 err
= dsa_port_bridge_flags(dp
, flags
, NULL
);
167 if (err
&& err
!= -EOPNOTSUPP
)
169 "failed to clear bridge port flag %lu: %pe\n",
170 flags
.val
, ERR_PTR(err
));
174 static int dsa_port_switchdev_sync_attrs(struct dsa_port
*dp
,
175 struct netlink_ext_ack
*extack
)
177 struct net_device
*brport_dev
= dsa_port_to_bridge_port(dp
);
178 struct net_device
*br
= dp
->bridge_dev
;
181 err
= dsa_port_inherit_brport_flags(dp
, extack
);
185 err
= dsa_port_set_state(dp
, br_port_get_stp_state(brport_dev
), false);
186 if (err
&& err
!= -EOPNOTSUPP
)
189 err
= dsa_port_vlan_filtering(dp
, br_vlan_enabled(br
), extack
);
190 if (err
&& err
!= -EOPNOTSUPP
)
193 err
= dsa_port_ageing_time(dp
, br_get_ageing_time(br
));
194 if (err
&& err
!= -EOPNOTSUPP
)
200 static void dsa_port_switchdev_unsync_attrs(struct dsa_port
*dp
)
202 /* Configure the port for standalone mode (no address learning,
204 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
205 * when the user requests it through netlink or sysfs, but not
206 * automatically at port join or leave, so we need to handle resetting
207 * the brport flags ourselves. But we even prefer it that way, because
208 * otherwise, some setups might never get the notification they need,
209 * for example, when a port leaves a LAG that offloads the bridge,
210 * it becomes standalone, but as far as the bridge is concerned, no
213 dsa_port_clear_brport_flags(dp
);
215 /* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
216 * so allow it to be in BR_STATE_FORWARDING to be kept functional
218 dsa_port_set_state_now(dp
, BR_STATE_FORWARDING
, true);
220 /* VLAN filtering is handled by dsa_switch_bridge_leave */
222 /* Ageing time may be global to the switch chip, so don't change it
223 * here because we have no good reason (or value) to change it to.
227 static int dsa_tree_find_bridge_num(struct dsa_switch_tree
*dst
,
228 struct net_device
*bridge_dev
)
232 /* When preparing the offload for a port, it will have a valid
233 * dp->bridge_dev pointer but a not yet valid dp->bridge_num.
234 * However there might be other ports having the same dp->bridge_dev
235 * and a valid dp->bridge_num, so just ignore this port.
237 list_for_each_entry(dp
, &dst
->ports
, list
)
238 if (dp
->bridge_dev
== bridge_dev
&& dp
->bridge_num
!= -1)
239 return dp
->bridge_num
;
244 static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port
*dp
,
245 struct net_device
*bridge_dev
)
247 struct dsa_switch_tree
*dst
= dp
->ds
->dst
;
248 int bridge_num
= dp
->bridge_num
;
249 struct dsa_switch
*ds
= dp
->ds
;
251 /* No bridge TX forwarding offload => do nothing */
252 if (!ds
->ops
->port_bridge_tx_fwd_unoffload
|| dp
->bridge_num
== -1)
257 /* Check if the bridge is still in use, otherwise it is time
258 * to clean it up so we can reuse this bridge_num later.
260 if (!dsa_tree_find_bridge_num(dst
, bridge_dev
))
261 clear_bit(bridge_num
, &dst
->fwd_offloading_bridges
);
263 /* Notify the chips only once the offload has been deactivated, so
264 * that they can update their configuration accordingly.
266 ds
->ops
->port_bridge_tx_fwd_unoffload(ds
, dp
->index
, bridge_dev
,
270 static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port
*dp
,
271 struct net_device
*bridge_dev
)
273 struct dsa_switch_tree
*dst
= dp
->ds
->dst
;
274 struct dsa_switch
*ds
= dp
->ds
;
277 if (!ds
->ops
->port_bridge_tx_fwd_offload
)
280 bridge_num
= dsa_tree_find_bridge_num(dst
, bridge_dev
);
281 if (bridge_num
< 0) {
282 /* First port that offloads TX forwarding for this bridge */
283 bridge_num
= find_first_zero_bit(&dst
->fwd_offloading_bridges
,
284 DSA_MAX_NUM_OFFLOADING_BRIDGES
);
285 if (bridge_num
>= ds
->num_fwd_offloading_bridges
)
288 set_bit(bridge_num
, &dst
->fwd_offloading_bridges
);
291 dp
->bridge_num
= bridge_num
;
293 /* Notify the driver */
294 err
= ds
->ops
->port_bridge_tx_fwd_offload(ds
, dp
->index
, bridge_dev
,
297 dsa_port_bridge_tx_fwd_unoffload(dp
, bridge_dev
);
304 int dsa_port_bridge_join(struct dsa_port
*dp
, struct net_device
*br
,
305 struct netlink_ext_ack
*extack
)
307 struct dsa_notifier_bridge_info info
= {
308 .tree_index
= dp
->ds
->dst
->index
,
309 .sw_index
= dp
->ds
->index
,
313 struct net_device
*dev
= dp
->slave
;
314 struct net_device
*brport_dev
;
318 /* Here the interface is already bridged. Reflect the current
319 * configuration so that drivers can program their chips accordingly.
323 brport_dev
= dsa_port_to_bridge_port(dp
);
325 err
= dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN
, &info
);
329 tx_fwd_offload
= dsa_port_bridge_tx_fwd_offload(dp
, br
);
331 err
= switchdev_bridge_port_offload(brport_dev
, dev
, dp
,
332 &dsa_slave_switchdev_notifier
,
333 &dsa_slave_switchdev_blocking_notifier
,
334 tx_fwd_offload
, extack
);
336 goto out_rollback_unbridge
;
338 err
= dsa_port_switchdev_sync_attrs(dp
, extack
);
340 goto out_rollback_unoffload
;
344 out_rollback_unoffload
:
345 switchdev_bridge_port_unoffload(brport_dev
, dp
,
346 &dsa_slave_switchdev_notifier
,
347 &dsa_slave_switchdev_blocking_notifier
);
348 out_rollback_unbridge
:
349 dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE
, &info
);
351 dp
->bridge_dev
= NULL
;
355 void dsa_port_pre_bridge_leave(struct dsa_port
*dp
, struct net_device
*br
)
357 struct net_device
*brport_dev
= dsa_port_to_bridge_port(dp
);
359 switchdev_bridge_port_unoffload(brport_dev
, dp
,
360 &dsa_slave_switchdev_notifier
,
361 &dsa_slave_switchdev_blocking_notifier
);
364 void dsa_port_bridge_leave(struct dsa_port
*dp
, struct net_device
*br
)
366 struct dsa_notifier_bridge_info info
= {
367 .tree_index
= dp
->ds
->dst
->index
,
368 .sw_index
= dp
->ds
->index
,
374 /* Here the port is already unbridged. Reflect the current configuration
375 * so that drivers can program their chips accordingly.
377 dp
->bridge_dev
= NULL
;
379 dsa_port_bridge_tx_fwd_unoffload(dp
, br
);
381 err
= dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE
, &info
);
383 pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
385 dsa_port_switchdev_unsync_attrs(dp
);
388 int dsa_port_lag_change(struct dsa_port
*dp
,
389 struct netdev_lag_lower_state_info
*linfo
)
391 struct dsa_notifier_lag_info info
= {
392 .sw_index
= dp
->ds
->index
,
400 /* On statically configured aggregates (e.g. loadbalance
401 * without LACP) ports will always be tx_enabled, even if the
402 * link is down. Thus we require both link_up and tx_enabled
403 * in order to include it in the tx set.
405 tx_enabled
= linfo
->link_up
&& linfo
->tx_enabled
;
407 if (tx_enabled
== dp
->lag_tx_enabled
)
410 dp
->lag_tx_enabled
= tx_enabled
;
412 return dsa_port_notify(dp
, DSA_NOTIFIER_LAG_CHANGE
, &info
);
415 int dsa_port_lag_join(struct dsa_port
*dp
, struct net_device
*lag
,
416 struct netdev_lag_upper_info
*uinfo
,
417 struct netlink_ext_ack
*extack
)
419 struct dsa_notifier_lag_info info
= {
420 .sw_index
= dp
->ds
->index
,
425 struct net_device
*bridge_dev
;
428 dsa_lag_map(dp
->ds
->dst
, lag
);
431 err
= dsa_port_notify(dp
, DSA_NOTIFIER_LAG_JOIN
, &info
);
435 bridge_dev
= netdev_master_upper_dev_get(lag
);
436 if (!bridge_dev
|| !netif_is_bridge_master(bridge_dev
))
439 err
= dsa_port_bridge_join(dp
, bridge_dev
, extack
);
441 goto err_bridge_join
;
446 dsa_port_notify(dp
, DSA_NOTIFIER_LAG_LEAVE
, &info
);
449 dsa_lag_unmap(dp
->ds
->dst
, lag
);
453 void dsa_port_pre_lag_leave(struct dsa_port
*dp
, struct net_device
*lag
)
456 dsa_port_pre_bridge_leave(dp
, dp
->bridge_dev
);
459 void dsa_port_lag_leave(struct dsa_port
*dp
, struct net_device
*lag
)
461 struct dsa_notifier_lag_info info
= {
462 .sw_index
= dp
->ds
->index
,
471 /* Port might have been part of a LAG that in turn was
472 * attached to a bridge.
475 dsa_port_bridge_leave(dp
, dp
->bridge_dev
);
477 dp
->lag_tx_enabled
= false;
480 err
= dsa_port_notify(dp
, DSA_NOTIFIER_LAG_LEAVE
, &info
);
482 pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
485 dsa_lag_unmap(dp
->ds
->dst
, lag
);
488 /* Must be called under rcu_read_lock() */
489 static bool dsa_port_can_apply_vlan_filtering(struct dsa_port
*dp
,
491 struct netlink_ext_ack
*extack
)
493 struct dsa_switch
*ds
= dp
->ds
;
496 /* VLAN awareness was off, so the question is "can we turn it on".
497 * We may have had 8021q uppers, those need to go. Make sure we don't
498 * enter an inconsistent state: deny changing the VLAN awareness state
499 * as long as we have 8021q uppers.
501 if (vlan_filtering
&& dsa_is_user_port(ds
, dp
->index
)) {
502 struct net_device
*upper_dev
, *slave
= dp
->slave
;
503 struct net_device
*br
= dp
->bridge_dev
;
504 struct list_head
*iter
;
506 netdev_for_each_upper_dev_rcu(slave
, upper_dev
, iter
) {
507 struct bridge_vlan_info br_info
;
510 if (!is_vlan_dev(upper_dev
))
513 vid
= vlan_dev_vlan_id(upper_dev
);
515 /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
516 * device, respectively the VID is not found, returning
517 * 0 means success, which is a failure for us here.
519 err
= br_vlan_get_info(br
, vid
, &br_info
);
521 NL_SET_ERR_MSG_MOD(extack
,
522 "Must first remove VLAN uppers having VIDs also present in bridge");
528 if (!ds
->vlan_filtering_is_global
)
531 /* For cases where enabling/disabling VLAN awareness is global to the
532 * switch, we need to handle the case where multiple bridges span
533 * different ports of the same switch device and one of them has a
534 * different setting than what is being requested.
536 for (i
= 0; i
< ds
->num_ports
; i
++) {
537 struct net_device
*other_bridge
;
539 other_bridge
= dsa_to_port(ds
, i
)->bridge_dev
;
542 /* If it's the same bridge, it also has same
543 * vlan_filtering setting => no need to check
545 if (other_bridge
== dp
->bridge_dev
)
547 if (br_vlan_enabled(other_bridge
) != vlan_filtering
) {
548 NL_SET_ERR_MSG_MOD(extack
,
549 "VLAN filtering is a global setting");
556 int dsa_port_vlan_filtering(struct dsa_port
*dp
, bool vlan_filtering
,
557 struct netlink_ext_ack
*extack
)
559 struct dsa_switch
*ds
= dp
->ds
;
563 if (!ds
->ops
->port_vlan_filtering
)
566 /* We are called from dsa_slave_switchdev_blocking_event(),
567 * which is not under rcu_read_lock(), unlike
568 * dsa_slave_switchdev_event().
571 apply
= dsa_port_can_apply_vlan_filtering(dp
, vlan_filtering
, extack
);
576 if (dsa_port_is_vlan_filtering(dp
) == vlan_filtering
)
579 err
= ds
->ops
->port_vlan_filtering(ds
, dp
->index
, vlan_filtering
,
584 if (ds
->vlan_filtering_is_global
)
585 ds
->vlan_filtering
= vlan_filtering
;
587 dp
->vlan_filtering
= vlan_filtering
;
592 /* This enforces legacy behavior for switch drivers which assume they can't
593 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
595 bool dsa_port_skip_vlan_configuration(struct dsa_port
*dp
)
597 struct dsa_switch
*ds
= dp
->ds
;
602 return (!ds
->configure_vlan_while_not_filtering
&&
603 !br_vlan_enabled(dp
->bridge_dev
));
606 int dsa_port_ageing_time(struct dsa_port
*dp
, clock_t ageing_clock
)
608 unsigned long ageing_jiffies
= clock_t_to_jiffies(ageing_clock
);
609 unsigned int ageing_time
= jiffies_to_msecs(ageing_jiffies
);
610 struct dsa_notifier_ageing_time_info info
;
613 info
.ageing_time
= ageing_time
;
615 err
= dsa_port_notify(dp
, DSA_NOTIFIER_AGEING_TIME
, &info
);
619 dp
->ageing_time
= ageing_time
;
624 int dsa_port_pre_bridge_flags(const struct dsa_port
*dp
,
625 struct switchdev_brport_flags flags
,
626 struct netlink_ext_ack
*extack
)
628 struct dsa_switch
*ds
= dp
->ds
;
630 if (!ds
->ops
->port_pre_bridge_flags
)
633 return ds
->ops
->port_pre_bridge_flags(ds
, dp
->index
, flags
, extack
);
636 int dsa_port_bridge_flags(const struct dsa_port
*dp
,
637 struct switchdev_brport_flags flags
,
638 struct netlink_ext_ack
*extack
)
640 struct dsa_switch
*ds
= dp
->ds
;
642 if (!ds
->ops
->port_bridge_flags
)
645 return ds
->ops
->port_bridge_flags(ds
, dp
->index
, flags
, extack
);
648 int dsa_port_mtu_change(struct dsa_port
*dp
, int new_mtu
,
651 struct dsa_notifier_mtu_info info
= {
652 .sw_index
= dp
->ds
->index
,
653 .targeted_match
= targeted_match
,
658 return dsa_port_notify(dp
, DSA_NOTIFIER_MTU
, &info
);
661 int dsa_port_fdb_add(struct dsa_port
*dp
, const unsigned char *addr
,
664 struct dsa_notifier_fdb_info info
= {
665 .sw_index
= dp
->ds
->index
,
671 return dsa_port_notify(dp
, DSA_NOTIFIER_FDB_ADD
, &info
);
674 int dsa_port_fdb_del(struct dsa_port
*dp
, const unsigned char *addr
,
677 struct dsa_notifier_fdb_info info
= {
678 .sw_index
= dp
->ds
->index
,
685 return dsa_port_notify(dp
, DSA_NOTIFIER_FDB_DEL
, &info
);
688 int dsa_port_host_fdb_add(struct dsa_port
*dp
, const unsigned char *addr
,
691 struct dsa_notifier_fdb_info info
= {
692 .sw_index
= dp
->ds
->index
,
697 struct dsa_port
*cpu_dp
= dp
->cpu_dp
;
700 err
= dev_uc_add(cpu_dp
->master
, addr
);
704 return dsa_port_notify(dp
, DSA_NOTIFIER_HOST_FDB_ADD
, &info
);
707 int dsa_port_host_fdb_del(struct dsa_port
*dp
, const unsigned char *addr
,
710 struct dsa_notifier_fdb_info info
= {
711 .sw_index
= dp
->ds
->index
,
716 struct dsa_port
*cpu_dp
= dp
->cpu_dp
;
719 err
= dev_uc_del(cpu_dp
->master
, addr
);
723 return dsa_port_notify(dp
, DSA_NOTIFIER_HOST_FDB_DEL
, &info
);
726 int dsa_port_fdb_dump(struct dsa_port
*dp
, dsa_fdb_dump_cb_t
*cb
, void *data
)
728 struct dsa_switch
*ds
= dp
->ds
;
729 int port
= dp
->index
;
731 if (!ds
->ops
->port_fdb_dump
)
734 return ds
->ops
->port_fdb_dump(ds
, port
, cb
, data
);
737 int dsa_port_mdb_add(const struct dsa_port
*dp
,
738 const struct switchdev_obj_port_mdb
*mdb
)
740 struct dsa_notifier_mdb_info info
= {
741 .sw_index
= dp
->ds
->index
,
746 return dsa_port_notify(dp
, DSA_NOTIFIER_MDB_ADD
, &info
);
749 int dsa_port_mdb_del(const struct dsa_port
*dp
,
750 const struct switchdev_obj_port_mdb
*mdb
)
752 struct dsa_notifier_mdb_info info
= {
753 .sw_index
= dp
->ds
->index
,
758 return dsa_port_notify(dp
, DSA_NOTIFIER_MDB_DEL
, &info
);
761 int dsa_port_host_mdb_add(const struct dsa_port
*dp
,
762 const struct switchdev_obj_port_mdb
*mdb
)
764 struct dsa_notifier_mdb_info info
= {
765 .sw_index
= dp
->ds
->index
,
769 struct dsa_port
*cpu_dp
= dp
->cpu_dp
;
772 err
= dev_mc_add(cpu_dp
->master
, mdb
->addr
);
776 return dsa_port_notify(dp
, DSA_NOTIFIER_HOST_MDB_ADD
, &info
);
779 int dsa_port_host_mdb_del(const struct dsa_port
*dp
,
780 const struct switchdev_obj_port_mdb
*mdb
)
782 struct dsa_notifier_mdb_info info
= {
783 .sw_index
= dp
->ds
->index
,
787 struct dsa_port
*cpu_dp
= dp
->cpu_dp
;
790 err
= dev_mc_del(cpu_dp
->master
, mdb
->addr
);
794 return dsa_port_notify(dp
, DSA_NOTIFIER_HOST_MDB_DEL
, &info
);
797 int dsa_port_vlan_add(struct dsa_port
*dp
,
798 const struct switchdev_obj_port_vlan
*vlan
,
799 struct netlink_ext_ack
*extack
)
801 struct dsa_notifier_vlan_info info
= {
802 .sw_index
= dp
->ds
->index
,
808 return dsa_port_notify(dp
, DSA_NOTIFIER_VLAN_ADD
, &info
);
811 int dsa_port_vlan_del(struct dsa_port
*dp
,
812 const struct switchdev_obj_port_vlan
*vlan
)
814 struct dsa_notifier_vlan_info info
= {
815 .sw_index
= dp
->ds
->index
,
820 return dsa_port_notify(dp
, DSA_NOTIFIER_VLAN_DEL
, &info
);
823 int dsa_port_mrp_add(const struct dsa_port
*dp
,
824 const struct switchdev_obj_mrp
*mrp
)
826 struct dsa_notifier_mrp_info info
= {
827 .sw_index
= dp
->ds
->index
,
832 return dsa_port_notify(dp
, DSA_NOTIFIER_MRP_ADD
, &info
);
835 int dsa_port_mrp_del(const struct dsa_port
*dp
,
836 const struct switchdev_obj_mrp
*mrp
)
838 struct dsa_notifier_mrp_info info
= {
839 .sw_index
= dp
->ds
->index
,
844 return dsa_port_notify(dp
, DSA_NOTIFIER_MRP_DEL
, &info
);
847 int dsa_port_mrp_add_ring_role(const struct dsa_port
*dp
,
848 const struct switchdev_obj_ring_role_mrp
*mrp
)
850 struct dsa_notifier_mrp_ring_role_info info
= {
851 .sw_index
= dp
->ds
->index
,
856 return dsa_port_notify(dp
, DSA_NOTIFIER_MRP_ADD_RING_ROLE
, &info
);
859 int dsa_port_mrp_del_ring_role(const struct dsa_port
*dp
,
860 const struct switchdev_obj_ring_role_mrp
*mrp
)
862 struct dsa_notifier_mrp_ring_role_info info
= {
863 .sw_index
= dp
->ds
->index
,
868 return dsa_port_notify(dp
, DSA_NOTIFIER_MRP_DEL_RING_ROLE
, &info
);
871 void dsa_port_set_tag_protocol(struct dsa_port
*cpu_dp
,
872 const struct dsa_device_ops
*tag_ops
)
874 cpu_dp
->rcv
= tag_ops
->rcv
;
875 cpu_dp
->tag_ops
= tag_ops
;
878 static struct phy_device
*dsa_port_get_phy_device(struct dsa_port
*dp
)
880 struct device_node
*phy_dn
;
881 struct phy_device
*phydev
;
883 phy_dn
= of_parse_phandle(dp
->dn
, "phy-handle", 0);
887 phydev
= of_phy_find_device(phy_dn
);
890 return ERR_PTR(-EPROBE_DEFER
);
897 static void dsa_port_phylink_validate(struct phylink_config
*config
,
898 unsigned long *supported
,
899 struct phylink_link_state
*state
)
901 struct dsa_port
*dp
= container_of(config
, struct dsa_port
, pl_config
);
902 struct dsa_switch
*ds
= dp
->ds
;
904 if (!ds
->ops
->phylink_validate
)
907 ds
->ops
->phylink_validate(ds
, dp
->index
, supported
, state
);
910 static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config
*config
,
911 struct phylink_link_state
*state
)
913 struct dsa_port
*dp
= container_of(config
, struct dsa_port
, pl_config
);
914 struct dsa_switch
*ds
= dp
->ds
;
917 /* Only called for inband modes */
918 if (!ds
->ops
->phylink_mac_link_state
) {
923 err
= ds
->ops
->phylink_mac_link_state(ds
, dp
->index
, state
);
925 dev_err(ds
->dev
, "p%d: phylink_mac_link_state() failed: %d\n",
931 static void dsa_port_phylink_mac_config(struct phylink_config
*config
,
933 const struct phylink_link_state
*state
)
935 struct dsa_port
*dp
= container_of(config
, struct dsa_port
, pl_config
);
936 struct dsa_switch
*ds
= dp
->ds
;
938 if (!ds
->ops
->phylink_mac_config
)
941 ds
->ops
->phylink_mac_config(ds
, dp
->index
, mode
, state
);
944 static void dsa_port_phylink_mac_an_restart(struct phylink_config
*config
)
946 struct dsa_port
*dp
= container_of(config
, struct dsa_port
, pl_config
);
947 struct dsa_switch
*ds
= dp
->ds
;
949 if (!ds
->ops
->phylink_mac_an_restart
)
952 ds
->ops
->phylink_mac_an_restart(ds
, dp
->index
);
955 static void dsa_port_phylink_mac_link_down(struct phylink_config
*config
,
957 phy_interface_t interface
)
959 struct dsa_port
*dp
= container_of(config
, struct dsa_port
, pl_config
);
960 struct phy_device
*phydev
= NULL
;
961 struct dsa_switch
*ds
= dp
->ds
;
963 if (dsa_is_user_port(ds
, dp
->index
))
964 phydev
= dp
->slave
->phydev
;
966 if (!ds
->ops
->phylink_mac_link_down
) {
967 if (ds
->ops
->adjust_link
&& phydev
)
968 ds
->ops
->adjust_link(ds
, dp
->index
, phydev
);
972 ds
->ops
->phylink_mac_link_down(ds
, dp
->index
, mode
, interface
);
975 static void dsa_port_phylink_mac_link_up(struct phylink_config
*config
,
976 struct phy_device
*phydev
,
978 phy_interface_t interface
,
979 int speed
, int duplex
,
980 bool tx_pause
, bool rx_pause
)
982 struct dsa_port
*dp
= container_of(config
, struct dsa_port
, pl_config
);
983 struct dsa_switch
*ds
= dp
->ds
;
985 if (!ds
->ops
->phylink_mac_link_up
) {
986 if (ds
->ops
->adjust_link
&& phydev
)
987 ds
->ops
->adjust_link(ds
, dp
->index
, phydev
);
991 ds
->ops
->phylink_mac_link_up(ds
, dp
->index
, mode
, interface
, phydev
,
992 speed
, duplex
, tx_pause
, rx_pause
);
995 const struct phylink_mac_ops dsa_port_phylink_mac_ops
= {
996 .validate
= dsa_port_phylink_validate
,
997 .mac_pcs_get_state
= dsa_port_phylink_mac_pcs_get_state
,
998 .mac_config
= dsa_port_phylink_mac_config
,
999 .mac_an_restart
= dsa_port_phylink_mac_an_restart
,
1000 .mac_link_down
= dsa_port_phylink_mac_link_down
,
1001 .mac_link_up
= dsa_port_phylink_mac_link_up
,
1004 static int dsa_port_setup_phy_of(struct dsa_port
*dp
, bool enable
)
1006 struct dsa_switch
*ds
= dp
->ds
;
1007 struct phy_device
*phydev
;
1008 int port
= dp
->index
;
1011 phydev
= dsa_port_get_phy_device(dp
);
1016 return PTR_ERR(phydev
);
1019 err
= genphy_resume(phydev
);
1023 err
= genphy_read_status(phydev
);
1027 err
= genphy_suspend(phydev
);
1032 if (ds
->ops
->adjust_link
)
1033 ds
->ops
->adjust_link(ds
, port
, phydev
);
1035 dev_dbg(ds
->dev
, "enabled port's phy: %s", phydev_name(phydev
));
1038 put_device(&phydev
->mdio
.dev
);
1042 static int dsa_port_fixed_link_register_of(struct dsa_port
*dp
)
1044 struct device_node
*dn
= dp
->dn
;
1045 struct dsa_switch
*ds
= dp
->ds
;
1046 struct phy_device
*phydev
;
1047 int port
= dp
->index
;
1048 phy_interface_t mode
;
1051 err
= of_phy_register_fixed_link(dn
);
1054 "failed to register the fixed PHY of port %d\n",
1059 phydev
= of_phy_find_device(dn
);
1061 err
= of_get_phy_mode(dn
, &mode
);
1063 mode
= PHY_INTERFACE_MODE_NA
;
1064 phydev
->interface
= mode
;
1066 genphy_read_status(phydev
);
1068 if (ds
->ops
->adjust_link
)
1069 ds
->ops
->adjust_link(ds
, port
, phydev
);
1071 put_device(&phydev
->mdio
.dev
);
1076 static int dsa_port_phylink_register(struct dsa_port
*dp
)
1078 struct dsa_switch
*ds
= dp
->ds
;
1079 struct device_node
*port_dn
= dp
->dn
;
1080 phy_interface_t mode
;
1083 err
= of_get_phy_mode(port_dn
, &mode
);
1085 mode
= PHY_INTERFACE_MODE_NA
;
1087 dp
->pl_config
.dev
= ds
->dev
;
1088 dp
->pl_config
.type
= PHYLINK_DEV
;
1089 dp
->pl_config
.pcs_poll
= ds
->pcs_poll
;
1091 dp
->pl
= phylink_create(&dp
->pl_config
, of_fwnode_handle(port_dn
),
1092 mode
, &dsa_port_phylink_mac_ops
);
1093 if (IS_ERR(dp
->pl
)) {
1094 pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp
->pl
));
1095 return PTR_ERR(dp
->pl
);
1098 err
= phylink_of_phy_connect(dp
->pl
, port_dn
, 0);
1099 if (err
&& err
!= -ENODEV
) {
1100 pr_err("could not attach to PHY: %d\n", err
);
1101 goto err_phy_connect
;
1107 phylink_destroy(dp
->pl
);
1111 int dsa_port_link_register_of(struct dsa_port
*dp
)
1113 struct dsa_switch
*ds
= dp
->ds
;
1114 struct device_node
*phy_np
;
1115 int port
= dp
->index
;
1117 if (!ds
->ops
->adjust_link
) {
1118 phy_np
= of_parse_phandle(dp
->dn
, "phy-handle", 0);
1119 if (of_phy_is_fixed_link(dp
->dn
) || phy_np
) {
1120 if (ds
->ops
->phylink_mac_link_down
)
1121 ds
->ops
->phylink_mac_link_down(ds
, port
,
1122 MLO_AN_FIXED
, PHY_INTERFACE_MODE_NA
);
1123 return dsa_port_phylink_register(dp
);
1129 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
1131 if (of_phy_is_fixed_link(dp
->dn
))
1132 return dsa_port_fixed_link_register_of(dp
);
1134 return dsa_port_setup_phy_of(dp
, true);
1137 void dsa_port_link_unregister_of(struct dsa_port
*dp
)
1139 struct dsa_switch
*ds
= dp
->ds
;
1141 if (!ds
->ops
->adjust_link
&& dp
->pl
) {
1143 phylink_disconnect_phy(dp
->pl
);
1145 phylink_destroy(dp
->pl
);
1150 if (of_phy_is_fixed_link(dp
->dn
))
1151 of_phy_deregister_fixed_link(dp
->dn
);
1153 dsa_port_setup_phy_of(dp
, false);
1156 int dsa_port_get_phy_strings(struct dsa_port
*dp
, uint8_t *data
)
1158 struct phy_device
*phydev
;
1159 int ret
= -EOPNOTSUPP
;
1161 if (of_phy_is_fixed_link(dp
->dn
))
1164 phydev
= dsa_port_get_phy_device(dp
);
1165 if (IS_ERR_OR_NULL(phydev
))
1168 ret
= phy_ethtool_get_strings(phydev
, data
);
1169 put_device(&phydev
->mdio
.dev
);
1173 EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings
);
1175 int dsa_port_get_ethtool_phy_stats(struct dsa_port
*dp
, uint64_t *data
)
1177 struct phy_device
*phydev
;
1178 int ret
= -EOPNOTSUPP
;
1180 if (of_phy_is_fixed_link(dp
->dn
))
1183 phydev
= dsa_port_get_phy_device(dp
);
1184 if (IS_ERR_OR_NULL(phydev
))
1187 ret
= phy_ethtool_get_stats(phydev
, NULL
, data
);
1188 put_device(&phydev
->mdio
.dev
);
1192 EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats
);
1194 int dsa_port_get_phy_sset_count(struct dsa_port
*dp
)
1196 struct phy_device
*phydev
;
1197 int ret
= -EOPNOTSUPP
;
1199 if (of_phy_is_fixed_link(dp
->dn
))
1202 phydev
= dsa_port_get_phy_device(dp
);
1203 if (IS_ERR_OR_NULL(phydev
))
1206 ret
= phy_ethtool_get_sset_count(phydev
);
1207 put_device(&phydev
->mdio
.dev
);
1211 EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count
);
1213 int dsa_port_hsr_join(struct dsa_port
*dp
, struct net_device
*hsr
)
1215 struct dsa_notifier_hsr_info info
= {
1216 .sw_index
= dp
->ds
->index
,
1224 err
= dsa_port_notify(dp
, DSA_NOTIFIER_HSR_JOIN
, &info
);
1231 void dsa_port_hsr_leave(struct dsa_port
*dp
, struct net_device
*hsr
)
1233 struct dsa_notifier_hsr_info info
= {
1234 .sw_index
= dp
->ds
->index
,
1242 err
= dsa_port_notify(dp
, DSA_NOTIFIER_HSR_LEAVE
, &info
);
1244 pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
1247 int dsa_port_tag_8021q_vlan_add(struct dsa_port
*dp
, u16 vid
)
1249 struct dsa_notifier_tag_8021q_vlan_info info
= {
1250 .tree_index
= dp
->ds
->dst
->index
,
1251 .sw_index
= dp
->ds
->index
,
1256 return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD
, &info
);
1259 void dsa_port_tag_8021q_vlan_del(struct dsa_port
*dp
, u16 vid
)
1261 struct dsa_notifier_tag_8021q_vlan_info info
= {
1262 .tree_index
= dp
->ds
->dst
->index
,
1263 .sw_index
= dp
->ds
->index
,
1269 err
= dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL
, &info
);
1271 pr_err("DSA: failed to notify tag_8021q VLAN deletion: %pe\n",