net/dsa/port.c (mirror_ubuntu-jammy-kernel.git, "net: dsa: don't fast age standalone ports")
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
	return dsa_tree_notify(dp->ds->dst, e, v);
}

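/* Program @state into the hardware, optionally fast-ageing out dynamically
 * learned FDB entries. The flush only happens when the port leaves a
 * learning-capable STP state (LEARNING/FORWARDING) for a non-learning one
 * (DISABLED/BLOCKING/LISTENING); callers pass do_fast_age=false for
 * standalone ports, since address learning is off in standalone mode.
 */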
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_stp_state_set)
		return -EOPNOTSUPP;

	ds->ops->port_stp_state_set(ds, port, state);

	if (do_fast_age && ds->ops->port_fast_age) {
		/* Fast age FDB entries or flush appropriate forwarding database
		 * for the given port, if we are moving it from Learning or
		 * Forwarding state, to Disabled or Blocking or Listening state.
		 * Ports that were standalone before the STP state change don't
		 * need to fast age the FDB, since address learning is off in
		 * standalone mode.
		 */

		if ((dp->stp_state == BR_STATE_LEARNING ||
		     dp->stp_state == BR_STATE_FORWARDING) &&
		    (state == BR_STATE_DISABLED ||
		     state == BR_STATE_BLOCKING ||
		     state == BR_STATE_LISTENING))
			ds->ops->port_fast_age(ds, port);
	}

	dp->stp_state = state;

	return 0;
}

static void dsa_port_set_state_now(struct dsa_port *dp, u8 state,
				   bool do_fast_age)
{
	int err;

	err = dsa_port_set_state(dp, state, do_fast_age);
	if (err)
		pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

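/* The _rt variants below expect the caller to already hold the RTNL lock
 * (phylink_start()/phylink_stop() require it); the plain dsa_port_enable()
 * and dsa_port_disable() wrappers take it themselves. Standalone ports
 * (no bridge_dev) are forced into BR_STATE_FORWARDING here so that traffic
 * flows without a bridge.
 */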
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	int err;

	if (ds->ops->port_enable) {
		err = ds->ops->port_enable(ds, port, phy);
		if (err)
			return err;
	}

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);

	if (dp->pl)
		phylink_start(dp->pl);

	return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
	int err;

	rtnl_lock();
	err = dsa_port_enable_rt(dp, phy);
	rtnl_unlock();

	return err;
}

void dsa_port_disable_rt(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (dp->pl)
		phylink_stop(dp->pl);

	if (!dp->bridge_dev)
		dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);

	if (ds->ops->port_disable)
		ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
	rtnl_lock();
	dsa_port_disable_rt(dp);
	rtnl_unlock();
}

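/* On bridge join, mirror the bridge port's current flags (learning and the
 * flooding knobs) into the hardware; on leave, dsa_port_clear_brport_flags()
 * resets them to the standalone defaults (flood everything, no learning).
 * -EOPNOTSUPP from the driver is tolerated in both directions.
 */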
static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);

		if (br_port_flag_is_set(brport_dev, BIT(flag)))
			flags.val = BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
	const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
				   BR_BCAST_FLOOD;
	int flag, err;

	for_each_set_bit(flag, &mask, 32) {
		struct switchdev_brport_flags flags = {0};

		flags.mask = BIT(flag);
		flags.val = val & BIT(flag);

		err = dsa_port_bridge_flags(dp, flags, NULL);
		if (err && err != -EOPNOTSUPP)
			dev_err(dp->ds->dev,
				"failed to clear bridge port flag %lu: %pe\n",
				flags.val, ERR_PTR(err));
	}
}

static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
					 struct netlink_ext_ack *extack)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
	struct net_device *br = dp->bridge_dev;
	int err;

	err = dsa_port_inherit_brport_flags(dp, extack);
	if (err)
		return err;

	err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev), false);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
	/* Configure the port for standalone mode (no address learning,
	 * flood everything).
	 * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
	 * when the user requests it through netlink or sysfs, but not
	 * automatically at port join or leave, so we need to handle resetting
	 * the brport flags ourselves. We even prefer it that way, because
	 * otherwise some setups might never get the notification they need;
	 * for example, when a port leaves a LAG that offloads the bridge, it
	 * becomes standalone, but as far as the bridge is concerned, no port
	 * ever left.
	 */
	dsa_port_clear_brport_flags(dp);

	/* The port leaves the bridge in BR_STATE_DISABLED (set by the bridge
	 * layer), so move it to BR_STATE_FORWARDING to keep it functional as
	 * a standalone port.
	 */
	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

	/* VLAN filtering is handled by dsa_switch_bridge_leave */

	/* Ageing time may be global to the switch chip, so don't change it
	 * here because we have no good reason (or value) to change it to.
	 */
}

static int dsa_tree_find_bridge_num(struct dsa_switch_tree *dst,
				    struct net_device *bridge_dev)
{
	struct dsa_port *dp;

	/* When preparing the offload for a port, it will have a valid
	 * dp->bridge_dev pointer but a not yet valid dp->bridge_num.
	 * However there might be other ports having the same dp->bridge_dev
	 * and a valid dp->bridge_num, so just ignore this port.
	 */
	list_for_each_entry(dp, &dst->ports, list)
		if (dp->bridge_dev == bridge_dev && dp->bridge_num != -1)
			return dp->bridge_num;

	return -1;
}

static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp,
					     struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst = dp->ds->dst;
	int bridge_num = dp->bridge_num;
	struct dsa_switch *ds = dp->ds;

	/* No bridge TX forwarding offload => do nothing */
	if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1)
		return;

	dp->bridge_num = -1;

	/* Check if the bridge is still in use, otherwise it is time
	 * to clean it up so we can reuse this bridge_num later.
	 * dsa_tree_find_bridge_num() returns -1 when no other port uses
	 * this bridge anymore; since a bridge_num of 0 is valid, this
	 * must be a negative check rather than a boolean one.
	 */
	if (dsa_tree_find_bridge_num(dst, bridge_dev) < 0)
		clear_bit(bridge_num, &dst->fwd_offloading_bridges);

	/* Notify the chips only once the offload has been deactivated, so
	 * that they can update their configuration accordingly.
	 */
	ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev,
					      bridge_num);
}

static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp,
					   struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct dsa_switch *ds = dp->ds;
	int bridge_num, err;

	if (!ds->ops->port_bridge_tx_fwd_offload)
		return false;

	bridge_num = dsa_tree_find_bridge_num(dst, bridge_dev);
	if (bridge_num < 0) {
		/* First port that offloads TX forwarding for this bridge */
		bridge_num = find_first_zero_bit(&dst->fwd_offloading_bridges,
						 DSA_MAX_NUM_OFFLOADING_BRIDGES);
		if (bridge_num >= ds->num_fwd_offloading_bridges)
			return false;

		set_bit(bridge_num, &dst->fwd_offloading_bridges);
	}

	dp->bridge_num = bridge_num;

	/* Notify the driver */
	err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev,
						  bridge_num);
	if (err) {
		dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev);
		return false;
	}

	return true;
}

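/* Join sequence: record the bridge, broadcast DSA_NOTIFIER_BRIDGE_JOIN
 * across the tree, try to enable TX forwarding offload, then replay the
 * bridge port's switchdev state. Each step is rolled back in reverse
 * order if a later one fails.
 */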
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	struct net_device *dev = dp->slave;
	struct net_device *brport_dev;
	bool tx_fwd_offload;
	int err;

	/* Here the interface is already bridged. Reflect the current
	 * configuration so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = br;

	brport_dev = dsa_port_to_bridge_port(dp);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
	if (err)
		goto out_rollback;

	tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br);

	err = switchdev_bridge_port_offload(brport_dev, dev, dp,
					    &dsa_slave_switchdev_notifier,
					    &dsa_slave_switchdev_blocking_notifier,
					    tx_fwd_offload, extack);
	if (err)
		goto out_rollback_unbridge;

	err = dsa_port_switchdev_sync_attrs(dp, extack);
	if (err)
		goto out_rollback_unoffload;

	return 0;

out_rollback_unoffload:
	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
out_rollback_unbridge:
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
	dp->bridge_dev = NULL;
	return err;
}

void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct net_device *brport_dev = dsa_port_to_bridge_port(dp);

	switchdev_bridge_port_unoffload(brport_dev, dp,
					&dsa_slave_switchdev_notifier,
					&dsa_slave_switchdev_blocking_notifier);
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.br = br,
	};
	int err;

	/* Here the port is already unbridged. Reflect the current configuration
	 * so that drivers can program their chips accordingly.
	 */
	dp->bridge_dev = NULL;

	dsa_port_bridge_tx_fwd_unoffload(dp, br);

	err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");

	dsa_port_switchdev_unsync_attrs(dp);
}

int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
	};
	bool tx_enabled;

	if (!dp->lag_dev)
		return 0;

	/* On statically configured aggregates (e.g. loadbalance
	 * without LACP) ports will always be tx_enabled, even if the
	 * link is down. Thus we require both link_up and tx_enabled
	 * in order to include it in the tx set.
	 */
	tx_enabled = linfo->link_up && linfo->tx_enabled;

	if (tx_enabled == dp->lag_tx_enabled)
		return 0;

	dp->lag_tx_enabled = tx_enabled;

	return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
		.info = uinfo,
	};
	struct net_device *bridge_dev;
	int err;

	dsa_lag_map(dp->ds->dst, lag);
	dp->lag_dev = lag;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
	if (err)
		goto err_lag_join;

	bridge_dev = netdev_master_upper_dev_get(lag);
	if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
		return 0;

	err = dsa_port_bridge_join(dp, bridge_dev, extack);
	if (err)
		goto err_bridge_join;

	return 0;

err_bridge_join:
	dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
	dp->lag_dev = NULL;
	dsa_lag_unmap(dp->ds->dst, lag);
	return err;
}

void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	if (dp->bridge_dev)
		dsa_port_pre_bridge_leave(dp, dp->bridge_dev);
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
	struct dsa_notifier_lag_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.lag = lag,
	};
	int err;

	if (!dp->lag_dev)
		return;

	/* Port might have been part of a LAG that in turn was
	 * attached to a bridge.
	 */
	if (dp->bridge_dev)
		dsa_port_bridge_leave(dp, dp->bridge_dev);

	dp->lag_tx_enabled = false;
	dp->lag_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
		       err);

	dsa_lag_unmap(dp->ds->dst, lag);
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
					      bool vlan_filtering,
					      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int err, i;

	/* VLAN awareness was off, so the question is "can we turn it on".
	 * We may have had 8021q uppers, those need to go. Make sure we don't
	 * enter an inconsistent state: deny changing the VLAN awareness state
	 * as long as we have 8021q uppers.
	 */
	if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
		struct net_device *upper_dev, *slave = dp->slave;
		struct net_device *br = dp->bridge_dev;
		struct list_head *iter;

		netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
			struct bridge_vlan_info br_info;
			u16 vid;

			if (!is_vlan_dev(upper_dev))
				continue;

			vid = vlan_dev_vlan_id(upper_dev);

			/* br_vlan_get_info() returns -EINVAL when the device
			 * is not found and -ENOENT when the VID is not found
			 * on it. A return of 0 means the VID exists in the
			 * bridge, which is a failure for us here.
			 */
			err = br_vlan_get_info(br, vid, &br_info);
			if (err == 0) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Must first remove VLAN uppers having VIDs also present in bridge");
				return false;
			}
		}
	}

	if (!ds->vlan_filtering_is_global)
		return true;

	/* For cases where enabling/disabling VLAN awareness is global to the
	 * switch, we need to handle the case where multiple bridges span
	 * different ports of the same switch device and one of them has a
	 * different setting than what is being requested.
	 */
	for (i = 0; i < ds->num_ports; i++) {
		struct net_device *other_bridge;

		other_bridge = dsa_to_port(ds, i)->bridge_dev;
		if (!other_bridge)
			continue;
		/* If it's the same bridge, it also has same
		 * vlan_filtering setting => no need to check
		 */
		if (other_bridge == dp->bridge_dev)
			continue;
		if (br_vlan_enabled(other_bridge) != vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "VLAN filtering is a global setting");
			return false;
		}
	}
	return true;
}

int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	bool apply;
	int err;

	if (!ds->ops->port_vlan_filtering)
		return -EOPNOTSUPP;

	/* We are called from dsa_slave_switchdev_blocking_event(),
	 * which is not under rcu_read_lock(), unlike
	 * dsa_slave_switchdev_event().
	 */
	rcu_read_lock();
	apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
	rcu_read_unlock();
	if (!apply)
		return -EINVAL;

	if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
		return 0;

	err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
					   extack);
	if (err)
		return err;

	if (ds->vlan_filtering_is_global)
		ds->vlan_filtering = vlan_filtering;
	else
		dp->vlan_filtering = vlan_filtering;

	return 0;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!dp->bridge_dev)
		return false;

	return (!ds->configure_vlan_while_not_filtering &&
		!br_vlan_enabled(dp->bridge_dev));
}

int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
	unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
	struct dsa_notifier_ageing_time_info info;
	int err;

	info.ageing_time = ageing_time;

	err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
	if (err)
		return err;

	dp->ageing_time = ageing_time;

	return 0;
}

int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_pre_bridge_flags)
		return -EINVAL;

	return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(const struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_bridge_flags)
		return -EOPNOTSUPP;

	return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool targeted_match)
{
	struct dsa_notifier_mtu_info info = {
		.sw_index = dp->ds->index,
		.targeted_match = targeted_match,
		.port = dp->index,
		.mtu = new_mtu,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

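/* Host addresses terminate on the CPU port rather than on a user port.
 * Besides notifying the switch tree, the address is also added to (or
 * removed from) the DSA master's unicast filter, so that the host
 * actually accepts frames destined to it.
 */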
int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_uc_add(cpu_dp->master, addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid)
{
	struct dsa_notifier_fdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.addr = addr,
		.vid = vid,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_uc_del(cpu_dp->master, addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;

	if (!ds->ops->port_fdb_dump)
		return -EOPNOTSUPP;

	return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

int dsa_port_host_mdb_add(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_mc_add(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_host_mdb_del(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb)
{
	struct dsa_notifier_mdb_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mdb = mdb,
	};
	struct dsa_port *cpu_dp = dp->cpu_dp;
	int err;

	err = dev_mc_del(cpu_dp->master, mdb->addr);
	if (err)
		return err;

	return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
		.extack = extack,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_notifier_vlan_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vlan = vlan,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp)
{
	struct dsa_notifier_mrp_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp)
{
	struct dsa_notifier_mrp_ring_role_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.mrp = mrp,
	};

	return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops)
{
	cpu_dp->rcv = tag_ops->rcv;
	cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
	struct device_node *phy_dn;
	struct phy_device *phydev;

	phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
	if (!phy_dn)
		return NULL;

	phydev = of_phy_find_device(phy_dn);
	if (!phydev) {
		of_node_put(phy_dn);
		return ERR_PTR(-EPROBE_DEFER);
	}

	of_node_put(phy_dn);
	return phydev;
}

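/* The callbacks below adapt the phylink MAC operations to the
 * dsa_switch_ops of the underlying driver. Drivers that do not implement
 * the phylink hooks fall back to the legacy adjust_link callback in the
 * link_down/link_up paths, and report "no link" for in-band state reads.
 */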
static void dsa_port_phylink_validate(struct phylink_config *config,
				      unsigned long *supported,
				      struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_validate)
		return;

	ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
					       struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;
	int err;

	/* Only called for inband modes */
	if (!ds->ops->phylink_mac_link_state) {
		state->link = 0;
		return;
	}

	err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
	if (err < 0) {
		dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
			dp->index, err);
		state->link = 0;
	}
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
					unsigned int mode,
					const struct phylink_link_state *state)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_config)
		return;

	ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_an_restart)
		return;

	ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
					   unsigned int mode,
					   phy_interface_t interface)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct phy_device *phydev = NULL;
	struct dsa_switch *ds = dp->ds;

	if (dsa_is_user_port(ds, dp->index))
		phydev = dp->slave->phydev;

	if (!ds->ops->phylink_mac_link_down) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
					 struct phy_device *phydev,
					 unsigned int mode,
					 phy_interface_t interface,
					 int speed, int duplex,
					 bool tx_pause, bool rx_pause)
{
	struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->phylink_mac_link_up) {
		if (ds->ops->adjust_link && phydev)
			ds->ops->adjust_link(ds, dp->index, phydev);
		return;
	}

	ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
				     speed, duplex, tx_pause, rx_pause);
}

const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
	.validate = dsa_port_phylink_validate,
	.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
	.mac_config = dsa_port_phylink_mac_config,
	.mac_an_restart = dsa_port_phylink_mac_an_restart,
	.mac_link_down = dsa_port_phylink_mac_link_down,
	.mac_link_up = dsa_port_phylink_mac_link_up,
};

static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	int err = 0;

	phydev = dsa_port_get_phy_device(dp);
	if (!phydev)
		return 0;

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	if (enable) {
		err = genphy_resume(phydev);
		if (err < 0)
			goto err_put_dev;

		err = genphy_read_status(phydev);
		if (err < 0)
			goto err_put_dev;
	} else {
		err = genphy_suspend(phydev);
		if (err < 0)
			goto err_put_dev;
	}

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	dev_dbg(ds->dev, "enabled port's phy: %s", phydev_name(phydev));

err_put_dev:
	put_device(&phydev->mdio.dev);
	return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
	struct device_node *dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	struct phy_device *phydev;
	int port = dp->index;
	phy_interface_t mode;
	int err;

	err = of_phy_register_fixed_link(dn);
	if (err) {
		dev_err(ds->dev,
			"failed to register the fixed PHY of port %d\n",
			port);
		return err;
	}

	phydev = of_phy_find_device(dn);
	if (!phydev) {
		/* Should not happen since the fixed link was just
		 * registered, but don't dereference a NULL pointer.
		 */
		of_phy_deregister_fixed_link(dn);
		return -ENODEV;
	}

	err = of_get_phy_mode(dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;
	phydev->interface = mode;

	genphy_read_status(phydev);

	if (ds->ops->adjust_link)
		ds->ops->adjust_link(ds, port, phydev);

	put_device(&phydev->mdio.dev);

	return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *port_dn = dp->dn;
	phy_interface_t mode;
	int err;

	err = of_get_phy_mode(port_dn, &mode);
	if (err)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = ds->dev;
	dp->pl_config.type = PHYLINK_DEV;
	dp->pl_config.pcs_poll = ds->pcs_poll;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
				mode, &dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	err = phylink_of_phy_connect(dp->pl, port_dn, 0);
	if (err && err != -ENODEV) {
		pr_err("could not attach to PHY: %d\n", err);
		goto err_phy_connect;
	}

	return 0;

err_phy_connect:
	phylink_destroy(dp->pl);
	return err;
}

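/* If the driver implements the modern phylink ops (no adjust_link), the
 * port's link is managed through phylink whenever a fixed link or a
 * phy-handle is described in the device tree; the MAC is taken down first
 * since phylink expects to start with the link down. Drivers still using
 * the legacy adjust_link callback take the phylib paths below, with a
 * warning nudging them to convert.
 */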
int dsa_port_link_register_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct device_node *phy_np;
	int port = dp->index;

	if (!ds->ops->adjust_link) {
		phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
		if (of_phy_is_fixed_link(dp->dn) || phy_np) {
			if (ds->ops->phylink_mac_link_down)
				ds->ops->phylink_mac_link_down(ds, port,
					MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
			return dsa_port_phylink_register(dp);
		}
		return 0;
	}

	dev_warn(ds->dev,
		 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

	if (of_phy_is_fixed_link(dp->dn))
		return dsa_port_fixed_link_register_of(dp);
	else
		return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->adjust_link && dp->pl) {
		rtnl_lock();
		phylink_disconnect_phy(dp->pl);
		rtnl_unlock();
		phylink_destroy(dp->pl);
		dp->pl = NULL;
		return;
	}

	if (of_phy_is_fixed_link(dp->dn))
		of_phy_deregister_fixed_link(dp->dn);
	else
		dsa_port_setup_phy_of(dp, false);
}

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_strings(phydev, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_stats(phydev, NULL, data);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);

int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
	struct phy_device *phydev;
	int ret = -EOPNOTSUPP;

	if (of_phy_is_fixed_link(dp->dn))
		return ret;

	phydev = dsa_port_get_phy_device(dp);
	if (IS_ERR_OR_NULL(phydev))
		return ret;

	ret = phy_ethtool_get_sset_count(phydev);
	put_device(&phydev->mdio.dev);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = hsr;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
	if (err)
		dp->hsr_dev = NULL;

	return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
	struct dsa_notifier_hsr_info info = {
		.sw_index = dp->ds->index,
		.port = dp->index,
		.hsr = hsr,
	};
	int err;

	dp->hsr_dev = NULL;

	err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
	if (err)
		pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
}

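/* tag_8021q VLANs belong to the tagging protocol rather than to the
 * bridge, and cross-chip setups may need them installed on switches in
 * other trees too, so they are sent with dsa_broadcast() rather than
 * dsa_port_notify(), identified by (tree_index, sw_index, port).
 */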
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};

	return dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, &info);
}

void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
{
	struct dsa_notifier_tag_8021q_vlan_info info = {
		.tree_index = dp->ds->dst->index,
		.sw_index = dp->ds->index,
		.port = dp->index,
		.vid = vid,
	};
	int err;

	err = dsa_broadcast(DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, &info);
	if (err)
		pr_err("DSA: failed to notify tag_8021q VLAN deletion: %pe\n",
		       ERR_PTR(err));
}