net/dsa/port.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handling of a single switch port
 *
 * Copyright (c) 2017 Savoir-faire Linux Inc.
 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 */

#include <linux/if_bridge.h>
#include <linux/notifier.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "dsa_priv.h"

/**
 * dsa_port_notify - Notify the switching fabric of changes to a port
 * @dp: port on which change occurred
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Notify all switches in the DSA tree that this port's switch belongs to,
 * including this switch itself, of an event. Allows the other switches to
 * reconfigure themselves for cross-chip operations. Can also be used to
 * reconfigure ports without net_devices (CPU ports, DSA links) whenever
 * a user port's state changes.
 */
static int dsa_port_notify(const struct dsa_port *dp, unsigned long e, void *v)
{
        return dsa_tree_notify(dp->ds->dst, e, v);
}

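/* Usage sketch (illustrative; it simply mirrors the helpers further down in
 * this file): callers package the event-specific data into the matching
 * dsa_notifier_*_info structure and fan it out to the whole fabric, e.g.:
 *
 *	struct dsa_notifier_mtu_info info = {
 *		.sw_index = dp->ds->index,
 *		.port = dp->index,
 *		.mtu = new_mtu,
 *	};
 *
 *	return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
 */
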
int dsa_port_set_state(struct dsa_port *dp, u8 state)
{
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;

        if (!ds->ops->port_stp_state_set)
                return -EOPNOTSUPP;

        ds->ops->port_stp_state_set(ds, port, state);

        if (ds->ops->port_fast_age) {
                /* Fast age FDB entries or flush the appropriate forwarding
                 * database for the given port, if we are moving it from the
                 * Learning or Forwarding state to the Disabled, Blocking or
                 * Listening state.
                 */

                if ((dp->stp_state == BR_STATE_LEARNING ||
                     dp->stp_state == BR_STATE_FORWARDING) &&
                    (state == BR_STATE_DISABLED ||
                     state == BR_STATE_BLOCKING ||
                     state == BR_STATE_LISTENING))
                        ds->ops->port_fast_age(ds, port);
        }

        dp->stp_state = state;

        return 0;
}

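/* Illustrative call sequence (hypothetical bridge event, assuming the driver
 * implements both ->port_stp_state_set() and ->port_fast_age()): a port that
 * was forwarding and gets blocked has its learned addresses flushed:
 *
 *	dsa_port_set_state(dp, BR_STATE_BLOCKING);
 *		-> ds->ops->port_stp_state_set(ds, port, BR_STATE_BLOCKING);
 *		-> ds->ops->port_fast_age(ds, port);
 */
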
static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
{
        int err;

        err = dsa_port_set_state(dp, state);
        if (err)
                pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
}

int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
{
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;
        int err;

        if (ds->ops->port_enable) {
                err = ds->ops->port_enable(ds, port, phy);
                if (err)
                        return err;
        }

        if (!dp->bridge_dev)
                dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

        if (dp->pl)
                phylink_start(dp->pl);

        return 0;
}

int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
{
        int err;

        rtnl_lock();
        err = dsa_port_enable_rt(dp, phy);
        rtnl_unlock();

        return err;
}

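/* Locking sketch: the _rt variants assume the caller already holds the RTNL
 * mutex, while the plain wrappers take it themselves (hypothetical callers):
 *
 *	ASSERT_RTNL();
 *	err = dsa_port_enable_rt(dp, phy);	// caller already under rtnl_lock()
 *
 *	err = dsa_port_enable(dp, phy);		// takes rtnl_lock() internally
 */
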
void dsa_port_disable_rt(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;

        if (dp->pl)
                phylink_stop(dp->pl);

        if (!dp->bridge_dev)
                dsa_port_set_state_now(dp, BR_STATE_DISABLED);

        if (ds->ops->port_disable)
                ds->ops->port_disable(ds, port);
}

void dsa_port_disable(struct dsa_port *dp)
{
        rtnl_lock();
        dsa_port_disable_rt(dp);
        rtnl_unlock();
}

static int dsa_port_inherit_brport_flags(struct dsa_port *dp,
                                         struct netlink_ext_ack *extack)
{
        const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
                                   BR_BCAST_FLOOD;
        struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
        int flag, err;

        for_each_set_bit(flag, &mask, 32) {
                struct switchdev_brport_flags flags = {0};

                flags.mask = BIT(flag);

                if (br_port_flag_is_set(brport_dev, BIT(flag)))
                        flags.val = BIT(flag);

                err = dsa_port_bridge_flags(dp, flags, extack);
                if (err && err != -EOPNOTSUPP)
                        return err;
        }

        return 0;
}

static void dsa_port_clear_brport_flags(struct dsa_port *dp)
{
        const unsigned long val = BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
        const unsigned long mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
                                   BR_BCAST_FLOOD;
        int flag, err;

        for_each_set_bit(flag, &mask, 32) {
                struct switchdev_brport_flags flags = {0};

                flags.mask = BIT(flag);
                flags.val = val & BIT(flag);

                err = dsa_port_bridge_flags(dp, flags, NULL);
                if (err && err != -EOPNOTSUPP)
                        dev_err(dp->ds->dev,
                                "failed to clear bridge port flag %lu: %pe\n",
                                flags.val, ERR_PTR(err));
        }
}

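/* Resulting standalone configuration (sketch of what the loop above requests
 * from the driver): flood everything, learn nothing, i.e.
 *
 *	BR_LEARNING	-> off
 *	BR_FLOOD	-> on
 *	BR_MCAST_FLOOD	-> on
 *	BR_BCAST_FLOOD	-> on
 */
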
static int dsa_port_switchdev_sync(struct dsa_port *dp,
                                   struct netlink_ext_ack *extack)
{
        struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
        struct net_device *br = dp->bridge_dev;
        int err;

        err = dsa_port_inherit_brport_flags(dp, extack);
        if (err)
                return err;

        err = dsa_port_set_state(dp, br_port_get_stp_state(brport_dev));
        if (err && err != -EOPNOTSUPP)
                return err;

        err = dsa_port_vlan_filtering(dp, br_vlan_enabled(br), extack);
        if (err && err != -EOPNOTSUPP)
                return err;

        err = dsa_port_mrouter(dp->cpu_dp, br_multicast_router(br), extack);
        if (err && err != -EOPNOTSUPP)
                return err;

        err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
        if (err && err != -EOPNOTSUPP)
                return err;

        err = br_mdb_replay(br, brport_dev, dp, true,
                            &dsa_slave_switchdev_blocking_notifier, extack);
        if (err && err != -EOPNOTSUPP)
                return err;

        /* Forwarding and termination FDB entries on the port */
        err = br_fdb_replay(br, brport_dev, dp, true,
                            &dsa_slave_switchdev_notifier);
        if (err && err != -EOPNOTSUPP)
                return err;

        /* Termination FDB entries on the bridge itself */
        err = br_fdb_replay(br, br, dp, true, &dsa_slave_switchdev_notifier);
        if (err && err != -EOPNOTSUPP)
                return err;

        err = br_vlan_replay(br, brport_dev, dp, true,
                             &dsa_slave_switchdev_blocking_notifier, extack);
        if (err && err != -EOPNOTSUPP)
                return err;

        return 0;
}

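/* Note on the error handling pattern above (and mirrored in the unsync path
 * below): -EOPNOTSUPP from an individual attribute or object replay is
 * deliberately tolerated, so a switch that cannot offload one feature can
 * still join the bridge; only other errors abort the operation, e.g.:
 *
 *	err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
 *	if (err && err != -EOPNOTSUPP)
 *		return err;
 */
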
static int dsa_port_switchdev_unsync_objs(struct dsa_port *dp,
                                          struct net_device *br,
                                          struct netlink_ext_ack *extack)
{
        struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
        int err;

        /* Delete the switchdev objects left on this port */
        err = br_mdb_replay(br, brport_dev, dp, false,
                            &dsa_slave_switchdev_blocking_notifier, extack);
        if (err && err != -EOPNOTSUPP)
                return err;

        /* Forwarding and termination FDB entries on the port */
        err = br_fdb_replay(br, brport_dev, dp, false,
                            &dsa_slave_switchdev_notifier);
        if (err && err != -EOPNOTSUPP)
                return err;

        /* Termination FDB entries on the bridge itself */
        err = br_fdb_replay(br, br, dp, false, &dsa_slave_switchdev_notifier);
        if (err && err != -EOPNOTSUPP)
                return err;

        err = br_vlan_replay(br, brport_dev, dp, false,
                             &dsa_slave_switchdev_blocking_notifier, extack);
        if (err && err != -EOPNOTSUPP)
                return err;

        return 0;
}

static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
{
        /* Configure the port for standalone mode (no address learning,
         * flood everything).
         * The bridge only emits SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS events
         * when the user requests it through netlink or sysfs, but not
         * automatically at port join or leave, so we need to handle resetting
         * the brport flags ourselves. We actually prefer it that way, because
         * otherwise some setups would never get the notification they need:
         * for example, when a port leaves a LAG that offloads the bridge, it
         * becomes standalone, but as far as the bridge is concerned, no port
         * ever left.
         */
        dsa_port_clear_brport_flags(dp);

        /* The port left the bridge and was put in BR_STATE_DISABLED by the
         * bridge layer, so put it back in BR_STATE_FORWARDING to keep it
         * functional as a standalone port.
         */
        dsa_port_set_state_now(dp, BR_STATE_FORWARDING);

        /* VLAN filtering is handled by dsa_switch_bridge_leave */

        /* Some drivers handle the notification of a local multicast router by
         * allowing multicast to be flooded to the CPU, so we should allow this
         * in standalone mode too.
         */
        dsa_port_mrouter(dp->cpu_dp, true, NULL);

        /* Ageing time may be global to the switch chip, so don't change it
         * here because we have no good reason (or value) to change it to.
         */
}

int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
                         struct netlink_ext_ack *extack)
{
        struct dsa_notifier_bridge_info info = {
                .tree_index = dp->ds->dst->index,
                .sw_index = dp->ds->index,
                .port = dp->index,
                .br = br,
        };
        int err;

        /* Here the interface is already bridged. Reflect the current
         * configuration so that drivers can program their chips accordingly.
         */
        dp->bridge_dev = br;

        err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
        if (err)
                goto out_rollback;

        err = dsa_port_switchdev_sync(dp, extack);
        if (err)
                goto out_rollback_unbridge;

        return 0;

out_rollback_unbridge:
        dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
        dp->bridge_dev = NULL;
        return err;
}

int dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br,
                              struct netlink_ext_ack *extack)
{
        return dsa_port_switchdev_unsync_objs(dp, br, extack);
}

void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
{
        struct dsa_notifier_bridge_info info = {
                .tree_index = dp->ds->dst->index,
                .sw_index = dp->ds->index,
                .port = dp->index,
                .br = br,
        };
        int err;

        /* Here the port is already unbridged. Reflect the current
         * configuration so that drivers can program their chips accordingly.
         */
        dp->bridge_dev = NULL;

        err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
        if (err)
                pr_err("DSA: failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");

        dsa_port_switchdev_unsync_attrs(dp);
}

int dsa_port_lag_change(struct dsa_port *dp,
                        struct netdev_lag_lower_state_info *linfo)
{
        struct dsa_notifier_lag_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
        };
        bool tx_enabled;

        if (!dp->lag_dev)
                return 0;

        /* On statically configured aggregates (e.g. loadbalance
         * without LACP) ports will always be tx_enabled, even if the
         * link is down. Thus we require both link_up and tx_enabled
         * in order to include it in the tx set.
         */
        tx_enabled = linfo->link_up && linfo->tx_enabled;

        if (tx_enabled == dp->lag_tx_enabled)
                return 0;

        dp->lag_tx_enabled = tx_enabled;

        return dsa_port_notify(dp, DSA_NOTIFIER_LAG_CHANGE, &info);
}

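/* Sketch of the resulting decision (values taken from the lower state info
 * handled above -> whether the port is part of the LAG's tx set):
 *
 *	link_up	tx_enabled	-> in tx set
 *	0	1		-> no  (static aggregate with the link down)
 *	1	0		-> no  (e.g. LACP partner not yet in sync)
 *	1	1		-> yes
 */
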
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag,
                      struct netdev_lag_upper_info *uinfo,
                      struct netlink_ext_ack *extack)
{
        struct dsa_notifier_lag_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .lag = lag,
                .info = uinfo,
        };
        struct net_device *bridge_dev;
        int err;

        dsa_lag_map(dp->ds->dst, lag);
        dp->lag_dev = lag;

        err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_JOIN, &info);
        if (err)
                goto err_lag_join;

        bridge_dev = netdev_master_upper_dev_get(lag);
        if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
                return 0;

        err = dsa_port_bridge_join(dp, bridge_dev, extack);
        if (err)
                goto err_bridge_join;

        return 0;

err_bridge_join:
        dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
err_lag_join:
        dp->lag_dev = NULL;
        dsa_lag_unmap(dp->ds->dst, lag);
        return err;
}

int dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag,
                           struct netlink_ext_ack *extack)
{
        if (dp->bridge_dev)
                return dsa_port_pre_bridge_leave(dp, dp->bridge_dev, extack);

        return 0;
}

void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
        struct dsa_notifier_lag_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .lag = lag,
        };
        int err;

        if (!dp->lag_dev)
                return;

        /* Port might have been part of a LAG that in turn was
         * attached to a bridge.
         */
        if (dp->bridge_dev)
                dsa_port_bridge_leave(dp, dp->bridge_dev);

        dp->lag_tx_enabled = false;
        dp->lag_dev = NULL;

        err = dsa_port_notify(dp, DSA_NOTIFIER_LAG_LEAVE, &info);
        if (err)
                pr_err("DSA: failed to notify DSA_NOTIFIER_LAG_LEAVE: %d\n",
                       err);

        dsa_lag_unmap(dp->ds->dst, lag);
}

/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
                                              bool vlan_filtering,
                                              struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dp->ds;
        int err, i;

        /* VLAN awareness was off, so the question is "can we turn it on".
         * We may have had 8021q uppers, those need to go. Make sure we don't
         * enter an inconsistent state: deny changing the VLAN awareness state
         * as long as we have 8021q uppers.
         */
        if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
                struct net_device *upper_dev, *slave = dp->slave;
                struct net_device *br = dp->bridge_dev;
                struct list_head *iter;

                netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
                        struct bridge_vlan_info br_info;
                        u16 vid;

                        if (!is_vlan_dev(upper_dev))
                                continue;

                        vid = vlan_dev_vlan_id(upper_dev);

                        /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
                         * device respectively the VID is not found. A return
                         * of 0 means the VID exists on the bridge, which for
                         * our purposes here is a failure.
                         */
                        err = br_vlan_get_info(br, vid, &br_info);
                        if (err == 0) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Must first remove VLAN uppers having VIDs also present in bridge");
                                return false;
                        }
                }
        }

        if (!ds->vlan_filtering_is_global)
                return true;

        /* For cases where enabling/disabling VLAN awareness is global to the
         * switch, we need to handle the case where multiple bridges span
         * different ports of the same switch device and one of them has a
         * different setting than what is being requested.
         */
        for (i = 0; i < ds->num_ports; i++) {
                struct net_device *other_bridge;

                other_bridge = dsa_to_port(ds, i)->bridge_dev;
                if (!other_bridge)
                        continue;
                /* If it's the same bridge, it also has the same
                 * vlan_filtering setting => no need to check.
                 */
                if (other_bridge == dp->bridge_dev)
                        continue;
                if (br_vlan_enabled(other_bridge) != vlan_filtering) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "VLAN filtering is a global setting");
                        return false;
                }
        }
        return true;
}

int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
                            struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dp->ds;
        bool apply;
        int err;

        if (!ds->ops->port_vlan_filtering)
                return -EOPNOTSUPP;

        /* We are called from dsa_slave_switchdev_blocking_event(),
         * which is not under rcu_read_lock(), unlike
         * dsa_slave_switchdev_event().
         */
        rcu_read_lock();
        apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack);
        rcu_read_unlock();
        if (!apply)
                return -EINVAL;

        if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
                return 0;

        err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
                                           extack);
        if (err)
                return err;

        if (ds->vlan_filtering_is_global)
                ds->vlan_filtering = vlan_filtering;
        else
                dp->vlan_filtering = vlan_filtering;

        return 0;
}

/* This enforces legacy behavior for switch drivers which assume they can't
 * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0
 */
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;

        if (!dp->bridge_dev)
                return false;

        return (!ds->configure_vlan_while_not_filtering &&
                !br_vlan_enabled(dp->bridge_dev));
}

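/* Driver-side sketch (hypothetical "foo" driver; assumes its hardware can be
 * programmed with VLANs regardless of the bridge's vlan_filtering state):
 * opting out of the legacy behavior above is done from the driver's setup
 * callback, after which VLAN objects are always forwarded to it:
 *
 *	static int foo_setup(struct dsa_switch *ds)
 *	{
 *		ds->configure_vlan_while_not_filtering = true;
 *		...
 *		return 0;
 *	}
 */
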
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
{
        unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock);
        unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
        struct dsa_notifier_ageing_time_info info;
        int err;

        info.ageing_time = ageing_time;

        err = dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info);
        if (err)
                return err;

        dp->ageing_time = ageing_time;

        return 0;
}

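/* Worked example (assuming the common USER_HZ of 100): the bridge default
 * ageing time of 300 seconds arrives here as ageing_clock = 300 * 100 = 30000
 * user ticks; clock_t_to_jiffies() converts that back to jiffies and
 * jiffies_to_msecs() yields 300000 ms, which is what drivers then receive in
 * info.ageing_time via DSA_NOTIFIER_AGEING_TIME.
 */
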
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
                              struct switchdev_brport_flags flags,
                              struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->port_pre_bridge_flags)
                return -EINVAL;

        return ds->ops->port_pre_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_bridge_flags(const struct dsa_port *dp,
                          struct switchdev_brport_flags flags,
                          struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->port_bridge_flags)
                return -EOPNOTSUPP;

        return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
}

int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
                     struct netlink_ext_ack *extack)
{
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->port_set_mrouter)
                return -EOPNOTSUPP;

        return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
}

int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
                        bool targeted_match)
{
        struct dsa_notifier_mtu_info info = {
                .sw_index = dp->ds->index,
                .targeted_match = targeted_match,
                .port = dp->index,
                .mtu = new_mtu,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MTU, &info);
}

int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid)
{
        struct dsa_notifier_fdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .addr = addr,
                .vid = vid,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info);
}

int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid)
{
        struct dsa_notifier_fdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .addr = addr,
                .vid = vid,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info);
}

int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                          u16 vid)
{
        struct dsa_notifier_fdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .addr = addr,
                .vid = vid,
        };
        struct dsa_port *cpu_dp = dp->cpu_dp;
        int err;

        err = dev_uc_add(cpu_dp->master, addr);
        if (err)
                return err;

        return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}

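/* Sketch of the "host" address model used here and by the host MDB helpers
 * further down: the address is installed both in the DSA master's RX filter
 * (so the master NIC does not drop the frame) and, via the cross-chip
 * notifier, as an entry pointing at the CPU port of the switch:
 *
 *	dev_uc_add(dp->cpu_dp->master, addr);
 *	dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
 */
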
int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
                          u16 vid)
{
        struct dsa_notifier_fdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .addr = addr,
                .vid = vid,
        };
        struct dsa_port *cpu_dp = dp->cpu_dp;
        int err;

        err = dev_uc_del(cpu_dp->master, addr);
        if (err)
                return err;

        return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}

int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data)
{
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;

        if (!ds->ops->port_fdb_dump)
                return -EOPNOTSUPP;

        return ds->ops->port_fdb_dump(ds, port, cb, data);
}

int dsa_port_mdb_add(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb)
{
        struct dsa_notifier_mdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mdb = mdb,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MDB_ADD, &info);
}

int dsa_port_mdb_del(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb)
{
        struct dsa_notifier_mdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mdb = mdb,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info);
}

int dsa_port_host_mdb_add(const struct dsa_port *dp,
                          const struct switchdev_obj_port_mdb *mdb)
{
        struct dsa_notifier_mdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mdb = mdb,
        };
        struct dsa_port *cpu_dp = dp->cpu_dp;
        int err;

        err = dev_mc_add(cpu_dp->master, mdb->addr);
        if (err)
                return err;

        return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_ADD, &info);
}

int dsa_port_host_mdb_del(const struct dsa_port *dp,
                          const struct switchdev_obj_port_mdb *mdb)
{
        struct dsa_notifier_mdb_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mdb = mdb,
        };
        struct dsa_port *cpu_dp = dp->cpu_dp;
        int err;

        err = dev_mc_del(cpu_dp->master, mdb->addr);
        if (err)
                return err;

        return dsa_port_notify(dp, DSA_NOTIFIER_HOST_MDB_DEL, &info);
}

int dsa_port_vlan_add(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan,
                      struct netlink_ext_ack *extack)
{
        struct dsa_notifier_vlan_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .vlan = vlan,
                .extack = extack,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
}

int dsa_port_vlan_del(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan)
{
        struct dsa_notifier_vlan_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .vlan = vlan,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}

int dsa_port_mrp_add(const struct dsa_port *dp,
                     const struct switchdev_obj_mrp *mrp)
{
        struct dsa_notifier_mrp_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mrp = mrp,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
}

int dsa_port_mrp_del(const struct dsa_port *dp,
                     const struct switchdev_obj_mrp *mrp)
{
        struct dsa_notifier_mrp_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mrp = mrp,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
}

int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
                               const struct switchdev_obj_ring_role_mrp *mrp)
{
        struct dsa_notifier_mrp_ring_role_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mrp = mrp,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
}

int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
                               const struct switchdev_obj_ring_role_mrp *mrp)
{
        struct dsa_notifier_mrp_ring_role_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .mrp = mrp,
        };

        return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
}

void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
                               const struct dsa_device_ops *tag_ops)
{
        cpu_dp->filter = tag_ops->filter;
        cpu_dp->rcv = tag_ops->rcv;
        cpu_dp->tag_ops = tag_ops;
}

static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
        struct device_node *phy_dn;
        struct phy_device *phydev;

        phy_dn = of_parse_phandle(dp->dn, "phy-handle", 0);
        if (!phy_dn)
                return NULL;

        phydev = of_phy_find_device(phy_dn);
        if (!phydev) {
                of_node_put(phy_dn);
                return ERR_PTR(-EPROBE_DEFER);
        }

        of_node_put(phy_dn);
        return phydev;
}

static void dsa_port_phylink_validate(struct phylink_config *config,
                                      unsigned long *supported,
                                      struct phylink_link_state *state)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->phylink_validate)
                return;

        ds->ops->phylink_validate(ds, dp->index, supported, state);
}

static void dsa_port_phylink_mac_pcs_get_state(struct phylink_config *config,
                                               struct phylink_link_state *state)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct dsa_switch *ds = dp->ds;
        int err;

        /* Only called for inband modes */
        if (!ds->ops->phylink_mac_link_state) {
                state->link = 0;
                return;
        }

        err = ds->ops->phylink_mac_link_state(ds, dp->index, state);
        if (err < 0) {
                dev_err(ds->dev, "p%d: phylink_mac_link_state() failed: %d\n",
                        dp->index, err);
                state->link = 0;
        }
}

static void dsa_port_phylink_mac_config(struct phylink_config *config,
                                        unsigned int mode,
                                        const struct phylink_link_state *state)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->phylink_mac_config)
                return;

        ds->ops->phylink_mac_config(ds, dp->index, mode, state);
}

static void dsa_port_phylink_mac_an_restart(struct phylink_config *config)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->phylink_mac_an_restart)
                return;

        ds->ops->phylink_mac_an_restart(ds, dp->index);
}

static void dsa_port_phylink_mac_link_down(struct phylink_config *config,
                                           unsigned int mode,
                                           phy_interface_t interface)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct phy_device *phydev = NULL;
        struct dsa_switch *ds = dp->ds;

        if (dsa_is_user_port(ds, dp->index))
                phydev = dp->slave->phydev;

        if (!ds->ops->phylink_mac_link_down) {
                if (ds->ops->adjust_link && phydev)
                        ds->ops->adjust_link(ds, dp->index, phydev);
                return;
        }

        ds->ops->phylink_mac_link_down(ds, dp->index, mode, interface);
}

static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
                                         struct phy_device *phydev,
                                         unsigned int mode,
                                         phy_interface_t interface,
                                         int speed, int duplex,
                                         bool tx_pause, bool rx_pause)
{
        struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->phylink_mac_link_up) {
                if (ds->ops->adjust_link && phydev)
                        ds->ops->adjust_link(ds, dp->index, phydev);
                return;
        }

        ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
                                     speed, duplex, tx_pause, rx_pause);
}

const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
        .validate = dsa_port_phylink_validate,
        .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
        .mac_config = dsa_port_phylink_mac_config,
        .mac_an_restart = dsa_port_phylink_mac_an_restart,
        .mac_link_down = dsa_port_phylink_mac_link_down,
        .mac_link_up = dsa_port_phylink_mac_link_up,
};

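/* Driver-side sketch (hypothetical "foo" driver; foo_priv and
 * foo_set_port_mode() are placeholders): the phylink_mac_ops above only
 * resolve the dsa_port and forward to the corresponding dsa_switch_ops hook,
 * so a driver implements e.g.:
 *
 *	static void foo_phylink_mac_config(struct dsa_switch *ds, int port,
 *					   unsigned int mode,
 *					   const struct phylink_link_state *state)
 *	{
 *		struct foo_priv *priv = ds->priv;
 *
 *		foo_set_port_mode(priv, port, state->interface);
 *	}
 *
 *	static const struct dsa_switch_ops foo_switch_ops = {
 *		...
 *		.phylink_mac_config	= foo_phylink_mac_config,
 *	};
 */
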
static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
        struct dsa_switch *ds = dp->ds;
        struct phy_device *phydev;
        int port = dp->index;
        int err = 0;

        phydev = dsa_port_get_phy_device(dp);
        if (!phydev)
                return 0;

        if (IS_ERR(phydev))
                return PTR_ERR(phydev);

        if (enable) {
                err = genphy_resume(phydev);
                if (err < 0)
                        goto err_put_dev;

                err = genphy_read_status(phydev);
                if (err < 0)
                        goto err_put_dev;
        } else {
                err = genphy_suspend(phydev);
                if (err < 0)
                        goto err_put_dev;
        }

        if (ds->ops->adjust_link)
                ds->ops->adjust_link(ds, port, phydev);

        dev_dbg(ds->dev, "%s port's phy: %s\n",
                enable ? "enabled" : "disabled", phydev_name(phydev));

err_put_dev:
        put_device(&phydev->mdio.dev);
        return err;
}

static int dsa_port_fixed_link_register_of(struct dsa_port *dp)
{
        struct device_node *dn = dp->dn;
        struct dsa_switch *ds = dp->ds;
        struct phy_device *phydev;
        int port = dp->index;
        phy_interface_t mode;
        int err;

        err = of_phy_register_fixed_link(dn);
        if (err) {
                dev_err(ds->dev,
                        "failed to register the fixed PHY of port %d\n",
                        port);
                return err;
        }

        phydev = of_phy_find_device(dn);

        err = of_get_phy_mode(dn, &mode);
        if (err)
                mode = PHY_INTERFACE_MODE_NA;
        phydev->interface = mode;

        genphy_read_status(phydev);

        if (ds->ops->adjust_link)
                ds->ops->adjust_link(ds, port, phydev);

        put_device(&phydev->mdio.dev);

        return 0;
}

static int dsa_port_phylink_register(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;
        struct device_node *port_dn = dp->dn;
        phy_interface_t mode;
        int err;

        err = of_get_phy_mode(port_dn, &mode);
        if (err)
                mode = PHY_INTERFACE_MODE_NA;

        dp->pl_config.dev = ds->dev;
        dp->pl_config.type = PHYLINK_DEV;
        dp->pl_config.pcs_poll = ds->pcs_poll;

        dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
                                mode, &dsa_port_phylink_mac_ops);
        if (IS_ERR(dp->pl)) {
                pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
                return PTR_ERR(dp->pl);
        }

        err = phylink_of_phy_connect(dp->pl, port_dn, 0);
        if (err && err != -ENODEV) {
                pr_err("could not attach to PHY: %d\n", err);
                goto err_phy_connect;
        }

        return 0;

err_phy_connect:
        phylink_destroy(dp->pl);
        return err;
}

int dsa_port_link_register_of(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;
        struct device_node *phy_np;
        int port = dp->index;

        if (!ds->ops->adjust_link) {
                phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
                if (of_phy_is_fixed_link(dp->dn) || phy_np) {
                        if (ds->ops->phylink_mac_link_down)
                                ds->ops->phylink_mac_link_down(ds, port,
                                        MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
                        return dsa_port_phylink_register(dp);
                }
                return 0;
        }

        dev_warn(ds->dev,
                 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");

        if (of_phy_is_fixed_link(dp->dn))
                return dsa_port_fixed_link_register_of(dp);
        else
                return dsa_port_setup_phy_of(dp, true);
}

void dsa_port_link_unregister_of(struct dsa_port *dp)
{
        struct dsa_switch *ds = dp->ds;

        if (!ds->ops->adjust_link && dp->pl) {
                rtnl_lock();
                phylink_disconnect_phy(dp->pl);
                rtnl_unlock();
                phylink_destroy(dp->pl);
                dp->pl = NULL;
                return;
        }

        if (of_phy_is_fixed_link(dp->dn))
                of_phy_deregister_fixed_link(dp->dn);
        else
                dsa_port_setup_phy_of(dp, false);
}

int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data)
{
        struct phy_device *phydev;
        int ret = -EOPNOTSUPP;

        if (of_phy_is_fixed_link(dp->dn))
                return ret;

        phydev = dsa_port_get_phy_device(dp);
        if (IS_ERR_OR_NULL(phydev))
                return ret;

        ret = phy_ethtool_get_strings(phydev, data);
        put_device(&phydev->mdio.dev);

        return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_strings);

int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data)
{
        struct phy_device *phydev;
        int ret = -EOPNOTSUPP;

        if (of_phy_is_fixed_link(dp->dn))
                return ret;

        phydev = dsa_port_get_phy_device(dp);
        if (IS_ERR_OR_NULL(phydev))
                return ret;

        ret = phy_ethtool_get_stats(phydev, NULL, data);
        put_device(&phydev->mdio.dev);

        return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_ethtool_phy_stats);

int dsa_port_get_phy_sset_count(struct dsa_port *dp)
{
        struct phy_device *phydev;
        int ret = -EOPNOTSUPP;

        if (of_phy_is_fixed_link(dp->dn))
                return ret;

        phydev = dsa_port_get_phy_device(dp);
        if (IS_ERR_OR_NULL(phydev))
                return ret;

        ret = phy_ethtool_get_sset_count(phydev);
        put_device(&phydev->mdio.dev);

        return ret;
}
EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);

int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
        struct dsa_notifier_hsr_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .hsr = hsr,
        };
        int err;

        dp->hsr_dev = hsr;

        err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
        if (err)
                dp->hsr_dev = NULL;

        return err;
}

void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
        struct dsa_notifier_hsr_info info = {
                .sw_index = dp->ds->index,
                .port = dp->index,
                .hsr = hsr,
        };
        int err;

        dp->hsr_dev = NULL;

        err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
        if (err)
                pr_err("DSA: failed to notify DSA_NOTIFIER_HSR_LEAVE\n");
}