// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/slave.c - Slave device handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>
#include <linux/ptp_classify.h>

#include "dsa_priv.h"

static bool dsa_slave_dev_check(const struct net_device *dev);

/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_read(ds, addr, reg);

	return 0xffff;
}

static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
	struct dsa_switch *ds = bus->priv;

	if (ds->phys_mii_mask & (1 << addr))
		return ds->ops->phy_write(ds, addr, reg, val);

	return 0;
}

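/* Expose the switch's internal MDIO bus to the kernel as "dsa slave smi":
 * reads and writes are forwarded to the driver's phy_read/phy_write ops for
 * the ports present in phys_mii_mask, all other addresses are masked out.
 */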
void dsa_slave_mii_bus_init(struct dsa_switch *ds)
{
	ds->slave_mii_bus->priv = (void *)ds;
	ds->slave_mii_bus->name = "dsa slave smi";
	ds->slave_mii_bus->read = dsa_slave_phy_read;
	ds->slave_mii_bus->write = dsa_slave_phy_write;
	snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d",
		 ds->dst->index, ds->index);
	ds->slave_mii_bus->parent = ds->dev;
	ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}


/* slave device handling ****************************************************/
static int dsa_slave_get_iflink(const struct net_device *dev)
{
	return dsa_slave_to_master(dev)->ifindex;
}

static int dsa_slave_open(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	if (!(master->flags & IFF_UP))
		return -ENETDOWN;

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) {
		err = dev_uc_add(master, dev->dev_addr);
		if (err < 0)
			goto out;
	}

	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(master, 1);
		if (err < 0)
			goto del_unicast;
	}
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(master, 1);
		if (err < 0)
			goto clear_allmulti;
	}

	err = dsa_port_enable(dp, dev->phydev);
	if (err)
		goto clear_promisc;

	phylink_start(dp->pl);

	return 0;

clear_promisc:
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);
clear_allmulti:
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
del_unicast:
	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);
out:
	return err;
}

static int dsa_slave_close(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);

	cancel_work_sync(&dp->xmit_work);
	skb_queue_purge(&dp->xmit_queue);

	phylink_stop(dp->pl);

	dsa_port_disable(dp);

	dev_mc_unsync(master, dev);
	dev_uc_unsync(master, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(master, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(master, -1);

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

	return 0;
}

static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *master = dsa_slave_to_master(dev);
	if (dev->flags & IFF_UP) {
		if (change & IFF_ALLMULTI)
			dev_set_allmulti(master,
					 dev->flags & IFF_ALLMULTI ? 1 : -1);
		if (change & IFF_PROMISC)
			dev_set_promiscuity(master,
					    dev->flags & IFF_PROMISC ? 1 : -1);
	}
}

static void dsa_slave_set_rx_mode(struct net_device *dev)
{
	struct net_device *master = dsa_slave_to_master(dev);

	dev_mc_sync(master, dev);
	dev_uc_sync(master, dev);
}

static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct sockaddr *addr = a;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (!ether_addr_equal(addr->sa_data, master->dev_addr)) {
		err = dev_uc_add(master, addr->sa_data);
		if (err < 0)
			return err;
	}

	if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
		dev_uc_del(master, dev->dev_addr);

out:
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

struct dsa_slave_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};

static int
dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid,
			   bool is_static, void *data)
{
	struct dsa_slave_dump_ctx *dump = data;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
		goto nla_put_failure;

	if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}

static int
dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
		   struct net_device *dev, struct net_device *filter_dev,
		   int *idx)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_dump_ctx dump = {
		.dev = dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump);
	*idx = dump.idx;

	return err;
}

static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;
	int port = p->dp->index;

	/* Pass through to switch driver if it supports timestamping */
	switch (cmd) {
	case SIOCGHWTSTAMP:
		if (ds->ops->port_hwtstamp_get)
			return ds->ops->port_hwtstamp_get(ds, port, ifr);
		break;
	case SIOCSHWTSTAMP:
		if (ds->ops->port_hwtstamp_set)
			return ds->ops->port_hwtstamp_set(ds, port, ifr);
		break;
	}

	return phylink_mii_ioctl(p->dp->pl, ifr, cmd);
}

static int dsa_slave_port_attr_set(struct net_device *dev,
				   const struct switchdev_attr *attr,
				   struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int ret;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		ret = dsa_port_set_state(dp, attr->u.stp_state, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
					      trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		ret = dsa_port_ageing_time(dp, attr->u.ageing_time, trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags,
						trans);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, trans);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int dsa_slave_vlan_add(struct net_device *dev,
			      const struct switchdev_obj *obj,
			      struct switchdev_trans *trans)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct switchdev_obj_port_vlan vlan;
	int err;

	if (obj->orig_dev != dev)
		return -EOPNOTSUPP;

	if (dp->bridge_dev && !br_vlan_enabled(dp->bridge_dev))
		return 0;

	vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);

	err = dsa_port_vlan_add(dp, &vlan, trans);
	if (err)
		return err;

	/* We need the dedicated CPU port to be a member of the VLAN as well.
	 * Even though drivers often handle CPU membership in special ways,
	 * it doesn't make sense to program a PVID, so clear this flag.
	 */
	vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;

	err = dsa_port_vlan_add(dp->cpu_dp, &vlan, trans);
	if (err)
		return err;

	return 0;
}

static int dsa_slave_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct switchdev_trans *trans,
				  struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	/* For the prepare phase, ensure the full set of changes is feasible in
	 * one go in order to signal a failure properly. If an operation is not
	 * supported, return -EOPNOTSUPP.
	 */

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (obj->orig_dev != dev)
			return -EOPNOTSUPP;
		err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans);
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB add,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_add(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj),
				       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_add(dev, obj, trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_vlan_del(struct net_device *dev,
			      const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	if (obj->orig_dev != dev)
		return -EOPNOTSUPP;

	if (dp->bridge_dev && !br_vlan_enabled(dp->bridge_dev))
		return 0;

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	return dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
}

static int dsa_slave_port_obj_del(struct net_device *dev,
				  const struct switchdev_obj *obj)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		if (obj->orig_dev != dev)
			return -EOPNOTSUPP;
		err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		/* DSA can directly translate this to a normal MDB del,
		 * but on the CPU port.
		 */
		err = dsa_port_mdb_del(dp->cpu_dp, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dsa_slave_vlan_del(dev, obj);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int dsa_slave_get_port_parent_id(struct net_device *dev,
					struct netdev_phys_item_id *ppid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(dst->index);
	memcpy(&ppid->id, &dst->index, ppid->id_len);

	return 0;
}

static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
						     struct sk_buff *skb)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct dsa_slave_priv *p = netdev_priv(dev);

	if (p->netpoll)
		netpoll_send_skb(p->netpoll, skb);
#else
	BUG();
#endif
	return NETDEV_TX_OK;
}

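/* Give the switch driver a chance to hardware-timestamp PTP frames on
 * transmit: classify the skb, clone it, stash the clone in DSA_SKB_CB() and
 * hand it to the driver's port_txtstamp op. The clone is freed here if the
 * driver does not take ownership of it.
 */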
static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p,
				 struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	struct sk_buff *clone;
	unsigned int type;

	type = ptp_classify_raw(skb);
	if (type == PTP_CLASS_NONE)
		return;

	if (!ds->ops->port_txtstamp)
		return;

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	DSA_SKB_CB(skb)->clone = clone;

	if (ds->ops->port_txtstamp(ds, p->dp->index, clone, type))
		return;

	kfree_skb(clone);
}

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
{
	/* SKBs for netpoll still need to be mangled with the protocol-specific
	 * tag to be successfully transmitted
	 */
	if (unlikely(netpoll_tx_running(dev)))
		return dsa_slave_netpoll_send_skb(dev, skb);

	/* Queue the SKB for transmission on the parent interface, but
	 * do not modify its EtherType
	 */
	skb->dev = dsa_slave_to_master(dev);
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);

static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	struct sk_buff *nskb;

	s = this_cpu_ptr(p->stats64);
	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += skb->len;
	u64_stats_update_end(&s->syncp);

	DSA_SKB_CB(skb)->deferred_xmit = false;
	DSA_SKB_CB(skb)->clone = NULL;

	/* Identify PTP protocol packets, clone them, and pass them to the
	 * switch driver
	 */
	dsa_skb_tx_timestamp(p, skb);

	/* Transmit function may have to reallocate the original SKB,
	 * in which case it must have freed it. Only free it here on error.
	 */
	nskb = p->xmit(skb, dev);
	if (!nskb) {
		if (!DSA_SKB_CB(skb)->deferred_xmit)
			kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	return dsa_enqueue_skb(nskb, dev);
}

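/* Some tagging protocols cannot transmit a frame from the ndo_start_xmit hot
 * path. dsa_defer_xmit() queues the skb on the port and schedules xmit_work,
 * which later hands it to the driver's port_deferred_xmit op from process
 * context.
 */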
void *dsa_defer_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	DSA_SKB_CB(skb)->deferred_xmit = true;

	skb_queue_tail(&dp->xmit_queue, skb);
	schedule_work(&dp->xmit_work);
	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_defer_xmit);

static void dsa_port_xmit_work(struct work_struct *work)
{
	struct dsa_port *dp = container_of(work, struct dsa_port, xmit_work);
	struct dsa_switch *ds = dp->ds;
	struct sk_buff *skb;

	if (unlikely(!ds->ops->port_deferred_xmit))
		return;

	while ((skb = skb_dequeue(&dp->xmit_queue)) != NULL)
		ds->ops->port_deferred_xmit(ds, dp->index, skb);
}

/* ethtool operations *******************************************************/

static void dsa_slave_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}

static int dsa_slave_get_regs_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs_len)
		return ds->ops->get_regs_len(ds, dp->index);

	return -EOPNOTSUPP;
}

static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_regs)
		ds->ops->get_regs(ds, dp->index, regs, _p);
}

static int dsa_slave_nway_reset(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_nway_reset(dp->pl);
}

static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->cd && ds->cd->eeprom_len)
		return ds->cd->eeprom_len;

	if (ds->ops->get_eeprom_len)
		return ds->ops->get_eeprom_len(ds);

	return 0;
}

static int dsa_slave_get_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->get_eeprom)
		return ds->ops->get_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static int dsa_slave_set_eeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (ds->ops->set_eeprom)
		return ds->ops->set_eeprom(ds, eeprom, data);

	return -EOPNOTSUPP;
}

static void dsa_slave_get_strings(struct net_device *dev,
				  uint32_t stringset, uint8_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (stringset == ETH_SS_STATS) {
		int len = ETH_GSTRING_LEN;

		strncpy(data, "tx_packets", len);
		strncpy(data + len, "tx_bytes", len);
		strncpy(data + 2 * len, "rx_packets", len);
		strncpy(data + 3 * len, "rx_bytes", len);
		if (ds->ops->get_strings)
			ds->ops->get_strings(ds, dp->index, stringset,
					     data + 4 * len);
	}
}

static void dsa_slave_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats,
					uint64_t *data)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = dp->ds;
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
		data[0] += tx_packets;
		data[1] += tx_bytes;
		data[2] += rx_packets;
		data[3] += rx_bytes;
	}
	if (ds->ops->get_ethtool_stats)
		ds->ops->get_ethtool_stats(ds, dp->index, data + 4);
}

static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (sset == ETH_SS_STATS) {
		int count;

		count = 4;
		if (ds->ops->get_sset_count)
			count += ds->ops->get_sset_count(ds, dp->index, sset);

		return count;
	}

	return -EOPNOTSUPP;
}

static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	phylink_ethtool_get_wol(dp->pl, w);

	if (ds->ops->get_wol)
		ds->ops->get_wol(ds, dp->index, w);
}

static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret = -EOPNOTSUPP;

	phylink_ethtool_set_wol(dp->pl, w);

	if (ds->ops->set_wol)
		ret = ds->ops->set_wol(ds, dp->index, w);

	return ret;
}

static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->set_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->set_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_set_eee(dp->pl, e);
}

static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;
	int ret;

	/* Port's PHY and MAC both need to be EEE capable */
	if (!dev->phydev || !dp->pl)
		return -ENODEV;

	if (!ds->ops->get_mac_eee)
		return -EOPNOTSUPP;

	ret = ds->ops->get_mac_eee(ds, dp->index, e);
	if (ret)
		return ret;

	return phylink_ethtool_get_eee(dp->pl, e);
}

static int dsa_slave_get_link_ksettings(struct net_device *dev,
					struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_get(dp->pl, cmd);
}

static int dsa_slave_set_link_ksettings(struct net_device *dev,
					const struct ethtool_link_ksettings *cmd)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return phylink_ethtool_ksettings_set(dp->pl, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int dsa_slave_netpoll_setup(struct net_device *dev,
				   struct netpoll_info *ni)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll;
	int err = 0;

	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
	if (!netpoll)
		return -ENOMEM;

	err = __netpoll_setup(netpoll, master);
	if (err) {
		kfree(netpoll);
		goto out;
	}

	p->netpoll = netpoll;
out:
	return err;
}

static void dsa_slave_netpoll_cleanup(struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct netpoll *netpoll = p->netpoll;

	if (!netpoll)
		return;

	p->netpoll = NULL;

	__netpoll_free(netpoll);
}

static void dsa_slave_poll_controller(struct net_device *dev)
{
}
#endif

static int dsa_slave_get_phys_port_name(struct net_device *dev,
					char *name, size_t len)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	/* For non-legacy ports, devlink is used and it takes
	 * care of the name generation. This ndo implementation
	 * should be removed with legacy support.
	 */
	if (dp->ds->devlink)
		return -EOPNOTSUPP;

	if (snprintf(name, len, "p%d", dp->index) >= len)
		return -EINVAL;

	return 0;
}

static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;

	return NULL;
}

static int dsa_slave_add_cls_matchall(struct net_device *dev,
				      struct tc_cls_matchall_offload *cls,
				      bool ingress)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	__be16 protocol = cls->common.protocol;
	struct dsa_switch *ds = dp->ds;
	struct flow_action_entry *act;
	struct dsa_port *to_dp;
	int err = -EOPNOTSUPP;

	if (!ds->ops->port_mirror_add)
		return err;

	if (!flow_offload_has_one_action(&cls->rule->action))
		return err;

	act = &cls->rule->action.entries[0];

	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
		struct dsa_mall_mirror_tc_entry *mirror;

		if (!act->dev)
			return -EINVAL;

		if (!dsa_slave_dev_check(act->dev))
			return -EOPNOTSUPP;

		mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
		if (!mall_tc_entry)
			return -ENOMEM;

		mall_tc_entry->cookie = cls->cookie;
		mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;

		to_dp = dsa_slave_to_port(act->dev);

		mirror->to_local_port = to_dp->index;
		mirror->ingress = ingress;

		err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress);
		if (err) {
			kfree(mall_tc_entry);
			return err;
		}

		list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
	}

	return 0;
}

static void dsa_slave_del_cls_matchall(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_mall_tc_entry *mall_tc_entry;
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->port_mirror_del)
		return;

	mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
	if (!mall_tc_entry)
		return;

	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case DSA_PORT_MALL_MIRROR:
		ds->ops->port_mirror_del(ds, dp->index, &mall_tc_entry->mirror);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}

static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					   struct tc_cls_matchall_offload *cls,
					   bool ingress)
{
	if (cls->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dsa_slave_add_cls_matchall(dev, cls, ingress);
	case TC_CLSMATCHALL_DESTROY:
		dsa_slave_del_cls_matchall(dev, cls);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				       void *cb_priv, bool ingress)
{
	struct net_device *dev = cb_priv;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress);
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, true);
}

static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false);
}

static LIST_HEAD(dsa_slave_block_cb_list);

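/* Bind or unbind a tc offload block: pick the ingress or egress callback
 * based on the binder type, then register it with the flow block
 * infrastructure so matchall classifiers can be offloaded to the switch.
 */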
static int dsa_slave_setup_tc_block(struct net_device *dev,
				    struct flow_block_offload *f)
{
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		cb = dsa_slave_setup_tc_block_cb_ig;
	else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		cb = dsa_slave_setup_tc_block_cb_eg;
	else
		return -EOPNOTSUPP;

	f->driver_block_list = &dsa_slave_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return dsa_slave_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static void dsa_slave_get_stats64(struct net_device *dev,
				  struct rtnl_link_stats64 *stats)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct pcpu_sw_netstats *s;
	unsigned int start;
	int i;

	netdev_stats_to_stats64(stats, &dev->stats);
	for_each_possible_cpu(i) {
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;

		s = per_cpu_ptr(p->stats64, i);
		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			tx_packets = s->tx_packets;
			tx_bytes = s->tx_bytes;
			rx_packets = s->rx_packets;
			rx_bytes = s->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}
}

static int dsa_slave_get_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->get_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs);
}

static int dsa_slave_set_rxnfc(struct net_device *dev,
			       struct ethtool_rxnfc *nfc)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	if (!ds->ops->set_rxnfc)
		return -EOPNOTSUPP;

	return ds->ops->set_rxnfc(ds, dp->index, nfc);
}

static int dsa_slave_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *ts)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	struct dsa_switch *ds = p->dp->ds;

	if (!ds->ops->get_ts_info)
		return -EOPNOTSUPP;

	return ds->ops->get_ts_info(ds, p->dp->index, ts);
}

static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
				     u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct bridge_vlan_info info;
	int ret;

	/* Check for a possible bridge VLAN entry now since there is no
	 * need to emulate the switchdev prepare + commit phase.
	 */
	if (dp->bridge_dev) {
		if (!br_vlan_enabled(dp->bridge_dev))
			return 0;

		/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
		 * device, respectively the VID is not found, returning
		 * 0 means success, which is a failure for us here.
		 */
		ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
		if (ret == 0)
			return -EBUSY;
	}

	ret = dsa_port_vid_add(dp, vid, 0);
	if (ret)
		return ret;

	ret = dsa_port_vid_add(dp->cpu_dp, vid, 0);
	if (ret)
		return ret;

	return 0;
}

static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				      u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct bridge_vlan_info info;
	int ret;

	/* Check for a possible bridge VLAN entry now since there is no
	 * need to emulate the switchdev prepare + commit phase.
	 */
	if (dp->bridge_dev) {
		if (!br_vlan_enabled(dp->bridge_dev))
			return 0;

		/* br_vlan_get_info() returns -EINVAL or -ENOENT if the
		 * device, respectively the VID is not found, returning
		 * 0 means success, which is a failure for us here.
		 */
		ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
		if (ret == 0)
			return -EBUSY;
	}

	/* Do not deprogram the CPU port as it may be shared with other user
	 * ports which can be members of this VLAN as well.
	 */
	return dsa_port_vid_del(dp, vid);
}

static const struct ethtool_ops dsa_slave_ethtool_ops = {
	.get_drvinfo = dsa_slave_get_drvinfo,
	.get_regs_len = dsa_slave_get_regs_len,
	.get_regs = dsa_slave_get_regs,
	.nway_reset = dsa_slave_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = dsa_slave_get_eeprom_len,
	.get_eeprom = dsa_slave_get_eeprom,
	.set_eeprom = dsa_slave_set_eeprom,
	.get_strings = dsa_slave_get_strings,
	.get_ethtool_stats = dsa_slave_get_ethtool_stats,
	.get_sset_count = dsa_slave_get_sset_count,
	.set_wol = dsa_slave_set_wol,
	.get_wol = dsa_slave_get_wol,
	.set_eee = dsa_slave_set_eee,
	.get_eee = dsa_slave_get_eee,
	.get_link_ksettings = dsa_slave_get_link_ksettings,
	.set_link_ksettings = dsa_slave_set_link_ksettings,
	.get_rxnfc = dsa_slave_get_rxnfc,
	.set_rxnfc = dsa_slave_set_rxnfc,
	.get_ts_info = dsa_slave_get_ts_info,
};

/* legacy way, bypassing the bridge *****************************************/
int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid,
		       u16 flags,
		       struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_add(dp, addr, vid);
}

int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
		       struct net_device *dev,
		       const unsigned char *addr, u16 vid)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dsa_port_fdb_del(dp, addr, vid);
}

static struct devlink_port *dsa_slave_get_devlink_port(struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dp->ds->devlink ? &dp->devlink_port : NULL;
}

static const struct net_device_ops dsa_slave_netdev_ops = {
	.ndo_open = dsa_slave_open,
	.ndo_stop = dsa_slave_close,
	.ndo_start_xmit = dsa_slave_xmit,
	.ndo_change_rx_flags = dsa_slave_change_rx_flags,
	.ndo_set_rx_mode = dsa_slave_set_rx_mode,
	.ndo_set_mac_address = dsa_slave_set_mac_address,
	.ndo_fdb_add = dsa_legacy_fdb_add,
	.ndo_fdb_del = dsa_legacy_fdb_del,
	.ndo_fdb_dump = dsa_slave_fdb_dump,
	.ndo_do_ioctl = dsa_slave_ioctl,
	.ndo_get_iflink = dsa_slave_get_iflink,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup = dsa_slave_netpoll_setup,
	.ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
	.ndo_poll_controller = dsa_slave_poll_controller,
#endif
	.ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
	.ndo_setup_tc = dsa_slave_setup_tc,
	.ndo_get_stats64 = dsa_slave_get_stats64,
	.ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
	.ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
	.ndo_get_devlink_port = dsa_slave_get_devlink_port,
};

static struct device_type dsa_type = {
	.name = "dsa",
};

void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);

	phylink_mac_change(dp->pl, up);
}
EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change);

static void dsa_slave_phylink_fixed_state(struct net_device *dev,
					  struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_switch *ds = dp->ds;

	/* No need to check that this operation is valid, the callback would
	 * not be called if it was not.
	 */
	ds->ops->phylink_fixed_state(ds, dp->index, state);
}

/* slave device setup *******************************************************/
static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_switch *ds = dp->ds;

	slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr);
	if (!slave_dev->phydev) {
		netdev_err(slave_dev, "no phy at %d\n", addr);
		return -ENODEV;
	}

	return phylink_connect_phy(dp->pl, slave_dev->phydev);
}

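/* Create the phylink instance for this port from its device tree node and
 * connect it to the port's PHY. If no PHY or SFP is described there, fall
 * back to the PHY at this port's index on the switch's internal MDIO bus.
 */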
static int dsa_slave_phy_setup(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct device_node *port_dn = dp->dn;
	struct dsa_switch *ds = dp->ds;
	u32 phy_flags = 0;
	int mode, ret;

	mode = of_get_phy_mode(port_dn);
	if (mode < 0)
		mode = PHY_INTERFACE_MODE_NA;

	dp->pl_config.dev = &slave_dev->dev;
	dp->pl_config.type = PHYLINK_NETDEV;

	dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
				&dsa_port_phylink_mac_ops);
	if (IS_ERR(dp->pl)) {
		netdev_err(slave_dev,
			   "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
		return PTR_ERR(dp->pl);
	}

	/* Register only if the switch provides such a callback, since this
	 * callback takes precedence over polling the link GPIO in PHYLINK
	 * (see phylink_get_fixed_state).
	 */
	if (ds->ops->phylink_fixed_state)
		phylink_fixed_state_cb(dp->pl, dsa_slave_phylink_fixed_state);

	if (ds->ops->get_phy_flags)
		phy_flags = ds->ops->get_phy_flags(ds, dp->index);

	ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
	if (ret == -ENODEV && ds->slave_mii_bus) {
		/* We could not connect to a designated PHY or SFP, so try to
		 * use the switch internal MDIO bus instead
		 */
		ret = dsa_slave_phy_connect(slave_dev, dp->index);
		if (ret) {
			netdev_err(slave_dev,
				   "failed to connect to port %d: %d\n",
				   dp->index, ret);
			phylink_destroy(dp->pl);
			return ret;
		}
	}

	return ret;
}

static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
					    struct netdev_queue *txq,
					    void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock,
			  &dsa_slave_netdev_xmit_lock_key);
}

int dsa_slave_suspend(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	cancel_work_sync(&dp->xmit_work);
	skb_queue_purge(&dp->xmit_queue);

	netif_device_detach(slave_dev);

	rtnl_lock();
	phylink_stop(dp->pl);
	rtnl_unlock();

	return 0;
}

int dsa_slave_resume(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);

	if (!netif_running(slave_dev))
		return 0;

	netif_device_attach(slave_dev);

	rtnl_lock();
	phylink_start(dp->pl);
	rtnl_unlock();

	return 0;
}

static void dsa_slave_notify(struct net_device *dev, unsigned long val)
{
	struct net_device *master = dsa_slave_to_master(dev);
	struct dsa_port *dp = dsa_slave_to_port(dev);
	struct dsa_notifier_register_info rinfo = {
		.switch_number = dp->ds->index,
		.port_number = dp->index,
		.master = master,
		.info.dev = dev,
	};

	call_dsa_notifiers(val, dev, &rinfo.info);
}

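/* Allocate and register the slave net_device for a user port: inherit the
 * MAC address and vlan_features from the master, hook up the DSA netdev and
 * ethtool ops, set up phylink and notify interested listeners.
 */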
int dsa_slave_create(struct dsa_port *port)
{
	const struct dsa_port *cpu_dp = port->cpu_dp;
	struct net_device *master = cpu_dp->master;
	struct dsa_switch *ds = port->ds;
	const char *name = port->name;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;

	slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name,
				     NET_NAME_UNKNOWN, ether_setup,
				     ds->num_tx_queues, 1);
	if (slave_dev == NULL)
		return -ENOMEM;

	slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
	if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
		slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	slave_dev->hw_features |= NETIF_F_HW_TC;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	if (!IS_ERR_OR_NULL(port->mac))
		ether_addr_copy(slave_dev->dev_addr, port->mac);
	else
		eth_hw_addr_inherit(slave_dev, master);
	slave_dev->priv_flags |= IFF_NO_QUEUE;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
	slave_dev->min_mtu = 0;
	slave_dev->max_mtu = ETH_MAX_MTU;
	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);

	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
				 NULL);

	SET_NETDEV_DEV(slave_dev, port->ds->dev);
	slave_dev->dev.of_node = port->dn;
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!p->stats64) {
		free_netdev(slave_dev);
		return -ENOMEM;
	}
	p->dp = port;
	INIT_LIST_HEAD(&p->mall_tc_list);
	INIT_WORK(&port->xmit_work, dsa_port_xmit_work);
	skb_queue_head_init(&port->xmit_queue);
	p->xmit = cpu_dp->tag_ops->xmit;
	port->slave = slave_dev;

	netif_carrier_off(slave_dev);

	ret = dsa_slave_phy_setup(slave_dev);
	if (ret) {
		netdev_err(master, "error %d setting up slave phy\n", ret);
		goto out_free;
	}

	dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);

	ret = register_netdev(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		goto out_phy;
	}

	return 0;

out_phy:
	rtnl_lock();
	phylink_disconnect_phy(p->dp->pl);
	rtnl_unlock();
	phylink_destroy(p->dp->pl);
out_free:
	free_percpu(p->stats64);
	free_netdev(slave_dev);
	port->slave = NULL;
	return ret;
}

void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
	struct dsa_slave_priv *p = netdev_priv(slave_dev);

	netif_carrier_off(slave_dev);
	rtnl_lock();
	phylink_disconnect_phy(dp->pl);
	rtnl_unlock();

	dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
	unregister_netdev(slave_dev);
	phylink_destroy(dp->pl);
	free_percpu(p->stats64);
	free_netdev(slave_dev);
}

static bool dsa_slave_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &dsa_slave_netdev_ops;
}

static int dsa_slave_changeupper(struct net_device *dev,
				 struct netdev_notifier_changeupper_info *info)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err = NOTIFY_DONE;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking) {
			err = dsa_port_bridge_join(dp, info->upper_dev);
			err = notifier_from_errno(err);
		} else {
			dsa_port_bridge_leave(dp, info->upper_dev);
			err = NOTIFY_OK;
		}
	}

	return err;
}

static int dsa_slave_upper_vlan_check(struct net_device *dev,
				      struct netdev_notifier_changeupper_info *
				      info)
{
	struct netlink_ext_ack *ext_ack;
	struct net_device *slave;
	struct dsa_port *dp;

	ext_ack = netdev_notifier_info_to_extack(&info->info);

	if (!is_vlan_dev(dev))
		return NOTIFY_DONE;

	slave = vlan_dev_real_dev(dev);
	if (!dsa_slave_dev_check(slave))
		return NOTIFY_DONE;

	dp = dsa_slave_to_port(slave);
	if (!dp->bridge_dev)
		return NOTIFY_DONE;

	/* Deny enslaving a VLAN device into a VLAN-aware bridge */
	if (br_vlan_enabled(dp->bridge_dev) &&
	    netif_is_bridge_master(info->upper_dev) && info->linking) {
		NL_SET_ERR_MSG_MOD(ext_ack,
				   "Cannot enslave VLAN device into VLAN aware bridge");
		return notifier_from_errno(-EINVAL);
	}

	return NOTIFY_DONE;
}

static int dsa_slave_netdevice_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGEUPPER) {
		if (!dsa_slave_dev_check(dev))
			return dsa_slave_upper_vlan_check(dev, ptr);

		return dsa_slave_changeupper(dev, ptr);
	}

	return NOTIFY_DONE;
}

struct dsa_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

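/* Deferred handler for switchdev FDB events: program or remove the address
 * in the switch from process context and, on a successful add, notify
 * switchdev that the entry has been offloaded.
 */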
static void dsa_slave_switchdev_event_work(struct work_struct *work)
{
	struct dsa_switchdev_event_work *switchdev_work =
		container_of(work, struct dsa_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct dsa_port *dp = dsa_slave_to_port(dev);
	int err;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;

		err = dsa_port_fdb_add(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb add failed err=%d\n", err);
			break;
		}
		fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info, NULL);
		break;

	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user)
			break;

		err = dsa_port_fdb_del(dp, fdb_info->addr, fdb_info->vid);
		if (err) {
			netdev_dbg(dev, "fdb del failed err=%d\n", err);
			dev_close(dev);
		}
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static int
dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work *
				  switchdev_work,
				  const struct switchdev_notifier_fdb_info *
				  fdb_info)
{
	memcpy(&switchdev_work->fdb_info, fdb_info,
	       sizeof(switchdev_work->fdb_info));
	switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
	if (!switchdev_work->fdb_info.addr)
		return -ENOMEM;
	ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
			fdb_info->addr);
	return 0;
}

/* Called under rcu_read_lock() */
static int dsa_slave_switchdev_event(struct notifier_block *unused,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct dsa_switchdev_event_work *switchdev_work;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	}

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work,
		  dsa_slave_switchdev_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
			goto err_fdb_work_init;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	dsa_schedule_work(&switchdev_work->work);
	return NOTIFY_OK;

err_fdb_work_init:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
					      unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    dsa_slave_dev_check,
						    dsa_slave_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = switchdev_handle_port_obj_del(dev, ptr,
						    dsa_slave_dev_check,
						    dsa_slave_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     dsa_slave_dev_check,
						     dsa_slave_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block dsa_slave_nb __read_mostly = {
	.notifier_call = dsa_slave_netdevice_event,
};

static struct notifier_block dsa_slave_switchdev_notifier = {
	.notifier_call = dsa_slave_switchdev_event,
};

static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
	.notifier_call = dsa_slave_switchdev_blocking_event,
};

int dsa_slave_register_notifier(void)
{
	struct notifier_block *nb;
	int err;

	err = register_netdevice_notifier(&dsa_slave_nb);
	if (err)
		return err;

	err = register_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		goto err_switchdev_nb;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err)
		goto err_switchdev_blocking_nb;

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
	unregister_netdevice_notifier(&dsa_slave_nb);
	return err;
}

void dsa_slave_unregister_notifier(void)
{
	struct notifier_block *nb;
	int err;

	nb = &dsa_slave_switchdev_blocking_notifier;
	err = unregister_switchdev_blocking_notifier(nb);
	if (err)
		pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);

	err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
	if (err)
		pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dsa_slave_nb);
	if (err)
		pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
}