1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * include/net/dsa.h - Driver for Distributed Switch Architecture switch chips
4 * Copyright (c) 2008-2009 Marvell Semiconductor
5 */
6
7 #ifndef __LINUX_NET_DSA_H
8 #define __LINUX_NET_DSA_H
9
10 #include <linux/if.h>
11 #include <linux/if_ether.h>
12 #include <linux/list.h>
13 #include <linux/notifier.h>
14 #include <linux/timer.h>
15 #include <linux/workqueue.h>
16 #include <linux/of.h>
17 #include <linux/ethtool.h>
18 #include <linux/net_tstamp.h>
19 #include <linux/phy.h>
20 #include <linux/platform_data/dsa.h>
21 #include <linux/phylink.h>
22 #include <net/devlink.h>
23 #include <net/switchdev.h>
24
25 struct tc_action;
26 struct phy_device;
27 struct fixed_phy_status;
28 struct phylink_link_state;
29
30 #define DSA_TAG_PROTO_NONE_VALUE 0
31 #define DSA_TAG_PROTO_BRCM_VALUE 1
32 #define DSA_TAG_PROTO_BRCM_PREPEND_VALUE 2
33 #define DSA_TAG_PROTO_DSA_VALUE 3
34 #define DSA_TAG_PROTO_EDSA_VALUE 4
35 #define DSA_TAG_PROTO_GSWIP_VALUE 5
36 #define DSA_TAG_PROTO_KSZ9477_VALUE 6
37 #define DSA_TAG_PROTO_KSZ9893_VALUE 7
38 #define DSA_TAG_PROTO_LAN9303_VALUE 8
39 #define DSA_TAG_PROTO_MTK_VALUE 9
40 #define DSA_TAG_PROTO_QCA_VALUE 10
41 #define DSA_TAG_PROTO_TRAILER_VALUE 11
42 #define DSA_TAG_PROTO_8021Q_VALUE 12
43 #define DSA_TAG_PROTO_SJA1105_VALUE 13
44 #define DSA_TAG_PROTO_KSZ8795_VALUE 14
45 #define DSA_TAG_PROTO_OCELOT_VALUE 15
46 #define DSA_TAG_PROTO_AR9331_VALUE 16
47 #define DSA_TAG_PROTO_RTL4_A_VALUE 17
48 #define DSA_TAG_PROTO_HELLCREEK_VALUE 18
49 #define DSA_TAG_PROTO_XRS700X_VALUE 19
50 #define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20
51 #define DSA_TAG_PROTO_SEVILLE_VALUE 21
52 #define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22
53 #define DSA_TAG_PROTO_SJA1110_VALUE 23
54
55 enum dsa_tag_protocol {
56 DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
57 DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE,
58 DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
59 DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
60 DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE,
61 DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE,
62 DSA_TAG_PROTO_GSWIP = DSA_TAG_PROTO_GSWIP_VALUE,
63 DSA_TAG_PROTO_KSZ9477 = DSA_TAG_PROTO_KSZ9477_VALUE,
64 DSA_TAG_PROTO_KSZ9893 = DSA_TAG_PROTO_KSZ9893_VALUE,
65 DSA_TAG_PROTO_LAN9303 = DSA_TAG_PROTO_LAN9303_VALUE,
66 DSA_TAG_PROTO_MTK = DSA_TAG_PROTO_MTK_VALUE,
67 DSA_TAG_PROTO_QCA = DSA_TAG_PROTO_QCA_VALUE,
68 DSA_TAG_PROTO_TRAILER = DSA_TAG_PROTO_TRAILER_VALUE,
69 DSA_TAG_PROTO_8021Q = DSA_TAG_PROTO_8021Q_VALUE,
70 DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE,
71 DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE,
72 DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
73 DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE,
74 DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE,
75 DSA_TAG_PROTO_HELLCREEK = DSA_TAG_PROTO_HELLCREEK_VALUE,
76 DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE,
77 DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
78 DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE,
79 DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE,
80 };
81
82 struct dsa_switch;
83
84 struct dsa_device_ops {
85 struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
86 struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
87 void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
88 int *offset);
89 unsigned int needed_headroom;
90 unsigned int needed_tailroom;
91 const char *name;
92 enum dsa_tag_protocol proto;
93 /* Some tagging protocols either mangle or shift the destination MAC
94 * address, in which case the DSA master would drop packets on ingress
95 * if what it understands out of the destination MAC address is not in
96 * its RX filter.
97 */
98 bool promisc_on_master;
99 };
100
101 /* This structure defines the control interfaces that are overlaid by the
102 * DSA layer on top of the DSA CPU/management net_device instance. This is
103 * used by the core net_device layer while calling various net_device_ops
104 * function pointers.
105 */
106 struct dsa_netdevice_ops {
107 int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr,
108 int cmd);
109 };
110
111 #define DSA_TAG_DRIVER_ALIAS "dsa_tag-"
112 #define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \
113 MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
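/*
 * Rough illustration (not part of this header): __stringify() expands its
 * argument before stringizing, so the alias ends up carrying the protocol's
 * numeric value. For instance, assuming a tagger for DSA_TAG_PROTO_OCELOT:
 *
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_OCELOT);
 *
 * resolves to roughly MODULE_ALIAS("dsa_tag-15"), which is what the DSA
 * core uses to load the matching tagger module on demand.
 */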
114
115 struct dsa_switch_tree {
116 struct list_head list;
117
118 /* Notifier chain for switch-wide events */
119 struct raw_notifier_head nh;
120
121 /* Tree identifier */
122 unsigned int index;
123
124 /* Number of switches attached to this tree */
125 struct kref refcount;
126
127 /* Has this tree been applied to the hardware? */
128 bool setup;
129
130 /* Tagging protocol operations */
131 const struct dsa_device_ops *tag_ops;
132
133 /* Default tagging protocol preferred by the switches in this
134 * tree.
135 */
136 enum dsa_tag_protocol default_proto;
137
138 /*
139 * Configuration data for the platform device that owns
140 * this dsa switch tree instance.
141 */
142 struct dsa_platform_data *pd;
143
144 /* List of switch ports */
145 struct list_head ports;
146
147 /* List of DSA links composing the routing table */
148 struct list_head rtable;
149
150 /* Maps offloaded LAG netdevs to a zero-based linear ID for
151 * drivers that need it.
152 */
153 struct net_device **lags;
154 unsigned int lags_len;
155
156 /* Track the largest switch index within a tree */
157 unsigned int last_switch;
158
159 /* Track the bridges with forwarding offload enabled */
160 unsigned long fwd_offloading_bridges;
161 };
162
163 #define dsa_lags_foreach_id(_id, _dst) \
164 for ((_id) = 0; (_id) < (_dst)->lags_len; (_id)++) \
165 if ((_dst)->lags[(_id)])
166
167 #define dsa_lag_foreach_port(_dp, _dst, _lag) \
168 list_for_each_entry((_dp), &(_dst)->ports, list) \
169 if ((_dp)->lag_dev == (_lag))
170
171 #define dsa_hsr_foreach_port(_dp, _ds, _hsr) \
172 list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
173 if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))
174
175 static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst,
176 unsigned int id)
177 {
178 return dst->lags[id];
179 }
180
181 static inline int dsa_lag_id(struct dsa_switch_tree *dst,
182 struct net_device *lag)
183 {
184 unsigned int id;
185
186 dsa_lags_foreach_id(id, dst) {
187 if (dsa_lag_dev(dst, id) == lag)
188 return id;
189 }
190
191 return -ENODEV;
192 }
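/*
 * Minimal usage sketch (hypothetical driver code, helper names assumed):
 * a driver that sets ds->num_lag_ids can map the bond/team netdev it is
 * handed back to the linear ID that DSA allocated for it:
 *
 *	static int foo_port_lag_join(struct dsa_switch *ds, int port,
 *				     struct net_device *lag,
 *				     struct netdev_lag_upper_info *info)
 *	{
 *		int id = dsa_lag_id(ds->dst, lag);
 *
 *		if (id < 0)
 *			return id;
 *
 *		return foo_set_port_lag(ds, port, id);
 *	}
 *
 * foo_set_port_lag() stands in for the hardware-specific programming step.
 */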
193
194 /* TC matchall action types */
195 enum dsa_port_mall_action_type {
196 DSA_PORT_MALL_MIRROR,
197 DSA_PORT_MALL_POLICER,
198 };
199
200 /* TC mirroring entry */
201 struct dsa_mall_mirror_tc_entry {
202 u8 to_local_port;
203 bool ingress;
204 };
205
206 /* TC port policer entry */
207 struct dsa_mall_policer_tc_entry {
208 u32 burst;
209 u64 rate_bytes_per_sec;
210 };
211
212 /* TC matchall entry */
213 struct dsa_mall_tc_entry {
214 struct list_head list;
215 unsigned long cookie;
216 enum dsa_port_mall_action_type type;
217 union {
218 struct dsa_mall_mirror_tc_entry mirror;
219 struct dsa_mall_policer_tc_entry policer;
220 };
221 };
222
223
224 struct dsa_port {
225 /* A CPU port is physically connected to a master device.
226 * A user port exposed to userspace has a slave device.
227 */
228 union {
229 struct net_device *master;
230 struct net_device *slave;
231 };
232
233 /* Copy of the tagging protocol operations, for quicker access
234 * in the data path. Valid only for the CPU ports.
235 */
236 const struct dsa_device_ops *tag_ops;
237
238 /* Copies for faster access in master receive hot path */
239 struct dsa_switch_tree *dst;
240 struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
241
242 enum {
243 DSA_PORT_TYPE_UNUSED = 0,
244 DSA_PORT_TYPE_CPU,
245 DSA_PORT_TYPE_DSA,
246 DSA_PORT_TYPE_USER,
247 } type;
248
249 struct dsa_switch *ds;
250 unsigned int index;
251 const char *name;
252 struct dsa_port *cpu_dp;
253 u8 mac[ETH_ALEN];
254 struct device_node *dn;
255 unsigned int ageing_time;
256 bool vlan_filtering;
257 /* Managed by DSA on user ports and by drivers on CPU and DSA ports */
258 bool learning;
259 u8 stp_state;
260 struct net_device *bridge_dev;
261 int bridge_num;
262 struct devlink_port devlink_port;
263 bool devlink_port_setup;
264 struct phylink *pl;
265 struct phylink_config pl_config;
266 struct net_device *lag_dev;
267 bool lag_tx_enabled;
268 struct net_device *hsr_dev;
269
270 struct list_head list;
271
272 /*
273 * Give the switch driver somewhere to hang its per-port private data
274 * structures (accessible from the tagger).
275 */
276 void *priv;
277
278 /*
279 * Original copy of the master netdev ethtool_ops
280 */
281 const struct ethtool_ops *orig_ethtool_ops;
282
283 /*
284 * Original copy of the master netdev net_device_ops
285 */
286 const struct dsa_netdevice_ops *netdev_ops;
287
288 /* List of MAC addresses that must be forwarded on this port.
289 * These are only valid on CPU ports and DSA links.
290 */
291 struct list_head fdbs;
292 struct list_head mdbs;
293
294 bool setup;
295 };
296
297 /* TODO: ideally DSA ports would have a single dp->link_dp member,
298 * and no dst->rtable nor this struct dsa_link would be needed,
299 * but this would require some more complex tree walking,
300 * so keep it stupid at the moment and list them all.
301 */
302 struct dsa_link {
303 struct dsa_port *dp;
304 struct dsa_port *link_dp;
305 struct list_head list;
306 };
307
308 struct dsa_mac_addr {
309 unsigned char addr[ETH_ALEN];
310 u16 vid;
311 refcount_t refcount;
312 struct list_head list;
313 };
314
315 struct dsa_switch {
316 bool setup;
317
318 struct device *dev;
319
320 /*
321 * Parent switch tree, and switch index.
322 */
323 struct dsa_switch_tree *dst;
324 unsigned int index;
325
326 /* Listener for switch fabric events */
327 struct notifier_block nb;
328
329 /*
330 * Give the switch driver somewhere to hang its private data
331 * structure.
332 */
333 void *priv;
334
335 /*
336 * Configuration data for this switch.
337 */
338 struct dsa_chip_data *cd;
339
340 /*
341 * The switch operations.
342 */
343 const struct dsa_switch_ops *ops;
344
345 /*
346 * Slave mii_bus and devices for the individual ports.
347 */
348 u32 phys_mii_mask;
349 struct mii_bus *slave_mii_bus;
350
351 /* Ageing Time limits in msecs */
352 unsigned int ageing_time_min;
353 unsigned int ageing_time_max;
354
355 /* Storage for drivers using tag_8021q */
356 struct dsa_8021q_context *tag_8021q_ctx;
357
358 /* devlink used to represent this switch device */
359 struct devlink *devlink;
360
361 /* Number of switch port queues */
362 unsigned int num_tx_queues;
363
364 /* Disallow bridge core from requesting different VLAN awareness
365 * settings on ports if not hardware-supported
366 */
367 bool vlan_filtering_is_global;
368
369 /* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges
370 * that have vlan_filtering=0. All drivers should ideally set this (and
371 * then the option would get removed), but it is unknown whether this
372 * would break things or not.
373 */
374 bool configure_vlan_while_not_filtering;
375
376 /* If the switch driver always programs the CPU port as egress tagged
377 * despite the VLAN configuration indicating otherwise, then setting
378 * @untag_bridge_pvid will force the DSA receive path to pop the bridge's
379 * default_pvid VLAN tagged frames to offer a consistent behavior
380 * between a vlan_filtering=0 and vlan_filtering=1 bridge device.
381 */
382 bool untag_bridge_pvid;
383
384 /* Let DSA manage the FDB entries towards the CPU, based on the
385 * software bridge database.
386 */
387 bool assisted_learning_on_cpu_port;
388
389 /* In case vlan_filtering_is_global is set, the VLAN awareness state
390 * should be retrieved from here and not from the per-port settings.
391 */
392 bool vlan_filtering;
393
394 /* MAC PCS does not provide link state change interrupt, and requires
395 * polling. Flag passed on to PHYLINK.
396 */
397 bool pcs_poll;
398
399 /* For switches that only have the MRU configurable. To ensure the
400 * configured MTU is not exceeded, normalization of MRU on all bridged
401 * interfaces is needed.
402 */
403 bool mtu_enforcement_ingress;
404
405 /* Drivers that benefit from having an ID associated with each
406 * offloaded LAG should set this to the maximum number of
407 * supported IDs. DSA will then maintain a mapping of _at
408 * least_ this many IDs, accessible to drivers via
409 * dsa_lag_id().
410 */
411 unsigned int num_lag_ids;
412
413 /* Drivers that support bridge forwarding offload should set this to
414 * the maximum number of bridges spanning the same switch tree that can
415 * be offloaded.
416 */
417 unsigned int num_fwd_offloading_bridges;
418
419 size_t num_ports;
420 };
421
422 static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
423 {
424 struct dsa_switch_tree *dst = ds->dst;
425 struct dsa_port *dp;
426
427 list_for_each_entry(dp, &dst->ports, list)
428 if (dp->ds == ds && dp->index == p)
429 return dp;
430
431 return NULL;
432 }
433
434 static inline bool dsa_port_is_dsa(struct dsa_port *port)
435 {
436 return port->type == DSA_PORT_TYPE_DSA;
437 }
438
439 static inline bool dsa_port_is_cpu(struct dsa_port *port)
440 {
441 return port->type == DSA_PORT_TYPE_CPU;
442 }
443
444 static inline bool dsa_port_is_user(struct dsa_port *dp)
445 {
446 return dp->type == DSA_PORT_TYPE_USER;
447 }
448
449 static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
450 {
451 return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
452 }
453
454 static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
455 {
456 return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_CPU;
457 }
458
459 static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
460 {
461 return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_DSA;
462 }
463
464 static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
465 {
466 return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
467 }
468
469 static inline u32 dsa_user_ports(struct dsa_switch *ds)
470 {
471 u32 mask = 0;
472 int p;
473
474 for (p = 0; p < ds->num_ports; p++)
475 if (dsa_is_user_port(ds, p))
476 mask |= BIT(p);
477
478 return mask;
479 }
480
481 /* Return the local port used to reach an arbitrary switch device */
482 static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
483 {
484 struct dsa_switch_tree *dst = ds->dst;
485 struct dsa_link *dl;
486
487 list_for_each_entry(dl, &dst->rtable, list)
488 if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
489 return dl->dp->index;
490
491 return ds->num_ports;
492 }
493
494 /* Return the local port used to reach an arbitrary switch port */
495 static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
496 int port)
497 {
498 if (device == ds->index)
499 return port;
500 else
501 return dsa_routing_port(ds, device);
502 }
503
504 /* Return the local port used to reach the dedicated CPU port */
505 static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
506 {
507 const struct dsa_port *dp = dsa_to_port(ds, port);
508 const struct dsa_port *cpu_dp = dp->cpu_dp;
509
510 if (!cpu_dp)
511 return port;
512
513 return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
514 }
515
516 /* Return true if this is the local port used to reach the CPU port */
517 static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port)
518 {
519 if (dsa_is_unused_port(ds, port))
520 return false;
521
522 return port == dsa_upstream_port(ds, port);
523 }
524
525 /* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning
526 * that the routing port from @downstream_ds to @upstream_ds is also the port
527 * which @downstream_ds uses to reach its dedicated CPU.
528 */
529 static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds,
530 struct dsa_switch *downstream_ds)
531 {
532 int routing_port;
533
534 if (upstream_ds == downstream_ds)
535 return true;
536
537 routing_port = dsa_routing_port(downstream_ds, upstream_ds->index);
538
539 return dsa_is_upstream_port(downstream_ds, routing_port);
540 }
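/*
 * Worked example (hypothetical two-switch daisy chain): switch 0 owns the
 * CPU port and connects to switch 1 over a DSA link. Seen from switch 1,
 * dsa_routing_port(ds1, 0) returns the port facing switch 0,
 * dsa_towards_port(ds1, 0, 3) returns that same port because the target
 * lives on another switch, and dsa_upstream_port(ds1, port) also resolves
 * to it, since the dedicated CPU port is reached through switch 0. In that
 * setup dsa_switch_is_upstream_of(ds0, ds1) is true and the reverse is not.
 */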
541
542 static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
543 {
544 const struct dsa_switch *ds = dp->ds;
545
546 if (ds->vlan_filtering_is_global)
547 return ds->vlan_filtering;
548 else
549 return dp->vlan_filtering;
550 }
551
552 static inline
553 struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
554 {
555 if (!dp->bridge_dev)
556 return NULL;
557
558 if (dp->lag_dev)
559 return dp->lag_dev;
560 else if (dp->hsr_dev)
561 return dp->hsr_dev;
562
563 return dp->slave;
564 }
565
566 typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
567 bool is_static, void *data);
568 struct dsa_switch_ops {
569 /*
570 * Tagging protocol helpers called for the CPU ports and DSA links.
571 * @get_tag_protocol retrieves the initial tagging protocol and is
572 * mandatory. Switches which can operate using multiple tagging
573 * protocols should implement @change_tag_protocol and report in
574 * @get_tag_protocol the tagger in current use.
575 */
576 enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
577 int port,
578 enum dsa_tag_protocol mprot);
579 int (*change_tag_protocol)(struct dsa_switch *ds, int port,
580 enum dsa_tag_protocol proto);
581
582 int (*setup)(struct dsa_switch *ds);
583 void (*teardown)(struct dsa_switch *ds);
584 u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
585
586 /*
587 * Access to the switch's PHY registers.
588 */
589 int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
590 int (*phy_write)(struct dsa_switch *ds, int port,
591 int regnum, u16 val);
592
593 /*
594 * Link state adjustment (called from libphy)
595 */
596 void (*adjust_link)(struct dsa_switch *ds, int port,
597 struct phy_device *phydev);
598 void (*fixed_link_update)(struct dsa_switch *ds, int port,
599 struct fixed_phy_status *st);
600
601 /*
602 * PHYLINK integration
603 */
604 void (*phylink_validate)(struct dsa_switch *ds, int port,
605 unsigned long *supported,
606 struct phylink_link_state *state);
607 int (*phylink_mac_link_state)(struct dsa_switch *ds, int port,
608 struct phylink_link_state *state);
609 void (*phylink_mac_config)(struct dsa_switch *ds, int port,
610 unsigned int mode,
611 const struct phylink_link_state *state);
612 void (*phylink_mac_an_restart)(struct dsa_switch *ds, int port);
613 void (*phylink_mac_link_down)(struct dsa_switch *ds, int port,
614 unsigned int mode,
615 phy_interface_t interface);
616 void (*phylink_mac_link_up)(struct dsa_switch *ds, int port,
617 unsigned int mode,
618 phy_interface_t interface,
619 struct phy_device *phydev,
620 int speed, int duplex,
621 bool tx_pause, bool rx_pause);
622 void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
623 struct phylink_link_state *state);
624 /*
625 * Port statistics counters.
626 */
627 void (*get_strings)(struct dsa_switch *ds, int port,
628 u32 stringset, uint8_t *data);
629 void (*get_ethtool_stats)(struct dsa_switch *ds,
630 int port, uint64_t *data);
631 int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
632 void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
633 int port, uint64_t *data);
634 void (*get_stats64)(struct dsa_switch *ds, int port,
635 struct rtnl_link_stats64 *s);
636 void (*self_test)(struct dsa_switch *ds, int port,
637 struct ethtool_test *etest, u64 *data);
638
639 /*
640 * ethtool Wake-on-LAN
641 */
642 void (*get_wol)(struct dsa_switch *ds, int port,
643 struct ethtool_wolinfo *w);
644 int (*set_wol)(struct dsa_switch *ds, int port,
645 struct ethtool_wolinfo *w);
646
647 /*
648 * ethtool timestamp info
649 */
650 int (*get_ts_info)(struct dsa_switch *ds, int port,
651 struct ethtool_ts_info *ts);
652
653 /*
654 * Suspend and resume
655 */
656 int (*suspend)(struct dsa_switch *ds);
657 int (*resume)(struct dsa_switch *ds);
658
659 /*
660 * Port enable/disable
661 */
662 int (*port_enable)(struct dsa_switch *ds, int port,
663 struct phy_device *phy);
664 void (*port_disable)(struct dsa_switch *ds, int port);
665
666 /*
667 * Port's MAC EEE settings
668 */
669 int (*set_mac_eee)(struct dsa_switch *ds, int port,
670 struct ethtool_eee *e);
671 int (*get_mac_eee)(struct dsa_switch *ds, int port,
672 struct ethtool_eee *e);
673
674 /* EEPROM access */
675 int (*get_eeprom_len)(struct dsa_switch *ds);
676 int (*get_eeprom)(struct dsa_switch *ds,
677 struct ethtool_eeprom *eeprom, u8 *data);
678 int (*set_eeprom)(struct dsa_switch *ds,
679 struct ethtool_eeprom *eeprom, u8 *data);
680
681 /*
682 * Register access.
683 */
684 int (*get_regs_len)(struct dsa_switch *ds, int port);
685 void (*get_regs)(struct dsa_switch *ds, int port,
686 struct ethtool_regs *regs, void *p);
687
688 /*
689 * Upper device tracking.
690 */
691 int (*port_prechangeupper)(struct dsa_switch *ds, int port,
692 struct netdev_notifier_changeupper_info *info);
693
694 /*
695 * Bridge integration
696 */
697 int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
698 int (*port_bridge_join)(struct dsa_switch *ds, int port,
699 struct net_device *bridge);
700 void (*port_bridge_leave)(struct dsa_switch *ds, int port,
701 struct net_device *bridge);
702 /* Called right after .port_bridge_join() */
703 int (*port_bridge_tx_fwd_offload)(struct dsa_switch *ds, int port,
704 struct net_device *bridge,
705 int bridge_num);
706 /* Called right before .port_bridge_leave() */
707 void (*port_bridge_tx_fwd_unoffload)(struct dsa_switch *ds, int port,
708 struct net_device *bridge,
709 int bridge_num);
710 void (*port_stp_state_set)(struct dsa_switch *ds, int port,
711 u8 state);
712 void (*port_fast_age)(struct dsa_switch *ds, int port);
713 int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
714 struct switchdev_brport_flags flags,
715 struct netlink_ext_ack *extack);
716 int (*port_bridge_flags)(struct dsa_switch *ds, int port,
717 struct switchdev_brport_flags flags,
718 struct netlink_ext_ack *extack);
719
720 /*
721 * VLAN support
722 */
723 int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
724 bool vlan_filtering,
725 struct netlink_ext_ack *extack);
726 int (*port_vlan_add)(struct dsa_switch *ds, int port,
727 const struct switchdev_obj_port_vlan *vlan,
728 struct netlink_ext_ack *extack);
729 int (*port_vlan_del)(struct dsa_switch *ds, int port,
730 const struct switchdev_obj_port_vlan *vlan);
731 /*
732 * Forwarding database
733 */
734 int (*port_fdb_add)(struct dsa_switch *ds, int port,
735 const unsigned char *addr, u16 vid);
736 int (*port_fdb_del)(struct dsa_switch *ds, int port,
737 const unsigned char *addr, u16 vid);
738 int (*port_fdb_dump)(struct dsa_switch *ds, int port,
739 dsa_fdb_dump_cb_t *cb, void *data);
740
741 /*
742 * Multicast database
743 */
744 int (*port_mdb_add)(struct dsa_switch *ds, int port,
745 const struct switchdev_obj_port_mdb *mdb);
746 int (*port_mdb_del)(struct dsa_switch *ds, int port,
747 const struct switchdev_obj_port_mdb *mdb);
748 /*
749 * RXNFC
750 */
751 int (*get_rxnfc)(struct dsa_switch *ds, int port,
752 struct ethtool_rxnfc *nfc, u32 *rule_locs);
753 int (*set_rxnfc)(struct dsa_switch *ds, int port,
754 struct ethtool_rxnfc *nfc);
755
756 /*
757 * TC integration
758 */
759 int (*cls_flower_add)(struct dsa_switch *ds, int port,
760 struct flow_cls_offload *cls, bool ingress);
761 int (*cls_flower_del)(struct dsa_switch *ds, int port,
762 struct flow_cls_offload *cls, bool ingress);
763 int (*cls_flower_stats)(struct dsa_switch *ds, int port,
764 struct flow_cls_offload *cls, bool ingress);
765 int (*port_mirror_add)(struct dsa_switch *ds, int port,
766 struct dsa_mall_mirror_tc_entry *mirror,
767 bool ingress);
768 void (*port_mirror_del)(struct dsa_switch *ds, int port,
769 struct dsa_mall_mirror_tc_entry *mirror);
770 int (*port_policer_add)(struct dsa_switch *ds, int port,
771 struct dsa_mall_policer_tc_entry *policer);
772 void (*port_policer_del)(struct dsa_switch *ds, int port);
773 int (*port_setup_tc)(struct dsa_switch *ds, int port,
774 enum tc_setup_type type, void *type_data);
775
776 /*
777 * Cross-chip operations
778 */
779 int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index,
780 int sw_index, int port,
781 struct net_device *br);
782 void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
783 int sw_index, int port,
784 struct net_device *br);
785 int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
786 int port);
787 int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
788 int port, struct net_device *lag,
789 struct netdev_lag_upper_info *info);
790 int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
791 int port, struct net_device *lag);
792
793 /*
794 * PTP functionality
795 */
796 int (*port_hwtstamp_get)(struct dsa_switch *ds, int port,
797 struct ifreq *ifr);
798 int (*port_hwtstamp_set)(struct dsa_switch *ds, int port,
799 struct ifreq *ifr);
800 void (*port_txtstamp)(struct dsa_switch *ds, int port,
801 struct sk_buff *skb);
802 bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
803 struct sk_buff *skb, unsigned int type);
804
805 /* Devlink parameters, etc */
806 int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
807 struct devlink_param_gset_ctx *ctx);
808 int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
809 struct devlink_param_gset_ctx *ctx);
810 int (*devlink_info_get)(struct dsa_switch *ds,
811 struct devlink_info_req *req,
812 struct netlink_ext_ack *extack);
813 int (*devlink_sb_pool_get)(struct dsa_switch *ds,
814 unsigned int sb_index, u16 pool_index,
815 struct devlink_sb_pool_info *pool_info);
816 int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index,
817 u16 pool_index, u32 size,
818 enum devlink_sb_threshold_type threshold_type,
819 struct netlink_ext_ack *extack);
820 int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port,
821 unsigned int sb_index, u16 pool_index,
822 u32 *p_threshold);
823 int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port,
824 unsigned int sb_index, u16 pool_index,
825 u32 threshold,
826 struct netlink_ext_ack *extack);
827 int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port,
828 unsigned int sb_index, u16 tc_index,
829 enum devlink_sb_pool_type pool_type,
830 u16 *p_pool_index, u32 *p_threshold);
831 int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port,
832 unsigned int sb_index, u16 tc_index,
833 enum devlink_sb_pool_type pool_type,
834 u16 pool_index, u32 threshold,
835 struct netlink_ext_ack *extack);
836 int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds,
837 unsigned int sb_index);
838 int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds,
839 unsigned int sb_index);
840 int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port,
841 unsigned int sb_index, u16 pool_index,
842 u32 *p_cur, u32 *p_max);
843 int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port,
844 unsigned int sb_index, u16 tc_index,
845 enum devlink_sb_pool_type pool_type,
846 u32 *p_cur, u32 *p_max);
847
848 /*
849 * MTU change functionality. Switches can also adjust their MRU through
850 * this method. By MTU, one understands the SDU (L2 payload) length.
851 * If the switch needs to account for the DSA tag on the CPU port, this
852 * method needs to do so privately.
853 */
854 int (*port_change_mtu)(struct dsa_switch *ds, int port,
855 int new_mtu);
856 int (*port_max_mtu)(struct dsa_switch *ds, int port);
857
858 /*
859 * LAG integration
860 */
861 int (*port_lag_change)(struct dsa_switch *ds, int port);
862 int (*port_lag_join)(struct dsa_switch *ds, int port,
863 struct net_device *lag,
864 struct netdev_lag_upper_info *info);
865 int (*port_lag_leave)(struct dsa_switch *ds, int port,
866 struct net_device *lag);
867
868 /*
869 * HSR integration
870 */
871 int (*port_hsr_join)(struct dsa_switch *ds, int port,
872 struct net_device *hsr);
873 int (*port_hsr_leave)(struct dsa_switch *ds, int port,
874 struct net_device *hsr);
875
876 /*
877 * MRP integration
878 */
879 int (*port_mrp_add)(struct dsa_switch *ds, int port,
880 const struct switchdev_obj_mrp *mrp);
881 int (*port_mrp_del)(struct dsa_switch *ds, int port,
882 const struct switchdev_obj_mrp *mrp);
883 int (*port_mrp_add_ring_role)(struct dsa_switch *ds, int port,
884 const struct switchdev_obj_ring_role_mrp *mrp);
885 int (*port_mrp_del_ring_role)(struct dsa_switch *ds, int port,
886 const struct switchdev_obj_ring_role_mrp *mrp);
887
888 /*
889 * tag_8021q operations
890 */
891 int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
892 u16 flags);
893 int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
894 };
895
896 #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
897 DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, \
898 dsa_devlink_param_get, dsa_devlink_param_set, NULL)
899
900 int dsa_devlink_param_get(struct devlink *dl, u32 id,
901 struct devlink_param_gset_ctx *ctx);
902 int dsa_devlink_param_set(struct devlink *dl, u32 id,
903 struct devlink_param_gset_ctx *ctx);
904 int dsa_devlink_params_register(struct dsa_switch *ds,
905 const struct devlink_param *params,
906 size_t params_count);
907 void dsa_devlink_params_unregister(struct dsa_switch *ds,
908 const struct devlink_param *params,
909 size_t params_count);
910 int dsa_devlink_resource_register(struct dsa_switch *ds,
911 const char *resource_name,
912 u64 resource_size,
913 u64 resource_id,
914 u64 parent_resource_id,
915 const struct devlink_resource_size_params *size_params);
916
917 void dsa_devlink_resources_unregister(struct dsa_switch *ds);
918
919 void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
920 u64 resource_id,
921 devlink_resource_occ_get_t *occ_get,
922 void *occ_get_priv);
923 void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
924 u64 resource_id);
925 struct devlink_region *
926 dsa_devlink_region_create(struct dsa_switch *ds,
927 const struct devlink_region_ops *ops,
928 u32 region_max_snapshots, u64 region_size);
929 struct devlink_region *
930 dsa_devlink_port_region_create(struct dsa_switch *ds,
931 int port,
932 const struct devlink_port_region_ops *ops,
933 u32 region_max_snapshots, u64 region_size);
934 void dsa_devlink_region_destroy(struct devlink_region *region);
935
936 struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);
937
938 struct dsa_devlink_priv {
939 struct dsa_switch *ds;
940 };
941
942 static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
943 {
944 struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
945
946 return dl_priv->ds;
947 }
948
949 static inline
950 struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
951 {
952 struct devlink *dl = port->devlink;
953 struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
954
955 return dl_priv->ds;
956 }
957
958 static inline int dsa_devlink_port_to_port(struct devlink_port *port)
959 {
960 return port->index;
961 }
962
963 struct dsa_switch_driver {
964 struct list_head list;
965 const struct dsa_switch_ops *ops;
966 };
967
968 struct net_device *dsa_dev_to_net_device(struct device *dev);
969
970 /* Keep inline for faster access in hot path */
971 static inline bool netdev_uses_dsa(const struct net_device *dev)
972 {
973 #if IS_ENABLED(CONFIG_NET_DSA)
974 return dev->dsa_ptr && dev->dsa_ptr->rcv;
975 #endif
976 return false;
977 }
978
979 /* All DSA tags that push the EtherType to the right (basically all except tail
980 * tags, which don't break dissection) can be treated the same from the
981 * perspective of the flow dissector.
982 *
983 * We need to return:
984 * - offset: the (B - A) difference between:
985 * A. the position of the real EtherType and
986 * B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
987 * after the normal EtherType was supposed to be)
988 * The offset in bytes is exactly equal to the tagger overhead (and half of
989 * that, in __be16 shorts).
990 *
991 * - proto: the value of the real EtherType.
992 */
993 static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
994 __be16 *proto, int *offset)
995 {
996 #if IS_ENABLED(CONFIG_NET_DSA)
997 const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
998 int tag_len = ops->needed_headroom;
999
1000 *offset = tag_len;
1001 *proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
1002 #endif
1003 }
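/*
 * Worked example: for a tagger that inserts a 4-byte header ahead of the
 * EtherType (needed_headroom = 4), the real EtherType sits 4 bytes deeper
 * than the dissector expects, so *offset = 4 and *proto is read as the
 * __be16 at index (4 / 2) - 1 = 1 from skb->data, i.e. bytes 2-3 of the
 * tag that skb->data currently points at.
 */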
1004
1005 #if IS_ENABLED(CONFIG_NET_DSA)
1006 static inline int __dsa_netdevice_ops_check(struct net_device *dev)
1007 {
1008 int err = -EOPNOTSUPP;
1009
1010 if (!dev->dsa_ptr)
1011 return err;
1012
1013 if (!dev->dsa_ptr->netdev_ops)
1014 return err;
1015
1016 return 0;
1017 }
1018
1019 static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
1020 int cmd)
1021 {
1022 const struct dsa_netdevice_ops *ops;
1023 int err;
1024
1025 err = __dsa_netdevice_ops_check(dev);
1026 if (err)
1027 return err;
1028
1029 ops = dev->dsa_ptr->netdev_ops;
1030
1031 return ops->ndo_eth_ioctl(dev, ifr, cmd);
1032 }
1033 #else
1034 static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
1035 int cmd)
1036 {
1037 return -EOPNOTSUPP;
1038 }
1039 #endif
1040
1041 void dsa_unregister_switch(struct dsa_switch *ds);
1042 int dsa_register_switch(struct dsa_switch *ds);
1043 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
1044 #ifdef CONFIG_PM_SLEEP
1045 int dsa_switch_suspend(struct dsa_switch *ds);
1046 int dsa_switch_resume(struct dsa_switch *ds);
1047 #else
1048 static inline int dsa_switch_suspend(struct dsa_switch *ds)
1049 {
1050 return 0;
1051 }
1052 static inline int dsa_switch_resume(struct dsa_switch *ds)
1053 {
1054 return 0;
1055 }
1056 #endif /* CONFIG_PM_SLEEP */
1057
1058 #if IS_ENABLED(CONFIG_NET_DSA)
1059 bool dsa_slave_dev_check(const struct net_device *dev);
1060 #else
1061 static inline bool dsa_slave_dev_check(const struct net_device *dev)
1062 {
1063 return false;
1064 }
1065 #endif
1066
1067 netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
1068 int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data);
1069 int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data);
1070 int dsa_port_get_phy_sset_count(struct dsa_port *dp);
1071 void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
1072
1073 struct dsa_tag_driver {
1074 const struct dsa_device_ops *ops;
1075 struct list_head list;
1076 struct module *owner;
1077 };
1078
1079 void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
1080 unsigned int count,
1081 struct module *owner);
1082 void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
1083 unsigned int count);
1084
1085 #define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count) \
1086 static int __init dsa_tag_driver_module_init(void) \
1087 { \
1088 dsa_tag_drivers_register(__dsa_tag_drivers_array, __count, \
1089 THIS_MODULE); \
1090 return 0; \
1091 } \
1092 module_init(dsa_tag_driver_module_init); \
1093 \
1094 static void __exit dsa_tag_driver_module_exit(void) \
1095 { \
1096 dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count); \
1097 } \
1098 module_exit(dsa_tag_driver_module_exit)
1099
1100 /**
1101 * module_dsa_tag_drivers() - Helper macro for registering DSA tag
1102 * drivers
1103 * @__ops_array: Array of tag driver structures
1104 *
1105 * Helper macro for DSA tag drivers which do not do anything special
1106 * in module init/exit. Each module may only use this macro once, and
1107 * calling it replaces module_init() and module_exit().
1108 */
1109 #define module_dsa_tag_drivers(__ops_array) \
1110 dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))
1111
1112 #define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops
1113
1114 /* Create a static structure from which we can build a linked list of
1115 * dsa_tag drivers
1116 */
1117 #define DSA_TAG_DRIVER(__ops) \
1118 static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = { \
1119 .ops = &__ops, \
1120 }
1121
1122 /**
1123 * module_dsa_tag_driver() - Helper macro for registering a single DSA tag
1124 * driver
1125 * @__ops: Single tag driver structure
1126 *
1127 * Helper macro for DSA tag drivers which do not do anything special
1128 * in module init/exit. Each module may only use this macro once, and
1129 * calling it replaces module_init() and module_exit().
1130 */
1131 #define module_dsa_tag_driver(__ops) \
1132 DSA_TAG_DRIVER(__ops); \
1133 \
1134 static struct dsa_tag_driver *dsa_tag_driver_array[] = { \
1135 &DSA_TAG_DRIVER_NAME(__ops) \
1136 }; \
1137 module_dsa_tag_drivers(dsa_tag_driver_array)
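/*
 * Minimal sketch (hypothetical tagger; identifiers such as DSA_TAG_PROTO_FOO
 * and the xmit/rcv callbacks are assumptions) of how a single-protocol tag
 * driver module usually ties these macros together:
 *
 *	static const struct dsa_device_ops foo_netdev_ops = {
 *		.name			= "foo",
 *		.proto			= DSA_TAG_PROTO_FOO,
 *		.xmit			= foo_tag_xmit,
 *		.rcv			= foo_tag_rcv,
 *		.needed_headroom	= 4,
 *	};
 *
 *	MODULE_LICENSE("GPL");
 *	MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_FOO);
 *	module_dsa_tag_driver(foo_netdev_ops);
 *
 * module_dsa_tag_driver() wraps the ops in a struct dsa_tag_driver and
 * registers it from module init, so the module needs no explicit init/exit.
 */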
1138 #endif
1139