]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - net/dsa/dsa_priv.h
net: dsa: don't disable multicast flooding to the CPU even without an IGMP querier
[mirror_ubuntu-jammy-kernel.git] / net / dsa / dsa_priv.h
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * net/dsa/dsa_priv.h - Hardware switch handling
4 * Copyright (c) 2008-2009 Marvell Semiconductor
5 */
6
7 #ifndef __DSA_PRIV_H
8 #define __DSA_PRIV_H
9
10 #include <linux/if_bridge.h>
11 #include <linux/phy.h>
12 #include <linux/netdevice.h>
13 #include <linux/netpoll.h>
14 #include <net/dsa.h>
15 #include <net/gro_cells.h>
16
17 #define DSA_MAX_NUM_OFFLOADING_BRIDGES BITS_PER_LONG
18
/* Event types carried over the DSA switch fabric notifier chain (dispatched
 * via dsa_tree_notify() / dsa_broadcast()). Each event is paired with a
 * matching dsa_notifier_*_info argument structure defined below.
 */
enum {
	DSA_NOTIFIER_AGEING_TIME,
	DSA_NOTIFIER_BRIDGE_JOIN,
	DSA_NOTIFIER_BRIDGE_LEAVE,
	DSA_NOTIFIER_FDB_ADD,
	DSA_NOTIFIER_FDB_DEL,
	DSA_NOTIFIER_HOST_FDB_ADD,
	DSA_NOTIFIER_HOST_FDB_DEL,
	DSA_NOTIFIER_HSR_JOIN,
	DSA_NOTIFIER_HSR_LEAVE,
	DSA_NOTIFIER_LAG_CHANGE,
	DSA_NOTIFIER_LAG_JOIN,
	DSA_NOTIFIER_LAG_LEAVE,
	DSA_NOTIFIER_MDB_ADD,
	DSA_NOTIFIER_MDB_DEL,
	DSA_NOTIFIER_HOST_MDB_ADD,
	DSA_NOTIFIER_HOST_MDB_DEL,
	DSA_NOTIFIER_VLAN_ADD,
	DSA_NOTIFIER_VLAN_DEL,
	DSA_NOTIFIER_MTU,
	DSA_NOTIFIER_TAG_PROTO,
	DSA_NOTIFIER_MRP_ADD,
	DSA_NOTIFIER_MRP_DEL,
	DSA_NOTIFIER_MRP_ADD_RING_ROLE,
	DSA_NOTIFIER_MRP_DEL_RING_ROLE,
	DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
	DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
};
47
/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
	/* New address ageing time being applied to the tree */
	unsigned int ageing_time;
};

/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
	/* Bridge net_device being joined or left */
	struct net_device *br;
	/* Identity of the originating port: tree, switch within the tree,
	 * and port within the switch.
	 */
	int tree_index;
	int sw_index;
	int port;
};

/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
	int sw_index;
	int port;
	/* MAC address of the FDB entry */
	const unsigned char *addr;
	/* VLAN ID of the FDB entry */
	u16 vid;
};

/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
	/* Multicast database entry from switchdev */
	const struct switchdev_obj_port_mdb *mdb;
	int sw_index;
	int port;
};

/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
	/* LAG net_device being joined or left */
	struct net_device *lag;
	int sw_index;
	int port;

	struct netdev_lag_upper_info *info;
};

/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
	const struct switchdev_obj_port_vlan *vlan;
	int sw_index;
	int port;
	/* For reporting errors back to user space */
	struct netlink_ext_ack *extack;
};

/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
	/* NOTE(review): name suggests that when true only the exact
	 * sw_index/port pair is updated — confirm in dsa_port_mtu_change().
	 */
	bool targeted_match;
	int sw_index;
	int port;
	/* New MTU to apply */
	int mtu;
};
100
/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
	/* Tagging protocol ops being switched to
	 * (see dsa_tree_change_tag_proto())
	 */
	const struct dsa_device_ops *tag_ops;
};

/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_info {
	/* MRP instance from switchdev */
	const struct switchdev_obj_mrp *mrp;
	int sw_index;
	int port;
};

/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_ring_role_info {
	/* MRP ring role from switchdev */
	const struct switchdev_obj_ring_role_mrp *mrp;
	int sw_index;
	int port;
};

/* DSA_NOTIFIER_TAG_8021Q_VLAN_* */
struct dsa_notifier_tag_8021q_vlan_info {
	int tree_index;
	int sw_index;
	int port;
	/* tag_8021q VLAN being added or deleted */
	u16 vid;
};
127
/* Deferred-work context for handling switchdev notifier events */
struct dsa_switchdev_event_work {
	/* Switch and port the event targets */
	struct dsa_switch *ds;
	int port;
	struct net_device *dev;
	/* Work item under which the event is processed */
	struct work_struct work;
	/* SWITCHDEV_* event code */
	unsigned long event;
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];
	u16 vid;
	/* True for host (DSA_NOTIFIER_HOST_FDB_*) addresses */
	bool host_addr;
};

/* DSA_NOTIFIER_HSR_* */
struct dsa_notifier_hsr_info {
	/* HSR net_device being joined or left */
	struct net_device *hsr;
	int sw_index;
	int port;
};

/* Private data of a DSA user ("slave") net_device */
struct dsa_slave_priv {
	/* Copy of CPU port xmit for faster access in slave transmit hot path */
	struct sk_buff * (*xmit)(struct sk_buff *skb,
				 struct net_device *dev);

	/* GRO aggregation contexts for the receive path */
	struct gro_cells gcells;

	/* DSA port data, such as switch, port index, etc. */
	struct dsa_port *dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *netpoll;
#endif

	/* TC context */
	struct list_head mall_tc_list;
};
166
167 /* dsa.c */
168 const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
169 void dsa_tag_driver_put(const struct dsa_device_ops *ops);
170 const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
171
172 bool dsa_schedule_work(struct work_struct *work);
173 const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
174
175 static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
176 {
177 return ops->needed_headroom + ops->needed_tailroom;
178 }
179
180 /* master.c */
181 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
182 void dsa_master_teardown(struct net_device *dev);
183
184 static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
185 int device, int port)
186 {
187 struct dsa_port *cpu_dp = dev->dsa_ptr;
188 struct dsa_switch_tree *dst = cpu_dp->dst;
189 struct dsa_port *dp;
190
191 list_for_each_entry(dp, &dst->ports, list)
192 if (dp->ds->index == device && dp->index == port &&
193 dp->type == DSA_PORT_TYPE_USER)
194 return dp->slave;
195
196 return NULL;
197 }
198
199 /* port.c */
200 void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
201 const struct dsa_device_ops *tag_ops);
202 int dsa_port_set_state(struct dsa_port *dp, u8 state);
203 int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
204 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
205 void dsa_port_disable_rt(struct dsa_port *dp);
206 void dsa_port_disable(struct dsa_port *dp);
207 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
208 struct netlink_ext_ack *extack);
209 void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br);
210 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
211 int dsa_port_lag_change(struct dsa_port *dp,
212 struct netdev_lag_lower_state_info *linfo);
213 int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
214 struct netdev_lag_upper_info *uinfo,
215 struct netlink_ext_ack *extack);
216 void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
217 void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
218 int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
219 struct netlink_ext_ack *extack);
220 bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
221 int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
222 int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
223 bool targeted_match);
224 int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
225 u16 vid);
226 int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
227 u16 vid);
228 int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
229 u16 vid);
230 int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
231 u16 vid);
232 int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
233 int dsa_port_mdb_add(const struct dsa_port *dp,
234 const struct switchdev_obj_port_mdb *mdb);
235 int dsa_port_mdb_del(const struct dsa_port *dp,
236 const struct switchdev_obj_port_mdb *mdb);
237 int dsa_port_host_mdb_add(const struct dsa_port *dp,
238 const struct switchdev_obj_port_mdb *mdb);
239 int dsa_port_host_mdb_del(const struct dsa_port *dp,
240 const struct switchdev_obj_port_mdb *mdb);
241 int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
242 struct switchdev_brport_flags flags,
243 struct netlink_ext_ack *extack);
244 int dsa_port_bridge_flags(const struct dsa_port *dp,
245 struct switchdev_brport_flags flags,
246 struct netlink_ext_ack *extack);
247 int dsa_port_vlan_add(struct dsa_port *dp,
248 const struct switchdev_obj_port_vlan *vlan,
249 struct netlink_ext_ack *extack);
250 int dsa_port_vlan_del(struct dsa_port *dp,
251 const struct switchdev_obj_port_vlan *vlan);
252 int dsa_port_mrp_add(const struct dsa_port *dp,
253 const struct switchdev_obj_mrp *mrp);
254 int dsa_port_mrp_del(const struct dsa_port *dp,
255 const struct switchdev_obj_mrp *mrp);
256 int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
257 const struct switchdev_obj_ring_role_mrp *mrp);
258 int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
259 const struct switchdev_obj_ring_role_mrp *mrp);
260 int dsa_port_link_register_of(struct dsa_port *dp);
261 void dsa_port_link_unregister_of(struct dsa_port *dp);
262 int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
263 void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
264 int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid);
265 void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid);
266 extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
267
268 static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
269 const struct net_device *dev)
270 {
271 return dsa_port_to_bridge_port(dp) == dev;
272 }
273
274 static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
275 const struct net_device *bridge_dev)
276 {
277 /* DSA ports connected to a bridge, and event was emitted
278 * for the bridge.
279 */
280 return dp->bridge_dev == bridge_dev;
281 }
282
283 /* Returns true if any port of this tree offloads the given net_device */
284 static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
285 const struct net_device *dev)
286 {
287 struct dsa_port *dp;
288
289 list_for_each_entry(dp, &dst->ports, list)
290 if (dsa_port_offloads_bridge_port(dp, dev))
291 return true;
292
293 return false;
294 }
295
296 /* Returns true if any port of this tree offloads the given bridge */
297 static inline bool dsa_tree_offloads_bridge(struct dsa_switch_tree *dst,
298 const struct net_device *bridge_dev)
299 {
300 struct dsa_port *dp;
301
302 list_for_each_entry(dp, &dst->ports, list)
303 if (dsa_port_offloads_bridge(dp, bridge_dev))
304 return true;
305
306 return false;
307 }
308
309 /* slave.c */
310 extern const struct dsa_device_ops notag_netdev_ops;
311 extern struct notifier_block dsa_slave_switchdev_notifier;
312 extern struct notifier_block dsa_slave_switchdev_blocking_notifier;
313
314 void dsa_slave_mii_bus_init(struct dsa_switch *ds);
315 int dsa_slave_create(struct dsa_port *dp);
316 void dsa_slave_destroy(struct net_device *slave_dev);
317 int dsa_slave_suspend(struct net_device *slave_dev);
318 int dsa_slave_resume(struct net_device *slave_dev);
319 int dsa_slave_register_notifier(void);
320 void dsa_slave_unregister_notifier(void);
321 void dsa_slave_setup_tagger(struct net_device *slave);
322 int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
323
324 static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
325 {
326 struct dsa_slave_priv *p = netdev_priv(dev);
327
328 return p->dp;
329 }
330
331 static inline struct net_device *
332 dsa_slave_to_master(const struct net_device *dev)
333 {
334 struct dsa_port *dp = dsa_slave_to_port(dev);
335
336 return dp->cpu_dp->master;
337 }
338
/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 *
 * Called on the RX path; returns the (possibly modified) skb, or NULL
 * when skb_vlan_untag() consumed it on failure.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);
	struct net_device *br = dp->bridge_dev;
	struct net_device *dev = skb->dev;
	struct net_device *upper_dev;
	u16 vid, pvid, proto;
	int err;

	/* Standalone port, or VLAN-aware bridge (which untags on its own):
	 * nothing to do.
	 */
	if (!br || br_vlan_enabled(br))
		return skb;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return skb;

	/* Move VLAN tag from data to hwaccel */
	if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
		skb = skb_vlan_untag(skb);
		if (!skb)
			return NULL;
	}

	/* Frame carries no VLAN tag at all: leave it alone */
	if (!skb_vlan_tag_present(skb))
		return skb;

	vid = skb_vlan_tag_get_id(skb);

	/* We already run under an RCU read-side critical section since
	 * we are called from netif_receive_skb_list_internal().
	 */
	err = br_vlan_get_pvid_rcu(dev, &pvid);
	if (err)
		return skb;

	/* Only the pvid would have been sent untagged by the bridge */
	if (vid != pvid)
		return skb;

	/* The sad part about attempting to untag from DSA is that we
	 * don't know, unless we check, if the skb will end up in
	 * the bridge's data path - br_allowed_ingress() - or not.
	 * For example, there might be an 8021q upper for the
	 * default_pvid of the bridge, which will steal VLAN-tagged traffic
	 * from the bridge's data path. This is a configuration that DSA
	 * supports because vlan_filtering is 0. In that case, we should
	 * definitely keep the tag, to make sure it keeps working.
	 */
	upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
	if (upper_dev)
		return skb;

	__vlan_hwaccel_clear_tag(skb);

	return skb;
}
397
/* For switches without hardware support for DSA tagging to be able
 * to support termination through the bridge.
 *
 * Under RCU, find a bridged user port of @master's tree which is a member
 * of @vid, so that traffic received without a port-identifying tag can
 * still be attributed to a suitable slave. Returns NULL if none matches.
 */
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct bridge_vlan_info vinfo;
	struct net_device *slave;
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->type != DSA_PORT_TYPE_USER)
			continue;

		/* Only bridged ports can terminate through the bridge */
		if (!dp->bridge_dev)
			continue;

		/* Port must be in an STP state where the bridge may learn
		 * from or forward to it.
		 */
		if (dp->stp_state != BR_STATE_LEARNING &&
		    dp->stp_state != BR_STATE_FORWARDING)
			continue;

		/* Since the bridge might learn this packet, keep the CPU port
		 * affinity with the port that will be used for the reply on
		 * xmit.
		 */
		if (dp->cpu_dp != cpu_dp)
			continue;

		slave = dp->slave;

		/* Skip ports that are not members of @vid */
		err = br_vlan_get_info_rcu(slave, vid, &vinfo);
		if (err)
			continue;

		return slave;
	}

	return NULL;
}
440
441 /* If the ingress port offloads the bridge, we mark the frame as autonomously
442 * forwarded by hardware, so the software bridge doesn't forward in twice, back
443 * to us, because we already did. However, if we're in fallback mode and we do
444 * software bridging, we are not offloading it, therefore the dp->bridge_dev
445 * pointer is not populated, and flooding needs to be done by software (we are
446 * effectively operating in standalone ports mode).
447 */
448 static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
449 {
450 struct dsa_port *dp = dsa_slave_to_port(skb->dev);
451
452 skb->offload_fwd_mark = !!(dp->bridge_dev);
453 }
454
455 /* switch.c */
456 int dsa_switch_register_notifier(struct dsa_switch *ds);
457 void dsa_switch_unregister_notifier(struct dsa_switch *ds);
458
459 /* dsa2.c */
460 void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
461 void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
462 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
463 int dsa_broadcast(unsigned long e, void *v);
464 int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
465 struct net_device *master,
466 const struct dsa_device_ops *tag_ops,
467 const struct dsa_device_ops *old_tag_ops);
468
469 /* tag_8021q.c */
470 int dsa_tag_8021q_bridge_join(struct dsa_switch *ds,
471 struct dsa_notifier_bridge_info *info);
472 int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds,
473 struct dsa_notifier_bridge_info *info);
474 int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
475 struct dsa_notifier_tag_8021q_vlan_info *info);
476 int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
477 struct dsa_notifier_tag_8021q_vlan_info *info);
478
479 extern struct list_head dsa_tree_list;
480
481 #endif