// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/if_bridge.h>
#include <linux/list.h>
#include <net/arp.h>
#include <net/gre.h>
#include <net/lag.h>
#include <net/ndisc.h>
#include <net/ip6_tunnel.h>

#include "spectrum.h"
#include "spectrum_ipip.h"
#include "spectrum_span.h"
#include "spectrum_switchdev.h"

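/* Report how many SPAN entries are currently in use. Registered below as
 * the devlink occupancy getter for the SPAN resource.
 */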
static u64 mlxsw_sp_span_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;
	u64 occ = 0;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (mlxsw_sp->span.entries[i].ref_count)
			occ++;
	}

	return occ;
}

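/* Allocate the SPAN entry array, sized by the device's MAX_SPAN resource,
 * and register the occupancy getter with devlink.
 */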
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		INIT_LIST_HEAD(&curr->bound_ports_list);
		curr->id = i;
	}

	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
					  mlxsw_sp_span_occ_get, mlxsw_sp);

	return 0;
}

void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	int i;

	devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}

static int
mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};

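/* Resolve the destination MAC for the mirror encapsulation by looking up
 * (or creating) a neighbour entry for @pkey on @dev. If the neighbour is
 * not yet valid, resolution is kicked off and -ENOENT is returned; the
 * callers then treat the entry as unoffloadable until the parameters are
 * re-resolved (see mlxsw_sp_span_respin() below).
 */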
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

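/* Mark the requested mirror as valid but not offloaded: a NULL dest_port
 * is the convention that makes the configure/deconfigure callbacks
 * effectively no-ops for this entry.
 */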
static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}

static struct net_device *
mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
{
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(lag_dev, dev, iter)
		if (netif_carrier_ok(dev) &&
		    net_lag_port_dev_txable(dev) &&
		    mlxsw_sp_port_dev_check(dev))
			return dev;

	return NULL;
}

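/* Shared parameter resolution for tunnel destinations: starting from the
 * egress device of the underlay route, unwrap 802.1Q uppers, cross a
 * bridge to its egress port and pick a txable LAG slave, until an mlxsw
 * front-panel port is reached. Any hop that cannot be resolved makes the
 * entry unoffloadable.
 */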
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

#if IS_ENABLED(CONFIG_NET_IPGRE)
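/* Simulate routing of the mirrored packet: look up the route to the
 * tunnel destination and return the egress device, filling in the chosen
 * source address and, when one exists, the IPv4 gateway.
 */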
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* cannot offload if the route has an IPv6 gateway */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
#endif

#if IS_ENABLED(CONFIG_IPV6_GRE)
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
#endif

static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};

static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};

static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	if (sparms.dest_port) {
		if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
			netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		} else if (span_entry->ops->configure(span_entry, sparms)) {
			netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		}
	}

	span_entry->parms = sparms;
}

static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}

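/* Claim the first free slot in the SPAN table (ref_count of zero) and
 * configure it for @to_dev; returns NULL when all entries are in use.
 */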
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].ref_count) {
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	span_entry->ops = ops;
	span_entry->ref_count = 1;
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
}

struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *to_dev)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->to_dev == to_dev)
			return curr;
	}
	return NULL;
}

void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->ref_count && curr->id == span_id)
			return curr;
	}
	return NULL;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(span_entry);
	return 0;
}

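/* True iff @port is bound to some SPAN entry as an egress-mirrored port,
 * in which case its shared mirroring buffer must track the port MTU.
 */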
static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}

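/* The mirroring buffer is sized at 2.5x the MTU, converted to cells, plus
 * one cell of slack. The exact headroom factor is a hardware-specific
 * tuning choice (an assumption; it is not documented in this file).
 */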
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}

int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}

static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type,
				    struct mlxsw_sp_port *port,
				    bool bind)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (type == p->type &&
		    port->local_port == p->local_port &&
		    bind == p->bound)
			return p;
	return NULL;
}

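/* Program the MPAR register to (un)bind @port, in the direction given by
 * @type, to the analyzer entry identified by the SPAN entry's id.
 */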
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}

static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int i;
	int err;

	/* A given (source port, direction) can only be bound to one analyzer,
	 * so if a binding is requested, check for conflicts.
	 */
	if (bind)
		for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
			struct mlxsw_sp_span_entry *curr =
				&mlxsw_sp->span.entries[i];

			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
								port, bind))
				return -EEXIST;
		}

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	inspected_port->bound = bind;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}

static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
							     port, bind);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}

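/* Pick the first ops whose can_handle() accepts @to_dev; the order of
 * mlxsw_sp_span_entry_types thus determines precedence.
 */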
static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
		if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
			return mlxsw_sp_span_entry_types[i];

	return NULL;
}

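/* Set up a mirror from @from towards @to_dev. A minimal sketch of the
 * expected call pattern (hypothetical caller, shown for orientation only;
 * MLXSW_SP_SPAN_INGRESS is assumed to come from spectrum_span.h):
 *
 *	int span_id;
 *	int err;
 *
 *	err = mlxsw_sp_span_mirror_add(from_port, to_dev,
 *				       MLXSW_SP_SPAN_INGRESS, true, &span_id);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_sp_span_mirror_del(from_port, span_id,
 *				 MLXSW_SP_SPAN_INGRESS, true);
 */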
int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
			     const struct net_device *to_dev,
			     enum mlxsw_sp_span_type type, bool bind,
			     int *p_span_id)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_parms sparms = {NULL};
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
		return -EOPNOTSUPP;
	}

	err = ops->parms(to_dev, &sparms);
	if (err)
		return err;

	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
	if (err)
		goto err_port_bind;

	*p_span_id = span_entry->id;
	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}

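/* Re-resolve the parameters of every active SPAN entry and reconfigure
 * those whose resolved parameters no longer match what was programmed.
 * Intended to run under RTNL after events that may change the resolution,
 * e.g. route, neighbour, FDB or VLAN updates (the exact trigger set lives
 * with the callers, not in this file).
 */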
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	ASSERT_RTNL();
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!curr->ref_count)
			continue;

		err = curr->ops->parms(curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
}