]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
mlxsw: spectrum_span: Use struct_size() to simplify allocation
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_span.c
CommitLineData
9948a064
JP
1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
a629ef21 3
946a11e7 4#include <linux/if_bridge.h>
a629ef21 5#include <linux/list.h>
27cf76fe
PM
6#include <net/arp.h>
7#include <net/gre.h>
b5de82f3 8#include <net/lag.h>
8f08a528
PM
9#include <net/ndisc.h>
10#include <net/ip6_tunnel.h>
a629ef21
PM
11
12#include "spectrum.h"
27cf76fe 13#include "spectrum_ipip.h"
946a11e7
PM
14#include "spectrum_span.h"
15#include "spectrum_switchdev.h"
a629ef21 16
9a9f8d1e 17struct mlxsw_sp_span {
9a9f8d1e 18 int entries_count;
6627b93b 19 struct mlxsw_sp_span_entry entries[0];
9a9f8d1e
IS
20};
21
868678c5
DR
22static u64 mlxsw_sp_span_occ_get(void *priv)
23{
24 const struct mlxsw_sp *mlxsw_sp = priv;
25 u64 occ = 0;
26 int i;
27
9a9f8d1e
IS
28 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
29 if (mlxsw_sp->span->entries[i].ref_count)
868678c5
DR
30 occ++;
31 }
32
33 return occ;
34}
35
a629ef21
PM
36int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
37{
868678c5 38 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
9a9f8d1e 39 struct mlxsw_sp_span *span;
6627b93b 40 int i, entries_count;
a629ef21
PM
41
42 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
43 return -EIO;
44
6627b93b
IS
45 entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
46 span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
9a9f8d1e 47 if (!span)
a629ef21 48 return -ENOMEM;
6627b93b 49 span->entries_count = entries_count;
9a9f8d1e
IS
50 mlxsw_sp->span = span;
51
9a9f8d1e
IS
52 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
53 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
3546b03f
PM
54
55 INIT_LIST_HEAD(&curr->bound_ports_list);
56 curr->id = i;
57 }
a629ef21 58
868678c5
DR
59 devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
60 mlxsw_sp_span_occ_get, mlxsw_sp);
61
a629ef21
PM
62 return 0;
63}
64
65void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
66{
868678c5 67 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
a629ef21
PM
68 int i;
69
868678c5
DR
70 devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
71
9a9f8d1e
IS
72 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
73 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
a629ef21
PM
74
75 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
76 }
9a9f8d1e 77 kfree(mlxsw_sp->span);
a629ef21
PM
78}
79
/* Resolve mirroring parameters when the target is a physical mlxsw port:
 * the target itself is the analyzer port, no encapsulation is involved.
 */
static int
mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}
87
/* Program the hardware port analyzer (MPAT register) for a local-Ethernet
 * SPAN entry that mirrors directly to a physical port.
 */
static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
104
/* Disable the port analyzer entry in hardware (MPAT with enable=false).
 * Shared by all SPAN entry types; @span_type selects the analyzer flavor
 * that was originally programmed.
 */
static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	/* Best-effort teardown; the write result is intentionally ignored. */
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
118
169b5d95
PM
/* Tear down a local-Ethernet (physical port) SPAN entry. */
static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}
125
/* SPAN target type: a physical mlxsw front-panel port. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};
133
27cf76fe
PM
/* Resolve the destination MAC address for the encapsulated mirror traffic
 * by looking up (or creating) the neighbour entry for @pkey on @dev.
 * neigh_event_send() kicks the neighbour state machine so an unresolved
 * entry will eventually be resolved and a later respin can pick it up.
 * Returns 0 and fills @dmac on success, -ENOENT if not (yet) resolved.
 */
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	neigh_event_send(neigh, NULL);

	/* Read the hardware address under the neighbour lock so it cannot
	 * change underneath us.
	 */
	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}
160
/* Mark the parameters as "cannot be offloaded" (NULL destination port).
 * Returning 0 is deliberate: an unoffloadable mirror is not an error.
 */
static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}
167
946a11e7
PM
/* Walk through a VLAN-aware (802.1Q) bridge: find the egress bridge port
 * that the FDB maps @dmac to in VLAN *@p_vid (or the PVID if *@p_vid is 0).
 * On success *@p_vid is updated to the VID the mirrored packets should
 * carry (0 if the egress port transmits untagged).  Returns the egress
 * netdevice, or NULL if the mirror cannot be offloaded through this bridge.
 */
static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	/* The VID must exist on the bridge itself. */
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	/* The VID must also be configured on the egress port. */
	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}
196
/* Walk through a VLAN-unaware (802.1D) bridge: the FDB lookup alone picks
 * the egress port (VLAN 0 means "no VLAN filtering").
 */
static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}
203
/* Resolve a bridge master to the concrete egress netdevice the mirrored
 * traffic would leave through, and verify that the corresponding bridge
 * port is in the forwarding STP state.  Returns NULL if the mirror cannot
 * be offloaded (no egress port, foreign lower device, or port not
 * forwarding).
 */
static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		/* A VLAN tag on top of a VLAN-unaware bridge cannot be
		 * offloaded, hence the !*p_vid guard.
		 */
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}
237
e00698d1
PM
/* Peel one VLAN device: report its VID via *@p_vid and return the real
 * (lower) device.
 */
static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}
245
55c0211d
PM
246static struct net_device *
247mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
248{
249 struct net_device *dev;
250 struct list_head *iter;
251
252 netdev_for_each_lower_dev(lag_dev, dev, iter)
b5de82f3
PM
253 if (netif_carrier_ok(dev) &&
254 net_lag_port_dev_txable(dev) &&
255 mlxsw_sp_port_dev_check(dev))
55c0211d
PM
256 return dev;
257
258 return NULL;
259}
260
/* Common parameter resolution for tunnel (ERSPAN-like) targets.  Starting
 * from the underlay egress device @edev, resolve the next-hop MAC, then
 * peel VLAN / bridge / VLAN / LAG uppers in that order until a physical
 * mlxsw port is reached, collecting the egress VID along the way.  Fills
 * @sparmsp on success; any unresolvable step degrades the entry to
 * "unoffloadable" (dest_port == NULL) rather than failing.
 */
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	/* On-link destination: the next hop is the destination itself. */
	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		/* A second VLAN tag (Q-in-Q) cannot be offloaded. */
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
317
99db5229 318#if IS_ENABLED(CONFIG_NET_IPGRE)
36a1c3bd
PM
/* Route the IPv4 GRE tunnel's flow and return the underlay egress device.
 * *@saddrp is updated with the chosen source address and *@daddrp with the
 * gateway (next hop) when the route has an IPv4 gateway.  Returns NULL if
 * no usable unicast route exists or the gateway is IPv6-only.
 */
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* can not offload if route has an IPv6 gateway */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	/* Drop the route reference on every exit path past the lookup. */
	ip_rt_put(rt);
	return dev;
}
355
27cf76fe
PM
/* Resolve mirroring parameters for an IPv4 gretap target.  The tunnel is
 * only offloadable when it is up, has no GRE keys/checksums, uses a fixed
 * TTL, inherits TOS from the mirrored packet, and has a concrete
 * destination address; otherwise the entry is marked unoffloadable.
 */
static int
mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}
382
/* Program the hardware analyzer for an IPv4 gretap SPAN entry: MPAT base
 * fields, optional RSPAN VLAN tag, L2 encapsulation, and the IPv4 tunnel
 * header.
 */
static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
407
/* Tear down an IPv4 gretap (remote-Ethernet L3) SPAN entry. */
static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}
414
/* SPAN target type: IPv4 gretap tunnel device (ERSPAN-style mirror). */
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
99db5229 421#endif
27cf76fe 422
99db5229 423#if IS_ENABLED(CONFIG_IPV6_GRE)
8f08a528
PM
/* Route the IPv6 GRE tunnel's flow and return the underlay egress device.
 * *@saddrp receives the chosen source address and *@daddrp the next-hop
 * gateway.  Returns NULL if transmission is not allowed for the tunnel or
 * no usable route exists.
 */
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	/* dst_release() tolerates NULL, so the error path can share it. */
	dst_release(dst);
	return dev;
}
456
/* Resolve mirroring parameters for an IPv6 ip6gretap target.  Mirrors the
 * IPv4 variant: offloadable only when the tunnel is up, keyless and
 * checksum-less, uses a fixed hop limit, inherits the traffic class, and
 * has a concrete remote address.
 */
static int
mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}
483
/* Program the hardware analyzer for an IPv6 ip6gretap SPAN entry: MPAT
 * base fields, optional RSPAN VLAN tag, L2 encapsulation, and the IPv6
 * tunnel header.
 */
static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
				    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
				    sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
507
/* Tear down an IPv6 ip6gretap (remote-Ethernet L3) SPAN entry. */
static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}
514
/* SPAN target type: IPv6 ip6gretap tunnel device. */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
99db5229 522#endif
8f08a528 523
e00698d1
PM
524static bool
525mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
526{
527 return is_vlan_dev(dev) &&
528 mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
529}
530
/* Resolve mirroring parameters for a VLAN-on-port target: peel the VLAN
 * device to the underlying port and record the VID to tag mirrored
 * packets with.  A down device is marked unoffloadable.
 */
static int
mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}
546
/* Program the hardware analyzer for a remote-Ethernet (RSPAN, VLAN-tagged)
 * SPAN entry.
 */
static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}
563
/* Tear down a remote-Ethernet (RSPAN) SPAN entry. */
static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}
570
/* SPAN target type: VLAN device on top of an mlxsw port (RSPAN). */
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};
578
169b5d95
PM
/* All supported SPAN target types, probed in order via ->can_handle().
 * Tunnel types are compiled in only when the respective GRE support is
 * enabled in the kernel configuration.
 */
static const
struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};
590
/* Parameter resolution for an invalidated entry: always unoffloadable. */
static int
mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
597
/* No-op configure for an invalidated entry; nothing to program. */
static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}
604
/* No-op deconfigure for an invalidated entry; nothing was programmed. */
static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}
609
/* Ops installed by mlxsw_sp_span_entry_invalidate(): keep the entry alive
 * (it may still be referenced) but make it inert.  No .can_handle — this
 * type is never matched, only assigned explicitly.
 */
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};
615
/* Apply resolved parameters to hardware and cache them on the entry.  If
 * the destination belongs to a different mlxsw instance or programming
 * fails, the entry is kept but downgraded to unoffloadable
 * (dest_port == NULL), so traffic simply is not mirrored.
 */
static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	if (sparms.dest_port) {
		if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
			netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		} else if (span_entry->ops->configure(span_entry, sparms)) {
			netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
				   sparms.dest_port->dev->name);
			sparms.dest_port = NULL;
		}
	}

	/* Cache whatever we ended up with (possibly NULL dest_port). */
	span_entry->parms = sparms;
}
635
636static void
637mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
638{
639 if (span_entry->parms.dest_port)
640 span_entry->ops->deconfigure(span_entry);
641}
642
a629ef21 643static struct mlxsw_sp_span_entry *
079c9f39 644mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
169b5d95
PM
645 const struct net_device *to_dev,
646 const struct mlxsw_sp_span_entry_ops *ops,
647 struct mlxsw_sp_span_parms sparms)
a629ef21 648{
3546b03f 649 struct mlxsw_sp_span_entry *span_entry = NULL;
a629ef21 650 int i;
a629ef21
PM
651
652 /* find a free entry to use */
9a9f8d1e
IS
653 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
654 if (!mlxsw_sp->span->entries[i].ref_count) {
655 span_entry = &mlxsw_sp->span->entries[i];
a629ef21
PM
656 break;
657 }
658 }
3546b03f 659 if (!span_entry)
a629ef21
PM
660 return NULL;
661
169b5d95 662 span_entry->ops = ops;
a629ef21 663 span_entry->ref_count = 1;
079c9f39 664 span_entry->to_dev = to_dev;
169b5d95
PM
665 mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);
666
a629ef21
PM
667 return span_entry;
668}
669
/* Release an entry once its last reference is gone: only the hardware
 * configuration needs undoing; the slot itself stays in the pool.
 */
static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
}
674
675struct mlxsw_sp_span_entry *
079c9f39
PM
676mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
677 const struct net_device *to_dev)
a629ef21
PM
678{
679 int i;
680
9a9f8d1e
IS
681 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
682 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
a629ef21 683
079c9f39 684 if (curr->ref_count && curr->to_dev == to_dev)
a629ef21
PM
685 return curr;
686 }
687 return NULL;
688}
689
079c9f39
PM
/* Neutralize an entry whose target can no longer be mirrored to (e.g. the
 * target netdevice went away): undo the hardware state and swap in no-op
 * ops so future configure/parms calls are harmless.  The entry keeps its
 * references and is reclaimed normally via the put path.
 */
void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}
696
98977089
PM
697static struct mlxsw_sp_span_entry *
698mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
699{
700 int i;
701
9a9f8d1e
IS
702 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
703 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
98977089
PM
704
705 if (curr->ref_count && curr->id == span_id)
706 return curr;
707 }
708 return NULL;
709}
710
a629ef21 711static struct mlxsw_sp_span_entry *
079c9f39 712mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
169b5d95
PM
713 const struct net_device *to_dev,
714 const struct mlxsw_sp_span_entry_ops *ops,
715 struct mlxsw_sp_span_parms sparms)
a629ef21
PM
716{
717 struct mlxsw_sp_span_entry *span_entry;
718
079c9f39 719 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
a629ef21
PM
720 if (span_entry) {
721 /* Already exists, just take a reference */
722 span_entry->ref_count++;
723 return span_entry;
724 }
725
169b5d95 726 return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
a629ef21
PM
727}
728
729static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
730 struct mlxsw_sp_span_entry *span_entry)
731{
732 WARN_ON(!span_entry->ref_count);
733 if (--span_entry->ref_count == 0)
169b5d95 734 mlxsw_sp_span_entry_destroy(span_entry);
a629ef21
PM
735 return 0;
736}
737
738static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
739{
740 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
741 struct mlxsw_sp_span_inspected_port *p;
742 int i;
743
9a9f8d1e
IS
744 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
745 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
a629ef21
PM
746
747 list_for_each_entry(p, &curr->bound_ports_list, list)
748 if (p->local_port == port->local_port &&
749 p->type == MLXSW_SP_SPAN_EGRESS)
750 return true;
751 }
752
753 return false;
754}
755
31c25b94
JP
/* Resize the port's SPAN shared buffer (SBIB register) to fit mirrored
 * packets at the current port speed and the given @mtu.  An unknown speed
 * is treated as 0 — presumably yielding a minimal buffer; see
 * mlxsw_sp_span_buffsize_get() (defined elsewhere) for the sizing policy.
 */
static int
mlxsw_sp_span_port_buffsize_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	u32 buffsize;
	u32 speed;
	int err;

	err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
	if (err)
		return err;
	if (speed == SPEED_UNKNOWN)
		speed = 0;

	buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
775
776int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
777{
a629ef21
PM
778 /* If port is egress mirrored, the shared buffer size should be
779 * updated according to the mtu value
780 */
31c25b94
JP
781 if (mlxsw_sp_span_is_egress_mirror(port))
782 return mlxsw_sp_span_port_buffsize_update(port, mtu);
a629ef21
PM
783 return 0;
784}
785
3a3e627c
JP
/* Deferred-work handler run after a port speed change: if the port is
 * egress mirrored, resize its SPAN shared buffer for the new speed at the
 * current MTU.
 */
void mlxsw_sp_span_speed_update_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
				     span.speed_update_dw);

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the speed value.
	 */
	if (mlxsw_sp_span_is_egress_mirror(mlxsw_sp_port))
		mlxsw_sp_span_port_buffsize_update(mlxsw_sp_port,
						   mlxsw_sp_port->dev->mtu);
}
801
a629ef21 802static struct mlxsw_sp_span_inspected_port *
353def80
IS
803mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
804 enum mlxsw_sp_span_type type,
805 struct mlxsw_sp_port *port,
806 bool bind)
a629ef21
PM
807{
808 struct mlxsw_sp_span_inspected_port *p;
809
810 list_for_each_entry(p, &span_entry->bound_ports_list, list)
353def80
IS
811 if (type == p->type &&
812 port->local_port == p->local_port &&
813 bind == p->bound)
a629ef21
PM
814 return p;
815 return NULL;
816}
817
/* Bind or unbind (@bind) the source @port to @span_entry's analyzer in
 * hardware via the MPAR register; @type selects ingress/egress mirroring.
 */
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type,
				  bool bind)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	int pa_id = span_entry->id;

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
}
833
/* Attach source @port to @span_entry: reject duplicate bindings, reserve
 * the SPAN shared buffer for egress mirrors, optionally program the
 * hardware binding, and record the attachment on the entry's list.  On
 * failure all completed steps are unwound in reverse order.
 */
static int
mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int i;
	int err;

	/* A given (source port, direction) can only be bound to one analyzer,
	 * so if a binding is requested, check for conflicts.
	 */
	if (bind)
		for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
			struct mlxsw_sp_span_entry *curr =
				&mlxsw_sp->span->entries[i];

			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
								port, bind))
				return -EEXIST;
		}

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		err = mlxsw_sp_span_port_buffsize_update(port, port->dev->mtu);
		if (err)
			return err;
	}

	if (bind) {
		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
							true);
		if (err)
			goto err_port_bind;
	}

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	inspected_port->bound = bind;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_inspected_port_alloc:
	/* Undo the hardware binding made above. */
	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
err_port_bind:
	/* Release the egress shared-buffer reservation (size 0). */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}
896
/* Detach source @port from @span_entry: undo the hardware binding, free
 * the egress shared-buffer reservation, drop the entry reference taken at
 * add time, and remove the attachment record.  Silently returns if no
 * matching attachment exists.
 */
static void
mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
				 struct mlxsw_sp_span_entry *span_entry,
				 enum mlxsw_sp_span_type type,
				 bool bind)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
							     port, bind);
	if (!inspected_port)
		return;

	if (bind)
		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
						  false);
	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}
926
169b5d95
PM
927static const struct mlxsw_sp_span_entry_ops *
928mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
929 const struct net_device *to_dev)
930{
931 size_t i;
932
933 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
934 if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
935 return mlxsw_sp_span_entry_types[i];
936
937 return NULL;
938}
939
/* Set up mirroring of @from to @to_dev: pick the matching target type,
 * resolve its parameters, get (or create) a SPAN entry, and attach the
 * source port.  On success *@p_span_id holds the analyzer ID for the
 * matching mlxsw_sp_span_mirror_del() call.  Returns -EOPNOTSUPP for an
 * unsupported target, -ENOBUFS when the SPAN pool is exhausted, or a
 * negative errno.
 */
int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
			     const struct net_device *to_dev,
			     enum mlxsw_sp_span_type type, bool bind,
			     int *p_span_id)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_parms sparms = {NULL};
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
		return -EOPNOTSUPP;
	}

	err = ops->parms(to_dev, &sparms);
	if (err)
		return err;

	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
	if (err)
		goto err_port_bind;

	*p_span_id = span_entry->id;
	return 0;

err_port_bind:
	/* Drop the reference taken by mlxsw_sp_span_entry_get(). */
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}
979
/* Undo a mirror set up by mlxsw_sp_span_mirror_add(): look the entry up
 * by the @span_id returned at add time and detach the source port (which
 * also drops the entry reference).
 */
void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
			      enum mlxsw_sp_span_type type, bool bind)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
}
803335ac
PM
995
/* Re-resolve the offload parameters of every active SPAN entry (called
 * after events such as routing or neighbour changes) and re-program any
 * entry whose parameters differ from what is cached.  Must run under RTNL.
 */
void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	ASSERT_RTNL();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		if (!curr->ref_count)
			continue;

		/* If resolution fails, keep the current configuration. */
		err = curr->ops->parms(curr->to_dev, &sparms);
		if (err)
			continue;

		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
}