]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
treewide: Use fallthrough pseudo-keyword
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_span.c
CommitLineData
9948a064
JP
1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
a629ef21 3
946a11e7 4#include <linux/if_bridge.h>
a629ef21 5#include <linux/list.h>
ed04458d 6#include <linux/mutex.h>
4c00dafc 7#include <linux/refcount.h>
a8e7e6e7
IS
8#include <linux/rtnetlink.h>
9#include <linux/workqueue.h>
27cf76fe
PM
10#include <net/arp.h>
11#include <net/gre.h>
b5de82f3 12#include <net/lag.h>
8f08a528
PM
13#include <net/ndisc.h>
14#include <net/ip6_tunnel.h>
a629ef21
PM
15
16#include "spectrum.h"
27cf76fe 17#include "spectrum_ipip.h"
946a11e7
PM
18#include "spectrum_span.h"
19#include "spectrum_switchdev.h"
a629ef21 20
/* Per-ASIC SPAN (port mirroring) state. Allocated with a flexible array of
 * SPAN agent entries sized by the MAX_SPAN device resource.
 */
struct mlxsw_sp_span {
	struct work_struct work;		/* Respin work, see mlxsw_sp_span_respin_work() */
	struct mlxsw_sp *mlxsw_sp;
	const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
	const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr;
	size_t span_entry_ops_arr_size;
	struct list_head analyzed_ports_list;
	struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
	struct list_head trigger_entries_list;
	u16 policer_id_base;		/* Base of the SPAN policer ID range */
	refcount_t policer_id_base_ref_count;
	atomic_t active_entries_count;	/* Reported as devlink resource occupancy */
	int entries_count;
	struct mlxsw_sp_span_entry entries[];
};

/* A port whose traffic is being analyzed (mirrored), refcounted per
 * (local_port, direction) pair.
 */
struct mlxsw_sp_span_analyzed_port {
	struct list_head list; /* Member of analyzed_ports_list */
	refcount_t ref_count;
	u8 local_port;
	bool ingress;
};

/* Binding of a mirroring trigger on a port to a SPAN agent. */
struct mlxsw_sp_span_trigger_entry {
	struct list_head list; /* Member of trigger_entries_list */
	struct mlxsw_sp_span *span;
	const struct mlxsw_sp_span_trigger_ops *ops;
	refcount_t ref_count;
	u8 local_port;
	enum mlxsw_sp_span_trigger trigger;
	struct mlxsw_sp_span_trigger_parms parms;
};

enum mlxsw_sp_span_trigger_type {
	MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
	MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
};

/* Operations that differ per trigger type (per-port vs. global). */
struct mlxsw_sp_span_trigger_ops {
	int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
	bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			enum mlxsw_sp_span_trigger trigger,
			struct mlxsw_sp_port *mlxsw_sp_port);
	int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
		      struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
	void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
			struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
};
70
a8e7e6e7
IS
static void mlxsw_sp_span_respin_work(struct work_struct *work);

/* devlink resource occupancy callback: number of SPAN agents currently
 * in use.
 */
static u64 mlxsw_sp_span_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->span->active_entries_count);
}
79
a629ef21
PM
/* Allocate and initialize the SPAN state for the ASIC: entry table sized by
 * the MAX_SPAN resource, chip-specific init, and devlink occupancy
 * registration. Returns 0 or a negative errno.
 */
int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_span *span;
	int i, entries_count, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
	if (!span)
		return -ENOMEM;
	refcount_set(&span->policer_id_base_ref_count, 0);
	span->entries_count = entries_count;
	atomic_set(&span->active_entries_count, 0);
	mutex_init(&span->analyzed_ports_lock);
	INIT_LIST_HEAD(&span->analyzed_ports_list);
	INIT_LIST_HEAD(&span->trigger_entries_list);
	span->mlxsw_sp = mlxsw_sp;
	mlxsw_sp->span = span;

	/* Entry IDs are fixed at init time and double as pa_id / index. */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++)
		mlxsw_sp->span->entries[i].id = i;

	err = mlxsw_sp->span_ops->init(mlxsw_sp);
	if (err)
		goto err_init;

	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
					  mlxsw_sp_span_occ_get, mlxsw_sp);
	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);

	return 0;

err_init:
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
	return err;
}

/* Tear down SPAN state. All trigger entries and analyzed ports must have
 * been released by now; warn otherwise.
 */
void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	/* Flush any in-flight respin before unregistering. */
	cancel_work_sync(&mlxsw_sp->span->work);
	devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);

	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
	WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
	mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
	kfree(mlxsw_sp->span);
}
133
fa8c08b8
IS
/* Spectrum-1 "CPU port" destination ops: the NULL netdevice destination is
 * recognized, but mirroring to the CPU is not supported on this ASIC, so
 * parms/configure report -EOPNOTSUPP.
 */
static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev)
{
	return !dev;
}

static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	return -EOPNOTSUPP;
}

static void
mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
	.can_handle = mlxsw_sp1_span_cpu_can_handle,
	.parms_set = mlxsw_sp1_span_entry_cpu_parms,
	.configure = mlxsw_sp1_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure,
};
165
/* Destination is a physical mlxsw port: the mirror target is simply its
 * local port.
 */
static int
mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = netdev_priv(to_dev);
	return 0;
}

static int
mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

/* Invalidate the MPAT (port analyzer) register entry for this SPAN agent.
 * Shared by all entry types; only the span_type differs.
 */
static void
mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
				       enum mlxsw_reg_mpat_span_type span_type)
{
	struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
	.can_handle = mlxsw_sp_port_dev_check,
	.parms_set = mlxsw_sp_span_entry_phys_parms,
	.configure = mlxsw_sp_span_entry_phys_configure,
	.deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
};
222
27cf76fe
PM
/* Resolve the destination MAC towards @pkey on @dev via the given neighbour
 * table (ARP or ND), kicking off resolution if needed. Returns 0 with @dmac
 * filled, -ENOENT if the neighbour is not (yet) valid, or the neigh_create()
 * error.
 */
static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
			      const void *pkey,
			      struct net_device *dev,
			      unsigned char dmac[ETH_ALEN])
{
	struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
	int err = 0;

	if (!neigh) {
		neigh = neigh_create(tbl, pkey, dev);
		if (IS_ERR(neigh))
			return PTR_ERR(neigh);
	}

	/* Trigger resolution; a later respin picks up the resolved state. */
	neigh_event_send(neigh, NULL);

	read_lock_bh(&neigh->lock);
	if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
		memcpy(dmac, neigh->ha, ETH_ALEN);
	else
		err = -ENOENT;
	read_unlock_bh(&neigh->lock);

	neigh_release(neigh);
	return err;
}

/* Record that the entry cannot be offloaded: a NULL dest_port marks the
 * agent as inactive in hardware. Returns 0 so callers can tail-call it.
 */
static int
mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
{
	sparmsp->dest_port = NULL;
	return 0;
}
256
946a11e7
PM
/* Resolve the egress device for @dmac through a VLAN-aware bridge.
 * On success, *p_vid is the VID to tag mirrored packets with (0 if the
 * egress port transmits untagged).
 */
static struct net_device *
mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
				 unsigned char *dmac,
				 u16 *p_vid)
{
	struct bridge_vlan_info vinfo;
	struct net_device *edev;
	u16 vid = *p_vid;

	/* Without an explicit VLAN, fall back to the bridge PVID. */
	if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
		return NULL;
	if (!vid ||
	    br_vlan_get_info(br_dev, vid, &vinfo) ||
	    !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
		return NULL;

	edev = br_fdb_find_port(br_dev, dmac, vid);
	if (!edev)
		return NULL;

	if (br_vlan_get_info(edev, vid, &vinfo))
		return NULL;
	if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
		*p_vid = 0;
	else
		*p_vid = vid;
	return edev;
}

/* Resolve the egress device for @dmac through a VLAN-unaware bridge. */
static struct net_device *
mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
				 unsigned char *dmac)
{
	return br_fdb_find_port(br_dev, dmac, 0);
}

/* Resolve a bridge master to the concrete egress port for @dmac and check
 * that STP on that port is forwarding; otherwise the entry is not
 * offloadable.
 */
static struct net_device *
mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
			   unsigned char dmac[ETH_ALEN],
			   u16 *p_vid)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	enum mlxsw_reg_spms_state spms_state;
	struct net_device *dev = NULL;
	struct mlxsw_sp_port *port;
	u8 stp_state;

	if (br_vlan_enabled(br_dev))
		dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
	else if (!*p_vid)
		dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
	if (!dev)
		return NULL;

	port = mlxsw_sp_port_dev_lower_find(dev);
	if (!port)
		return NULL;

	bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
	if (!bridge_port)
		return NULL;

	stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
	spms_state = mlxsw_sp_stp_spms_state(stp_state);
	if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
		return NULL;

	return dev;
}

/* Peel one VLAN device: report its VID and return the underlying device. */
static struct net_device *
mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
			 u16 *p_vid)
{
	*p_vid = vlan_dev_vlan_id(vlan_dev);
	return vlan_dev_real_dev(vlan_dev);
}
334
55c0211d
PM
335static struct net_device *
336mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
337{
338 struct net_device *dev;
339 struct list_head *iter;
340
341 netdev_for_each_lower_dev(lag_dev, dev, iter)
b5de82f3
PM
342 if (netif_carrier_ok(dev) &&
343 net_lag_port_dev_txable(dev) &&
344 mlxsw_sp_port_dev_check(dev))
55c0211d
PM
345 return dev;
346
347 return NULL;
348}
349
/* Common tail of tunnel parameter resolution for gretap4/gretap6: resolve
 * the next-hop MAC, then walk VLAN / bridge / LAG uppers down to a physical
 * mlxsw port and fill @sparmsp. Any unresolvable step marks the entry
 * unoffloadable instead of failing.
 */
static __maybe_unused int
mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
					union mlxsw_sp_l3addr saddr,
					union mlxsw_sp_l3addr daddr,
					union mlxsw_sp_l3addr gw,
					__u8 ttl,
					struct neigh_table *tbl,
					struct mlxsw_sp_span_parms *sparmsp)
{
	unsigned char dmac[ETH_ALEN];
	u16 vid = 0;

	/* On-link destination: the tunnel endpoint itself is the next hop. */
	if (mlxsw_sp_l3addr_is_zero(gw))
		gw = daddr;

	if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
		goto unoffloadable;

	if (is_vlan_dev(edev))
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);

	if (netif_is_bridge_master(edev)) {
		edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
		if (!edev)
			goto unoffloadable;
	}

	if (is_vlan_dev(edev)) {
		/* A second VLAN layer (Q-in-Q) cannot be offloaded. */
		if (vid || !(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_vlan(edev, &vid);
	}

	if (netif_is_lag_master(edev)) {
		if (!(edev->flags & IFF_UP))
			goto unoffloadable;
		edev = mlxsw_sp_span_entry_lag(edev);
		if (!edev)
			goto unoffloadable;
	}

	if (!mlxsw_sp_port_dev_check(edev))
		goto unoffloadable;

	sparmsp->dest_port = netdev_priv(edev);
	sparmsp->ttl = ttl;
	memcpy(sparmsp->dmac, dmac, ETH_ALEN);
	memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
	sparmsp->saddr = saddr;
	sparmsp->daddr = daddr;
	sparmsp->vid = vid;
	return 0;

unoffloadable:
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}
406
99db5229 407#if IS_ENABLED(CONFIG_NET_IPGRE)
36a1c3bd
PM
/* Route the IPv4 underlay of a gretap device. Returns the egress device and
 * updates *saddrp/*daddrp with the chosen source address and (if present)
 * the IPv4 gateway. NULL means not routable / not offloadable.
 */
static struct net_device *
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
			    __be32 *saddrp, __be32 *daddrp)
{
	struct ip_tunnel *tun = netdev_priv(to_dev);
	struct net_device *dev = NULL;
	struct ip_tunnel_parm parms;
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	/* We assume "dev" stays valid after rt is put. */
	ASSERT_RTNL();

	parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
	ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
			    0, 0, parms.link, tun->fwmark, 0);

	rt = ip_route_output_key(tun->net, &fl4);
	if (IS_ERR(rt))
		return NULL;

	if (rt->rt_type != RTN_UNICAST)
		goto out;

	dev = rt->dst.dev;
	*saddrp = fl4.saddr;
	if (rt->rt_gw_family == AF_INET)
		*daddrp = rt->rt_gw4;
	/* can not offload if route has an IPv6 gateway */
	else if (rt->rt_gw_family == AF_INET6)
		dev = NULL;

out:
	ip_rt_put(rt);
	return dev;
}

/* Build SPAN parameters for mirroring into an IPv4 GRE tunnel. Tunnels with
 * GRE options, inherited TTL or fixed TOS cannot be encapsulated by the
 * ASIC and are reported unoffloadable.
 */
static int
mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
	union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
	union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
	bool inherit_tos = tparm.iph.tos & 0x1;
	bool inherit_ttl = !tparm.iph.ttl;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.iph.ttl,
						       &arp_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
					      sparms.ttl, sparms.smac,
					      be32_to_cpu(sparms.saddr.addr4),
					      be32_to_cpu(sparms.daddr.addr4));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
	.can_handle = netif_is_gretap,
	.parms_set = mlxsw_sp_span_entry_gretap4_parms,
	.configure = mlxsw_sp_span_entry_gretap4_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
};
99db5229 513#endif
27cf76fe 514
99db5229 515#if IS_ENABLED(CONFIG_IPV6_GRE)
8f08a528
PM
/* Route the IPv6 underlay of an ip6gretap device. Returns the egress device
 * and updates *saddrp/*daddrp with the chosen source address and gateway.
 * NULL means not routable / not offloadable.
 */
static struct net_device *
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
			    struct in6_addr *saddrp,
			    struct in6_addr *daddrp)
{
	struct ip6_tnl *t = netdev_priv(to_dev);
	struct flowi6 fl6 = t->fl.u.ip6;
	struct net_device *dev = NULL;
	struct dst_entry *dst;
	struct rt6_info *rt6;

	/* We assume "dev" stays valid after dst is released. */
	ASSERT_RTNL();

	fl6.flowi6_mark = t->parms.fwmark;
	if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
		return NULL;

	dst = ip6_route_output(t->net, NULL, &fl6);
	if (!dst || dst->error)
		goto out;

	rt6 = container_of(dst, struct rt6_info, dst);

	dev = dst->dev;
	*saddrp = fl6.saddr;
	*daddrp = rt6->rt6i_gateway;

out:
	dst_release(dst);
	return dev;
}

/* Build SPAN parameters for mirroring into an IPv6 GRE tunnel; same
 * offload restrictions as the IPv4 variant.
 */
static int
mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  struct mlxsw_sp_span_parms *sparmsp)
{
	struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
	bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
	union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
	union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
	bool inherit_ttl = !tparm.hop_limit;
	union mlxsw_sp_l3addr gw = daddr;
	struct net_device *l3edev;

	if (!(to_dev->flags & IFF_UP) ||
	    /* Reject tunnels with GRE keys, checksums, etc. */
	    tparm.i_flags || tparm.o_flags ||
	    /* Require a fixed TTL and a TOS copied from the mirrored packet. */
	    inherit_ttl || !inherit_tos ||
	    /* A destination address may not be "any". */
	    mlxsw_sp_l3addr_is_zero(daddr))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
	return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
						       tparm.hop_limit,
						       &nd_tbl, sparmsp);
}

static int
mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
				      struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	/* Create a new port analyzer entry for local_port. */
	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
					 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
					 sparms.dmac, !!sparms.vid);
	mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
					      sparms.saddr.addr6,
					      sparms.daddr.addr6);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
	.can_handle = netif_is_ip6gretap,
	.parms_set = mlxsw_sp_span_entry_gretap6_parms,
	.configure = mlxsw_sp_span_entry_gretap6_configure,
	.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
};
99db5229 617#endif
8f08a528 618
e00698d1
PM
/* Destination is a VLAN device on top of an mlxsw port. */
static bool
mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
	       mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
}

static int
mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *to_dev,
			       struct mlxsw_sp_span_parms *sparmsp)
{
	struct net_device *real_dev;
	u16 vid;

	if (!(to_dev->flags & IFF_UP))
		return mlxsw_sp_span_entry_unoffloadable(sparmsp);

	real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
	sparmsp->dest_port = netdev_priv(real_dev);
	sparmsp->vid = vid;
	return 0;
}

static int
mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_port *dest_port = sparms.dest_port;
	struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
	u8 local_port = dest_port->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
			    MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
	mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
	mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
	mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
}

static void
mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure_common(span_entry,
					       MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
	.can_handle = mlxsw_sp_span_vlan_can_handle,
	.parms_set = mlxsw_sp_span_entry_vlan_parms,
	.configure = mlxsw_sp_span_entry_vlan_configure,
	.deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
};

/* Destination-type dispatch table for Spectrum-1; first match of
 * ->can_handle() wins, so order matters (CPU stub first).
 */
static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = {
	&mlxsw_sp1_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};
689
fa8c08b8
IS
690static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev)
691{
692 return !dev;
693}
694
static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
					  const struct net_device *to_dev,
					  struct mlxsw_sp_span_parms *sparmsp)
{
	/* Spectrum-2 can mirror to the CPU port like any other port. */
	sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
	return 0;
}

static int
mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
				   struct mlxsw_sp_span_parms sparms)
{
	/* Mirroring to the CPU port is like mirroring to any other physical
	 * port. Its local port is used instead of that of the physical port.
	 */
	return mlxsw_sp_span_entry_phys_configure(span_entry, sparms);
}

static void
mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	enum mlxsw_reg_mpat_span_type span_type;

	span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH;
	mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type);
}

static const
struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
	.can_handle = mlxsw_sp2_span_cpu_can_handle,
	.parms_set = mlxsw_sp2_span_entry_cpu_parms,
	.configure = mlxsw_sp2_span_entry_cpu_configure,
	.deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure,
};

/* Destination-type dispatch table for Spectrum-2 and later; first match of
 * ->can_handle() wins.
 */
static const
struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = {
	&mlxsw_sp2_span_entry_ops_cpu,
	&mlxsw_sp_span_entry_ops_phys,
#if IS_ENABLED(CONFIG_NET_IPGRE)
	&mlxsw_sp_span_entry_ops_gretap4,
#endif
#if IS_ENABLED(CONFIG_IPV6_GRE)
	&mlxsw_sp_span_entry_ops_gretap6,
#endif
	&mlxsw_sp_span_entry_ops_vlan,
};
742
/* No-op ops installed on invalidated entries: keep the slot allocated but
 * report it unoffloadable and do nothing on (de)configure.
 */
static int
mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp,
			      const struct net_device *to_dev,
			      struct mlxsw_sp_span_parms *sparmsp)
{
	return mlxsw_sp_span_entry_unoffloadable(sparmsp);
}

static int
mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
				  struct mlxsw_sp_span_parms sparms)
{
	return 0;
}

static void
mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
}

static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
	.parms_set = mlxsw_sp_span_entry_nop_parms,
	.configure = mlxsw_sp_span_entry_nop_configure,
	.deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
};
768
/* Try to program @span_entry into hardware with @sparms. On any failure the
 * entry is left software-only (dest_port cleared) rather than erroring out,
 * so a later respin can retry. The (possibly cleared) parms are always
 * recorded on the entry.
 */
static void
mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_span_entry *span_entry,
			      struct mlxsw_sp_span_parms sparms)
{
	int err;

	if (!sparms.dest_port)
		goto set_parms;

	if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
		dev_err(mlxsw_sp->bus_info->dev,
			"Cannot mirror to a port which belongs to a different mlxsw instance\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

	err = span_entry->ops->configure(span_entry, sparms);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n");
		sparms.dest_port = NULL;
		goto set_parms;
	}

set_parms:
	span_entry->parms = sparms;
}

/* Undo hardware configuration, but only if the entry was actually
 * offloaded (dest_port set).
 */
static void
mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
{
	if (span_entry->parms.dest_port)
		span_entry->ops->deconfigure(span_entry);
}
803
4039504e
IS
/* Account a SPAN policer and, on first use, program the device's policer ID
 * base derived from it. Returns -EINVAL if @policer_id falls outside the
 * already-established range.
 */
static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
					     u16 policer_id)
{
	struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
	u16 policer_id_base;
	int err;

	/* Policers set on SPAN agents must be in the range of
	 * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the
	 * base is set and the new policer is not within the range, then we
	 * must error out.
	 */
	if (refcount_read(&span->policer_id_base_ref_count)) {
		if (policer_id < span->policer_id_base ||
		    policer_id >= span->policer_id_base + span->entries_count)
			return -EINVAL;

		refcount_inc(&span->policer_id_base_ref_count);
		return 0;
	}

	/* Base must be even. */
	policer_id_base = policer_id % 2 == 0 ? policer_id : policer_id - 1;
	err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp,
						      policer_id_base);
	if (err)
		return err;

	span->policer_id_base = policer_id_base;
	refcount_set(&span->policer_id_base_ref_count, 1);

	return 0;
}

/* Drop one policer reference; the last drop clears the recorded base. */
static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
{
	if (refcount_dec_and_test(&span->policer_id_base_ref_count))
		span->policer_id_base = 0;
}
843
/* Allocate a free SPAN agent slot (ref_count == 0), take the policer base
 * reference if a policer is requested, and attempt hardware configuration.
 * Returns NULL if no slot is free or the policer base cannot be set; a
 * configure failure still yields a (software-only) entry.
 */
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
			   const struct net_device *to_dev,
			   const struct mlxsw_sp_span_entry_ops *ops,
			   struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry = NULL;
	int i;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
			span_entry = &mlxsw_sp->span->entries[i];
			break;
		}
	}
	if (!span_entry)
		return NULL;

	if (sparms.policer_enable) {
		int err;

		err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span,
							sparms.policer_id);
		if (err)
			return NULL;
	}

	atomic_inc(&mlxsw_sp->span->active_entries_count);
	span_entry->ops = ops;
	refcount_set(&span_entry->ref_count, 1);
	span_entry->to_dev = to_dev;
	mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);

	return span_entry;
}

/* Release an entry: deconfigure hardware, update occupancy, and drop the
 * policer base reference if one was taken at creation.
 */
static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	atomic_dec(&mlxsw_sp->span->active_entries_count);
	if (span_entry->parms.policer_enable)
		mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span);
}
889
890struct mlxsw_sp_span_entry *
079c9f39
PM
891mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
892 const struct net_device *to_dev)
a629ef21
PM
893{
894 int i;
895
9a9f8d1e
IS
896 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
897 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
a629ef21 898
4c00dafc 899 if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
a629ef21
PM
900 return curr;
901 }
902 return NULL;
903}
904
079c9f39
PM
/* Detach an entry from hardware and neuter it with the nop ops, keeping the
 * slot and its references alive (used when the destination becomes
 * unusable).
 */
void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_span_entry *span_entry)
{
	mlxsw_sp_span_entry_deconfigure(span_entry);
	span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
}

/* Look up an in-use SPAN agent by its fixed ID. */
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->id == span_id)
			return curr;
	}
	return NULL;
}

/* Look up an in-use SPAN agent matching destination device and policer
 * settings; entries differing only in policer configuration are distinct.
 */
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *to_dev,
				  const struct mlxsw_sp_span_parms *sparms)
{
	int i;

	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];

		if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
		    curr->parms.policer_enable == sparms->policer_enable &&
		    curr->parms.policer_id == sparms->policer_id)
			return curr;
	}
	return NULL;
}
943
/* Get a SPAN agent for (@to_dev, @sparms): reuse an existing matching entry
 * by bumping its reference, or create a fresh one. Returns NULL on
 * exhaustion/failure.
 */
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
			const struct net_device *to_dev,
			const struct mlxsw_sp_span_entry_ops *ops,
			struct mlxsw_sp_span_parms sparms)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
						       &sparms);
	if (span_entry) {
		/* Already exists, just take a reference */
		refcount_inc(&span_entry->ref_count);
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
}
962
963static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
964 struct mlxsw_sp_span_entry *span_entry)
965{
4c00dafc 966 if (refcount_dec_and_test(&span_entry->ref_count))
eb833eec 967 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
a629ef21
PM
968 return 0;
969}
970
4bafb85a
IS
/* Return the egress mirror buffer size, in cells, for the given MTU and
 * port speed. The byte size comes from the per-ASIC buffsize_get()
 * callback. NOTE(review): the '+ 1' extra cell looks like headroom for
 * rounding in the bytes-to-cells conversion — confirm against PRM.
 */
static u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu,
				      u32 speed)
{
	u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu);

	return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
}
978
/* Size the port's internal shared buffer (SBIB register), used for
 * egress mirroring, according to the given MTU and the port's current
 * speed. An unknown speed is treated as 0.
 */
static int
mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	u32 buffsize;
	u32 speed;
	int err;

	err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
	if (err)
		return err;
	if (speed == SPEED_UNKNOWN)
		speed = 0;

	buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
	/* Presumably 8x-split ports need extra headroom — the helper
	 * encapsulates that adjustment.
	 */
	buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
	mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
999
14366da6
IS
/* Release the port's egress mirror buffer by writing a zero size to the
 * SBIB register.
 */
static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
					      u8 local_port)
{
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
1008
835d6b8c
IS
1009static struct mlxsw_sp_span_analyzed_port *
1010mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
1011 bool ingress)
1012{
1013 struct mlxsw_sp_span_analyzed_port *analyzed_port;
1014
1015 list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
1016 if (analyzed_port->local_port == local_port &&
1017 analyzed_port->ingress == ingress)
1018 return analyzed_port;
1019 }
1020
1021 return NULL;
1022}
1023
31c25b94
JP
1024int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
1025{
835d6b8c
IS
1026 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
1027 int err = 0;
1028
a629ef21
PM
1029 /* If port is egress mirrored, the shared buffer size should be
1030 * updated according to the mtu value
1031 */
835d6b8c
IS
1032 mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
1033
1034 if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span, port->local_port,
1035 false))
1036 err = mlxsw_sp_span_port_buffer_update(port, mtu);
1037
1038 mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
1039
1040 return err;
a629ef21
PM
1041}
1042
3a3e627c
JP
/* Delayed work handler run after a port speed change. If the port is
 * egress mirrored, resize its egress mirror buffer, since the required
 * size depends on the port speed.
 */
void mlxsw_sp_span_speed_update_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;

	mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
				     span.speed_update_dw);

	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	/* Only egress mirroring (ingress == false) uses the buffer. */
	if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
					     mlxsw_sp_port->local_port, false))
		mlxsw_sp_span_port_buffer_update(mlxsw_sp_port,
						 mlxsw_sp_port->dev->mtu);

	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
1065
169b5d95
PM
1066static const struct mlxsw_sp_span_entry_ops *
1067mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
1068 const struct net_device *to_dev)
1069{
34e4ace5 1070 struct mlxsw_sp_span *span = mlxsw_sp->span;
169b5d95
PM
1071 size_t i;
1072
34e4ace5
IS
1073 for (i = 0; i < span->span_entry_ops_arr_size; ++i)
1074 if (span->span_entry_ops_arr[i]->can_handle(to_dev))
1075 return span->span_entry_ops_arr[i];
169b5d95
PM
1076
1077 return NULL;
1078}
1079
a8e7e6e7
IS
/* Deferred re-resolution of all active SPAN entries. For each entry in
 * use, re-derive its parameters from the destination netdevice and, if
 * they changed, reprogram the hardware accordingly. Runs under RTNL.
 */
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span *span;
	struct mlxsw_sp *mlxsw_sp;
	int i, err;

	span = container_of(work, struct mlxsw_sp_span, work);
	mlxsw_sp = span->mlxsw_sp;

	rtnl_lock();
	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
		struct mlxsw_sp_span_parms sparms = {NULL};

		/* Skip entries that are not in use. */
		if (!refcount_read(&curr->ref_count))
			continue;

		err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
		if (err)
			continue;

		/* Only reprogram when the resolved parameters changed. */
		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			mlxsw_sp_span_entry_deconfigure(curr);
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
	}
	rtnl_unlock();
}
622110f2
IS
1108
1109void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
1110{
eb833eec
IS
1111 if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
1112 return;
622110f2
IS
1113 mlxsw_core_schedule_work(&mlxsw_sp->span->work);
1114}
46601034 1115
a120ecc3
IS
/* Allocate (or share) a SPAN agent that mirrors towards parms->to_dev
 * and report its SPAN ID via p_span_id. Returns -EOPNOTSUPP when no
 * entry ops can handle the destination device. Runs under RTNL.
 */
int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
			    const struct mlxsw_sp_span_agent_parms *parms)
{
	const struct net_device *to_dev = parms->to_dev;
	const struct mlxsw_sp_span_entry_ops *ops;
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp_span_parms sparms;
	int err;

	ASSERT_RTNL();

	ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
	if (!ops) {
		dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
		return -EOPNOTSUPP;
	}

	memset(&sparms, 0, sizeof(sparms));
	err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
	if (err)
		return err;

	/* Policer settings come from the caller, not from the netdev. */
	sparms.policer_id = parms->policer_id;
	sparms.policer_enable = parms->policer_enable;
	span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
	if (!span_entry)
		return -ENOBUFS;

	*p_span_id = span_entry->id;

	return 0;
}
1148
/* Drop a reference on the SPAN agent identified by span_id; the backing
 * entry is destroyed when the last reference goes away. Runs under RTNL.
 */
void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
{
	struct mlxsw_sp_span_entry *span_entry;

	ASSERT_RTNL();

	span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
	if (WARN_ON_ONCE(!span_entry))
		return;

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
}
ed04458d 1161
ed04458d
IS
/* Allocate and register a tracking object for a port analyzed in the
 * given direction, and for egress mirroring also allocate the port's
 * egress mirror buffer. Callers hold analyzed_ports_lock.
 */
static struct mlxsw_sp_span_analyzed_port *
mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
				   struct mlxsw_sp_port *mlxsw_sp_port,
				   bool ingress)
{
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	int err;

	analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
	if (!analyzed_port)
		return ERR_PTR(-ENOMEM);

	refcount_set(&analyzed_port->ref_count, 1);
	analyzed_port->local_port = mlxsw_sp_port->local_port;
	analyzed_port->ingress = ingress;
	list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);

	/* An egress mirror buffer should be allocated on the egress port which
	 * does the mirroring.
	 */
	if (!ingress) {
		u16 mtu = mlxsw_sp_port->dev->mtu;

		err = mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, mtu);
		if (err)
			goto err_buffer_update;
	}

	return analyzed_port;

err_buffer_update:
	list_del(&analyzed_port->list);
	kfree(analyzed_port);
	return ERR_PTR(err);
}
1197
1198static void
1199mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
1200 struct mlxsw_sp_span_analyzed_port *
1201 analyzed_port)
1202{
1203 struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
ed04458d
IS
1204
1205 /* Remove egress mirror buffer now that port is no longer analyzed
1206 * at egress.
1207 */
14366da6
IS
1208 if (!analyzed_port->ingress)
1209 mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
1210 analyzed_port->local_port);
ed04458d
IS
1211
1212 list_del(&analyzed_port->list);
1213 kfree(analyzed_port);
1214}
1215
1216int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
1217 bool ingress)
1218{
1219 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1220 struct mlxsw_sp_span_analyzed_port *analyzed_port;
1221 u8 local_port = mlxsw_sp_port->local_port;
1222 int err = 0;
1223
1224 mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
1225
1226 analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
1227 local_port, ingress);
1228 if (analyzed_port) {
1229 refcount_inc(&analyzed_port->ref_count);
1230 goto out_unlock;
1231 }
1232
1233 analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
1234 mlxsw_sp_port,
1235 ingress);
1236 if (IS_ERR(analyzed_port))
1237 err = PTR_ERR(analyzed_port);
1238
1239out_unlock:
1240 mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
1241 return err;
1242}
1243
/* Release a reference taken by mlxsw_sp_span_analyzed_port_get() and
 * destroy the tracking object (and egress buffer, if any) on the last
 * put. Serialized by analyzed_ports_lock.
 */
void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool ingress)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_analyzed_port *analyzed_port;
	u8 local_port = mlxsw_sp_port->local_port;

	mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);

	analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
							 local_port, ingress);
	if (WARN_ON_ONCE(!analyzed_port))
		goto out_unlock;

	if (!refcount_dec_and_test(&analyzed_port->ref_count))
		goto out_unlock;

	mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);

out_unlock:
	mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
}
c056618c
IS
1266
1267static int
08a3641f
IS
1268__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
1269 struct mlxsw_sp_span_trigger_entry *
1270 trigger_entry, bool enable)
c056618c
IS
1271{
1272 char mpar_pl[MLXSW_REG_MPAR_LEN];
1273 enum mlxsw_reg_mpar_i_e i_e;
1274
1275 switch (trigger_entry->trigger) {
1276 case MLXSW_SP_SPAN_TRIGGER_INGRESS:
1277 i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
1278 break;
1279 case MLXSW_SP_SPAN_TRIGGER_EGRESS:
1280 i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
1281 break;
1282 default:
1283 WARN_ON_ONCE(1);
1284 return -EINVAL;
1285 }
1286
1287 mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
1288 trigger_entry->parms.span_id);
1289 return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
1290}
1291
/* Bind (and thereby enable) the per-port trigger in hardware. */
static int
mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
				trigger_entry)
{
	return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
						 trigger_entry, true);
}
1299
/* Unbind (clear) the per-port trigger in hardware. */
static void
mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry)
{
	__mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
					  false);
}
1307
1308static bool
1309mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
1310 trigger_entry,
1311 enum mlxsw_sp_span_trigger trigger,
1312 struct mlxsw_sp_port *mlxsw_sp_port)
1313{
1314 return trigger_entry->trigger == trigger &&
1315 trigger_entry->local_port == mlxsw_sp_port->local_port;
1316}
1317
2bafb216
IS
static int
mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
				  trigger_entry,
				  struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
	/* Port triggers are enabled during binding, so there is nothing
	 * to do here.
	 */
	return 0;
}
1326
/* Nothing to do: port triggers are only disabled when unbound. */
static void
mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry,
				   struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
{
}
1333
08a3641f
IS
/* Ops for per-port triggers (ingress / egress mirroring). Binding
 * programs the MPAR register, which also activates the trigger, so
 * enable / disable are effectively no-ops.
 */
static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp_span_trigger_port_ops = {
	.bind = mlxsw_sp_span_trigger_port_bind,
	.unbind = mlxsw_sp_span_trigger_port_unbind,
	.matches = mlxsw_sp_span_trigger_port_matches,
	.enable = mlxsw_sp_span_trigger_port_enable,
	.disable = mlxsw_sp_span_trigger_port_disable,
};
1342
ab8c06b7
IS
/* Spectrum-1 does not support global mirroring triggers. */
static int
mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
				   trigger_entry)
{
	return -EOPNOTSUPP;
}
1349
/* No-op: global triggers can never be bound on Spectrum-1. */
static void
mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
}
1355
/* Should never be reached: global trigger entries cannot exist on
 * Spectrum-1, since bind always fails.
 */
static bool
mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	WARN_ON_ONCE(1);
	return false;
}
1365
2bafb216
IS
/* Spectrum-1 does not support global mirroring triggers. */
static int
mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return -EOPNOTSUPP;
}
1374
/* No-op: global triggers can never be enabled on Spectrum-1. */
static void
mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
}
1382
ab8c06b7
IS
/* Stub ops: Spectrum-1 rejects all global trigger operations. */
static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp1_span_trigger_global_ops = {
	.bind = mlxsw_sp1_span_trigger_global_bind,
	.unbind = mlxsw_sp1_span_trigger_global_unbind,
	.matches = mlxsw_sp1_span_trigger_global_matches,
	.enable = mlxsw_sp1_span_trigger_global_enable,
	.disable = mlxsw_sp1_span_trigger_global_disable,
};
1391
/* Spectrum-1 trigger ops, indexed by trigger type. */
static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp1_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp1_span_trigger_global_ops,
};
1398
1399static int
1400mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
1401 trigger_entry)
1402{
1403 struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
1404 enum mlxsw_reg_mpagr_trigger trigger;
1405 char mpagr_pl[MLXSW_REG_MPAGR_LEN];
1406
1407 switch (trigger_entry->trigger) {
1408 case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
1409 trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
1410 break;
1411 case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
1412 trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
1413 break;
1414 case MLXSW_SP_SPAN_TRIGGER_ECN:
1415 trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
1416 break;
1417 default:
1418 WARN_ON_ONCE(1);
1419 return -EINVAL;
1420 }
1421
1422 mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
1423 1);
1424 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
1425}
1426
static void
mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry)
{
	/* There is no unbinding for global triggers. The trigger should be
	 * disabled on all ports by now.
	 */
}
1435
/* Global triggers are not tied to a port; match on the trigger alone. */
static bool
mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      enum mlxsw_sp_span_trigger trigger,
				      struct mlxsw_sp_port *mlxsw_sp_port)
{
	return trigger_entry->trigger == trigger;
}
1444
2bafb216
IS
1445static int
1446__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
1447 trigger_entry,
1448 struct mlxsw_sp_port *mlxsw_sp_port,
1449 u8 tc, bool enable)
1450{
1451 struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
1452 char momte_pl[MLXSW_REG_MOMTE_LEN];
1453 enum mlxsw_reg_momte_type type;
1454 int err;
1455
1456 switch (trigger_entry->trigger) {
1457 case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
1458 type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
1459 break;
1460 case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
1461 type = MLXSW_REG_MOMTE_TYPE_WRED;
1462 break;
1463 case MLXSW_SP_SPAN_TRIGGER_ECN:
1464 type = MLXSW_REG_MOMTE_TYPE_ECN;
1465 break;
1466 default:
1467 WARN_ON_ONCE(1);
1468 return -EINVAL;
1469 }
1470
1471 /* Query existing configuration in order to only change the state of
1472 * the specified traffic class.
1473 */
1474 mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
1475 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
1476 if (err)
1477 return err;
1478
1479 mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
1480 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
1481}
1482
/* Enable the global trigger for one traffic class on a port. */
static int
mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
				     trigger_entry,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     u8 tc)
{
	return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
						      mlxsw_sp_port, tc, true);
}
1492
/* Disable the global trigger for one traffic class on a port. */
static void
mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
				      trigger_entry,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      u8 tc)
{
	__mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
					       false);
}
1502
ab8c06b7
IS
/* Ops for global (switch-wide) triggers on Spectrum-2 and later:
 * binding programs MPAGR once; per-port/per-TC state is toggled via
 * enable / disable (MOMTE).
 */
static const struct mlxsw_sp_span_trigger_ops
mlxsw_sp2_span_trigger_global_ops = {
	.bind = mlxsw_sp2_span_trigger_global_bind,
	.unbind = mlxsw_sp2_span_trigger_global_unbind,
	.matches = mlxsw_sp2_span_trigger_global_matches,
	.enable = mlxsw_sp2_span_trigger_global_enable,
	.disable = mlxsw_sp2_span_trigger_global_disable,
};
1511
/* Spectrum-2/3 trigger ops, indexed by trigger type. */
static const struct mlxsw_sp_span_trigger_ops *
mlxsw_sp2_span_trigger_ops_arr[] = {
	[MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
	[MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
		&mlxsw_sp2_span_trigger_global_ops,
};
1518
1519static void
1520mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
1521{
1522 struct mlxsw_sp_span *span = trigger_entry->span;
1523 enum mlxsw_sp_span_trigger_type type;
1524
1525 switch (trigger_entry->trigger) {
df561f66 1526 case MLXSW_SP_SPAN_TRIGGER_INGRESS:
08a3641f
IS
1527 case MLXSW_SP_SPAN_TRIGGER_EGRESS:
1528 type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
1529 break;
df561f66
GS
1530 case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
1531 case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
ab8c06b7
IS
1532 case MLXSW_SP_SPAN_TRIGGER_ECN:
1533 type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
1534 break;
08a3641f
IS
1535 default:
1536 WARN_ON_ONCE(1);
1537 return;
1538 }
1539
1540 trigger_entry->ops = span->span_trigger_ops_arr[type];
c056618c
IS
1541}
1542
1543static struct mlxsw_sp_span_trigger_entry *
1544mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
1545 enum mlxsw_sp_span_trigger trigger,
1546 struct mlxsw_sp_port *mlxsw_sp_port,
1547 const struct mlxsw_sp_span_trigger_parms
1548 *parms)
1549{
1550 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1551 int err;
1552
1553 trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
1554 if (!trigger_entry)
1555 return ERR_PTR(-ENOMEM);
1556
1557 refcount_set(&trigger_entry->ref_count, 1);
08a3641f
IS
1558 trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
1559 0;
c056618c
IS
1560 trigger_entry->trigger = trigger;
1561 memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
08a3641f
IS
1562 trigger_entry->span = span;
1563 mlxsw_sp_span_trigger_ops_set(trigger_entry);
c056618c
IS
1564 list_add_tail(&trigger_entry->list, &span->trigger_entries_list);
1565
08a3641f 1566 err = trigger_entry->ops->bind(trigger_entry);
c056618c
IS
1567 if (err)
1568 goto err_trigger_entry_bind;
1569
1570 return trigger_entry;
1571
1572err_trigger_entry_bind:
1573 list_del(&trigger_entry->list);
1574 kfree(trigger_entry);
1575 return ERR_PTR(err);
1576}
1577
/* Unbind the trigger from hardware, then unlink and free the entry. */
static void
mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
				    struct mlxsw_sp_span_trigger_entry *
				    trigger_entry)
{
	trigger_entry->ops->unbind(trigger_entry);
	list_del(&trigger_entry->list);
	kfree(trigger_entry);
}
1587
1588static struct mlxsw_sp_span_trigger_entry *
1589mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
1590 enum mlxsw_sp_span_trigger trigger,
1591 struct mlxsw_sp_port *mlxsw_sp_port)
1592{
1593 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1594
1595 list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
08a3641f
IS
1596 if (trigger_entry->ops->matches(trigger_entry, trigger,
1597 mlxsw_sp_port))
c056618c
IS
1598 return trigger_entry;
1599 }
1600
1601 return NULL;
1602}
1603
1604int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
1605 enum mlxsw_sp_span_trigger trigger,
1606 struct mlxsw_sp_port *mlxsw_sp_port,
1607 const struct mlxsw_sp_span_trigger_parms *parms)
1608{
1609 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1610 int err = 0;
1611
1612 ASSERT_RTNL();
1613
1614 if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
1615 return -EINVAL;
1616
1617 trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1618 trigger,
1619 mlxsw_sp_port);
1620 if (trigger_entry) {
1621 if (trigger_entry->parms.span_id != parms->span_id)
1622 return -EINVAL;
1623 refcount_inc(&trigger_entry->ref_count);
1624 goto out;
1625 }
1626
1627 trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
1628 trigger,
1629 mlxsw_sp_port,
1630 parms);
1631 if (IS_ERR(trigger_entry))
1632 err = PTR_ERR(trigger_entry);
1633
1634out:
1635 return err;
1636}
1637
/* Undo mlxsw_sp_span_agent_bind(): drop one reference on the trigger
 * binding and destroy it on the last unbind. Runs under RTNL.
 */
void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_span_trigger trigger,
				struct mlxsw_sp_port *mlxsw_sp_port,
				const struct mlxsw_sp_span_trigger_parms *parms)
{
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
							 parms->span_id)))
		return;

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return;

	if (!refcount_dec_and_test(&trigger_entry->ref_count))
		return;

	mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
}
4bafb85a 1662
2bafb216
IS
/* Enable a previously bound mirroring trigger on the given port and
 * traffic class. Runs under RTNL.
 */
int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_sp_span_trigger trigger, u8 tc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_span_trigger_entry *trigger_entry;

	ASSERT_RTNL();

	trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
							 trigger,
							 mlxsw_sp_port);
	if (WARN_ON_ONCE(!trigger_entry))
		return -EINVAL;

	return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
}
1679
1680void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
1681 enum mlxsw_sp_span_trigger trigger, u8 tc)
1682{
1683 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1684 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1685
1686 ASSERT_RTNL();
1687
1688 trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1689 trigger,
1690 mlxsw_sp_port);
1691 if (WARN_ON_ONCE(!trigger_entry))
1692 return;
1693
1694 return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
1695}
1696
08a3641f
IS
/* Spectrum-1 SPAN initialization: install the per-ASIC trigger and
 * entry ops tables after sanity-checking the entry ops ordering.
 */
static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] !=
		    &mlxsw_sp1_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}
1714
4bafb85a
IS
1715static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
1716{
1717 return mtu * 5 / 2;
1718}
1719
4039504e
IS
/* Spectrum-1 does not support mirroring policers. */
static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	return -EOPNOTSUPP;
}
1725
/* Per-ASIC SPAN callbacks for Spectrum-1. */
const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
	.init = mlxsw_sp1_span_init,
	.buffsize_get = mlxsw_sp1_span_buffsize_get,
	.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
};
1731
08a3641f
IS
/* Spectrum-2/3 SPAN initialization: install the per-ASIC trigger and
 * entry ops tables after sanity-checking the entry ops ordering.
 */
static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr);

	/* Must be first to avoid NULL pointer dereference by subsequent
	 * can_handle() callbacks.
	 */
	if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] !=
		    &mlxsw_sp2_span_entry_ops_cpu))
		return -EINVAL;

	mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr;
	mlxsw_sp->span->span_entry_ops_arr_size = arr_size;

	return 0;
}
1749
4bafb85a
IS
1750#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
1751#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
1752
1753static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
1754{
1755 return 3 * mtu + buffer_factor * speed / 1000;
1756}
1757
1758static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
1759{
1760 int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
1761
1762 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
1763}
1764
4039504e
IS
/* Tell the device the first policer index reserved for mirroring, via a
 * read-modify-write of the MOGCR register so its other fields are kept.
 */
static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
					      u16 policer_id_base)
{
	char mogcr_pl[MLXSW_REG_MOGCR_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
	if (err)
		return err;

	mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
}
1778
/* Per-ASIC SPAN callbacks for Spectrum-2. */
const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
	.init = mlxsw_sp2_span_init,
	.buffsize_get = mlxsw_sp2_span_buffsize_get,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};
1784
1785static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
1786{
1787 int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
1788
1789 return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
1790}
1791
/* Per-ASIC SPAN callbacks for Spectrum-3; init and policer handling are
 * shared with Spectrum-2, only the buffer sizing differs.
 */
const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
	.init = mlxsw_sp2_span_init,
	.buffsize_get = mlxsw_sp3_span_buffsize_get,
	.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};