]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
UBUNTU: Ubuntu-5.15.0-39.42
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_rep.c
CommitLineData
cb67b832
HHZ
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
cb67b832
HHZ
33#include <linux/mlx5/fs.h>
34#include <net/switchdev.h>
d957b4e3 35#include <net/pkt_cls.h>
717503b9 36#include <net/act_api.h>
f60f315d 37#include <net/devlink.h>
5cc3a8c6 38#include <net/ipv6_stubs.h>
cb67b832
HHZ
39
40#include "eswitch.h"
41#include "en.h"
1d447a39 42#include "en_rep.h"
b3a131c2 43#include "en/params.h"
b307f7f1 44#include "en/txrx.h"
adb4c123 45#include "en_tc.h"
768c3667 46#include "en/rep/tc.h"
549c243e 47#include "en/rep/neigh.h"
19e9bfa0 48#include "en/rep/bridge.h"
7a9fb35e 49#include "en/devlink.h"
f6dfb4c3 50#include "fs_core.h"
71c6eaeb 51#include "lib/mlx5.h"
898b0786 52#include "lib/devcom.h"
8ed6b176 53#include "lib/vxlan.h"
5970882a
VB
54#define CREATE_TRACE_POINTS
55#include "diag/en_rep_tracepoint.h"
5589b8f1 56#include "en_accel/ipsec.h"
cb67b832 57
/* Representor SQ size: 2^7 entries, but never below the driver-wide
 * minimum SQ size.
 */
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
/* Representors default to a single channel (one RX/TX queue pair). */
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
63
64static void mlx5e_rep_get_drvinfo(struct net_device *dev,
65 struct ethtool_drvinfo *drvinfo)
66{
cf83c8fd
DL
67 struct mlx5e_priv *priv = netdev_priv(dev);
68 struct mlx5_core_dev *mdev = priv->mdev;
69
cb67b832
HHZ
70 strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
71 sizeof(drvinfo->driver));
cf83c8fd
DL
72 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
73 "%d.%d.%04d (%.16s)",
74 fw_rev_maj(mdev), fw_rev_min(mdev),
75 fw_rev_sub(mdev), mdev->board_id);
76}
77
cb67b832
HHZ
/* Software (CPU-side, per-ring aggregated) counters exposed for reps. */
static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

/* Holder for the eswitch vport HW counters, in the order reported. */
struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
cb67b832 101
/* Stats group "sw_rep": number of counters in the group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
	return NUM_VPORT_REP_SW_COUNTERS;
}

/* Stats group "sw_rep": copy counter names into the ethtool string
 * table starting at @idx; returns the next free index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       sw_rep_stats_desc[i].format);
	return idx;
}
116
/* Stats group "sw_rep": copy counter values into @data starting at
 * @idx; returns the next free index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
						   sw_rep_stats_desc, i);
	return idx;
}

/* Stats group "sw_rep": re-aggregate the per-ring software counters
 * into priv->stats.sw via the common fold helper.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}
141
/* Stats group "vport_rep": number of HW vport counters. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
	return NUM_VPORT_REP_HW_COUNTERS;
}

/* Stats group "vport_rep": copy counter names starting at @idx. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
	return idx;
}

/* Stats group "vport_rep": copy counter values starting at @idx. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
{
	int i;

	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
						   vport_rep_stats_desc, i);
	return idx;
}
165
/* Stats group "vport_rep": query the eswitch vport counters from FW
 * and cache them in priv->stats.vf_vport. On query failure the cached
 * values are left untouched (a warning is logged).
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
			    rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes = vf_stats.rx_bytes;
}
189
8a236b15
VB
/* ethtool: emit the stat string names (only ETH_SS_STATS supported). */
static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (stringset) {
	case ETH_SS_STATS:
		mlx5e_stats_fill_strings(priv, data);
		break;
	}
}

/* ethtool: fill all stat values via the common mlx5e stats machinery. */
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}

/* ethtool: number of strings/stats for a given string set. */
static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return mlx5e_stats_total_num(priv);
	default:
		return -EOPNOTSUPP;
	}
}
221
f128f138
GT
/* The following are thin ethtool-op wrappers that forward to the
 * shared mlx5e ethtool implementation with the rep's private context.
 */

static void mlx5e_rep_get_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}

static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_channels(priv, ch);
}

/* kernel_coal/extack are accepted per the ethtool op signature but the
 * shared helpers only consume the legacy ethtool_coalesce fields.
 */
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
287
/* ethtool ops exposed on VF representor netdevs. */
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	/* Coalesce parameters the driver actually honors; others are
	 * rejected by the ethtool core before reaching set_coalesce.
	 */
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
306
f7a68945
MB
/* Tear down all send-to-vport steering rules (local and peer-eswitch)
 * installed for this rep's SQs and free the tracking entries.
 * No-op unless the eswitch is in switchdev (offloads) mode.
 */
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	/* _safe variant: entries are deleted while walking the list */
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		if (rep_sq->send_to_vport_rule_peer)
			mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}
325
/* Install a send-to-vport ("re-inject") steering rule for each SQ in
 * @sqns_array so traffic sent on the rep's SQs reaches its vport. When
 * the eswitch is paired with a peer (multi-port/lag), a mirror rule is
 * also installed on the peer eswitch. On any failure all rules added
 * so far are rolled back via mlx5e_sqs2vport_stop().
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_eswitch *peer_esw = NULL;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	/* Taking the peer data holds a reference that must be released
	 * on every exit path below.
	 */
	if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
						     MLX5_DEVCOM_ESW_OFFLOADS);

	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		rep_sq->sqn = sqns_array[i];

		if (peer_esw) {
			flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
									rep, sqns_array[i]);
			if (IS_ERR(flow_rule)) {
				err = PTR_ERR(flow_rule);
				/* current entry is not yet on the list;
				 * undo it by hand before the rollback
				 */
				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
				kfree(rep_sq);
				goto out_err;
			}
			rep_sq->send_to_vport_rule_peer = flow_rule;
		}

		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return 0;

out_err:
	/* removes every entry already linked on vport_sqs_list */
	mlx5e_sqs2vport_stop(esw, rep);

	if (peer_esw)
		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return err;
}
391
/* Collect the SQ numbers of all active channels/TCs and install the
 * corresponding send-to-vport rules for this rep.
 * Returns 0 on success or a negative errno (also logged).
 */
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	/* worst-case: every channel carries the full DCB TC count */
	sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params),
		      sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}
421
cb67b832
HHZ
/* Remove all send-to-vport rules previously installed for this rep. */
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}
430
/* ndo_open: open the datapath, then bring the eswitch vport admin
 * state up; carrier is asserted only if the vport modify succeeds.
 */
static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
453
/* ndo_stop: bring the eswitch vport admin state down (best effort,
 * return value intentionally ignored), then close the datapath.
 */
static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}
470
370bad0f
OG
/* Return true iff @priv is the uplink (PF) representor instance. */
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
		return false;

	rep = rpriv->rep;
	return (rep->vport == MLX5_VPORT_UPLINK);
}
485
/* ndo_has_offload_stats: only the CPU-hit xstats attribute is backed. */
bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

/* Fold the per-ring software counters into @stats (CPU-hit traffic,
 * i.e. packets that missed HW offload and traversed the rep's rings).
 */
static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}

/* ndo_get_offload_stats: dispatch by attribute id; -EINVAL otherwise. */
int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
				void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}
516
/* ndo_get_stats64: return the cached vport counters immediately and
 * kick an async refresh so subsequent reads see fresher values.
 */
static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

/* ndo_change_mtu: delegate to the common mlx5e MTU path (no extack). */
static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
531
/* ndo_get_devlink_port: look up the devlink port registered by the
 * eswitch for this rep's vport.
 */
static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_core_dev *dev = priv->mdev;

	return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
}
540
45d252ca
PP
541static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
542{
543 struct mlx5e_priv *priv = netdev_priv(dev);
544 struct mlx5e_rep_priv *rpriv = priv->ppriv;
545 struct mlx5_eswitch_rep *rep = rpriv->rep;
546 int err;
547
548 if (new_carrier) {
549 err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
550 rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
551 if (err)
552 return err;
553 netif_carrier_on(dev);
554 } else {
555 err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
556 rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
557 if (err)
558 return err;
559 netif_carrier_off(dev);
560 }
561 return 0;
562}
563
9b81d5a9
VP
/* net_device ops for VF representor netdevs. */
static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_rep_change_mtu,
	.ndo_change_carrier      = mlx5e_rep_change_carrier,
};
250a42b6 576
/* True iff @netdev is the uplink rep: it uses the regular mlx5e netdev
 * ops (the uplink rep shares them with native NIC mode) AND its priv
 * identifies as uplink rep.
 */
bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops &&
	       mlx5e_is_uplink_rep(netdev_priv(netdev));
}

/* True iff @netdev is a VF representor (identified by its ops table). */
bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
	return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
587
/* Initialize the channel parameters for a representor netdev: single
 * channel, reduced SQ size for VF reps, default RQ/CQ moderation, one
 * TC and no tunnel offload.
 */
static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	/* prefer CQE-based moderation when the device supports it */
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
		MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
		MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;

	params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->sw_mtu = netdev->mtu;

	/* SQ: uplink rep carries real traffic, keep the full size */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->mqprio.num_tc = 1;
	params->tunneled_offload_en = false;

	/* Set an initial non-zero value, so that mlx5e_select_queue won't
	 * divide by zero if called before first activating channels.
	 */
	priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}
629
/* Wire up netdev ops, ethtool ops, a random MAC and the feature flags
 * for a representor netdev.
 */
static void mlx5e_build_rep_netdev(struct net_device *netdev,
				   struct mlx5_core_dev *mdev)
{
	SET_NETDEV_DEV(netdev, mdev->device);
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;
	eth_hw_addr_random(netdev);
	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

	netdev->watchdog_timeo = 15 * HZ;

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	netdev->hw_features    |= NETIF_F_HW_TC;
#endif
	netdev->hw_features    |= NETIF_F_SG;
	netdev->hw_features    |= NETIF_F_IP_CSUM;
	netdev->hw_features    |= NETIF_F_IPV6_CSUM;
	netdev->hw_features    |= NETIF_F_GRO;
	netdev->hw_features    |= NETIF_F_TSO;
	netdev->hw_features    |= NETIF_F_TSO6;
	netdev->hw_features    |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;
	/* reps are meaningful only in the eswitch's own netns */
	netdev->features |= NETIF_F_NETNS_LOCAL;
}
654
/* Profile init for VF reps: build params and init timestamping. */
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);

	return 0;
}

/* Profile init for the uplink rep: additionally set up IPsec (failure
 * is non-fatal, only logged) and VXLAN port info before the common
 * rep init.
 */
static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);

	mlx5e_vxlan_set_netdev_info(priv);
	return mlx5e_init_rep(mdev, netdev);
}

/* Profile cleanup shared by both rep flavors. */
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_cleanup(priv);
}
684
84a09733
GT
/* Create the rep's TTC (traffic type classifier) steering table.
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct ttc_params ttc_params = {};
	int err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	mlx5e_set_ttc_params(priv, &ttc_params, false);

	if (rep->vport != MLX5_VPORT_UPLINK)
		/* To give uplik rep TTC a lower level for chaining from root ft */
		ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

	priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(priv->fs.ttc)) {
		err = PTR_ERR(priv->fs.ttc);
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
			   err);
		return err;
	}
	return 0;
}
711
20f7b37f
SM
/* Set up rpriv->root_ft, the entry flow table for vport RX traffic.
 * Non-uplink reps reuse the TTC table directly; the uplink rep gets an
 * empty table whose miss path chains into ethtool/TTC tables.
 */
static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	if (rep->vport != MLX5_VPORT_UPLINK) {
		/* non uplik reps will skip any bypass tables and go directly to
		 * their own ttc
		 */
		rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
		return 0;
	}

	/* uplink root ft will be used to auto chain, to ethtool or ttc tables */
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
	ft_attr.prio = 1;
	ft_attr.level = 1;

	rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(rpriv->root_ft)) {
		err = PTR_ERR(rpriv->root_ft);
		/* NULL the pointer so cleanup paths can tell it was never created */
		rpriv->root_ft = NULL;
	}

	return err;
}

/* Destroy the uplink's root table; non-uplink reps never owned one
 * (theirs aliases the TTC table), so this is a no-op for them.
 */
static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport != MLX5_VPORT_UPLINK)
		return;
	mlx5_destroy_flow_table(rpriv->root_ft);
}
757
/* Install the eswitch RX rule that steers this vport's traffic into
 * the rep's root flow table.
 */
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rpriv->root_ft;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

/* Remove the vport RX rule if present; safe to call repeatedly. */
static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (!rpriv->vport_rx_rule)
		return;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = NULL;
}

/* Re-create (or, on @cleanup, just remove) the vport RX rule — used
 * when rep bonding changes the active steering destination.
 */
int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
{
	rep_vport_rx_rule_destroy(priv);

	return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
}
793
092297e0
GT
/* Build the rep RX path: RX resources, drop RQ, TTC table, root FT,
 * vport RX rule and ethtool steering. Unwinds in reverse on failure.
 */
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	priv->rx_res = mlx5e_rx_res_alloc();
	if (!priv->rx_res)
		return -ENOMEM;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		return err;
	}

	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
				priv->max_nch, priv->drop_rq.rqn,
				&priv->channels.params.packet_merge,
				priv->channels.params.num_channels);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_rx_res;

	err = mlx5e_create_rep_root_ft(priv);
	if (err)
		goto err_destroy_ttc_table;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_root_ft;

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_root_ft:
	mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
	mlx5_destroy_ttc_table(priv->fs.ttc);
err_destroy_rx_res:
	mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
	return err;
}

/* Tear down the rep RX path in exact reverse of mlx5e_init_rep_rx(). */
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_ethtool_cleanup_steering(priv);
	rep_vport_rx_rule_destroy(priv);
	mlx5e_destroy_rep_root_ft(priv);
	mlx5_destroy_ttc_table(priv->fs.ttc);
	mlx5e_rx_res_destroy(priv->rx_res);
	mlx5e_close_drop_rq(&priv->drop_rq);
	mlx5e_rx_res_free(priv->rx_res);
	priv->rx_res = NULL;
}
858
8520fa57
VB
/* Uplink rep RX init: also create the queue counters the uplink uses. */
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_create_q_counters(priv);
	return mlx5e_init_rep_rx(priv);
}

/* Uplink rep RX cleanup, reverse order of mlx5e_init_ul_rep_rx(). */
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}
870
29b598dd
RD
/* Uplink-only TX-side init: TC offload infrastructure, tunnel entropy,
 * rep bonding and the netdev event notifier. Unwinds on failure.
 */
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int err;

	netdev = rpriv->netdev;
	priv = netdev_priv(netdev);
	uplink_priv = &rpriv->uplink_priv;

	err = mlx5e_rep_tc_init(rpriv);
	if (err)
		return err;

	mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

	mlx5e_rep_bond_init(rpriv);
	err = mlx5e_rep_tc_netdevice_event_register(rpriv);
	if (err) {
		mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
			      err);
		goto err_event_reg;
	}

	return 0;

err_event_reg:
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
	return err;
}
903
cb67b832
HHZ
/* Rep TX init: create TISes for all reps; the uplink rep additionally
 * brings up its TC/bond/notifier infrastructure.
 */
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_init_uplink_rep_tx(rpriv);
		if (err)
			goto destroy_tises;
	}

	return 0;

destroy_tises:
	mlx5e_destroy_tises(priv);
	return err;
}

/* Reverse of mlx5e_init_uplink_rep_tx(). */
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
}

/* Rep TX cleanup: destroy TISes, then uplink-specific teardown. */
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_destroy_tises(priv);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_cleanup_uplink_rep_tx(rpriv);
}
944
/* Profile enable hook for VF reps: set MTU bounds and start neighbor
 * update machinery (used for tunnel encap offload).
 */
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_set_netdev_mtu_boundaries(priv);
	mlx5e_rep_neigh_init(rpriv);
}

/* Profile disable hook for VF reps. */
static void mlx5e_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5e_rep_neigh_cleanup(rpriv);
}

/* Reps have no carrier state to refresh here; intentional no-op. */
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}
964
b36cdb42
OG
/* Async event handler for the uplink rep: reacts to physical port
 * up/down (queues a carrier update) and to LAG port-affinity changes
 * (forwarded to the rep TC layer). All other events are ignored.
 */
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
		return mlx5e_rep_tc_event_port_affinity(priv);

	return NOTIFY_DONE;
}
989
990static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
991{
6b424e13 992 struct mlx5e_rep_priv *rpriv = priv->ppriv;
b36cdb42
OG
993 struct net_device *netdev = priv->netdev;
994 struct mlx5_core_dev *mdev = priv->mdev;
995 u16 max_mtu;
996
997 netdev->min_mtu = ETH_MIN_MTU;
998 mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
999 netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
1000 mlx5e_set_dev_port_mtu(priv);
1001
768c3667 1002 mlx5e_rep_tc_enable(priv);
b4a23329 1003
1a73704c
EC
1004 if (MLX5_CAP_GEN(mdev, uplink_follow))
1005 mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
1006 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
8a66e458 1007 mlx5_lag_add_netdev(mdev, netdev);
b36cdb42
OG
1008 priv->events_nb.notifier_call = uplink_rep_async_event;
1009 mlx5_notifier_register(mdev, &priv->events_nb);
b36cdb42
OG
1010 mlx5e_dcbnl_initialize(priv);
1011 mlx5e_dcbnl_init_app(priv);
6b424e13 1012 mlx5e_rep_neigh_init(rpriv);
19e9bfa0 1013 mlx5e_rep_bridge_init(priv);
7a9fb35e
RD
1014
1015 netdev->wanted_features |= NETIF_F_HW_TC;
1016
1017 rtnl_lock();
1018 if (netif_running(netdev))
1019 mlx5e_open(netdev);
8ed6b176 1020 udp_tunnel_nic_reset_ntf(priv->netdev);
7a9fb35e
RD
1021 netif_device_attach(netdev);
1022 rtnl_unlock();
b36cdb42
OG
1023}
1024
/* Profile .disable callback for the uplink representor: reverse of
 * mlx5e_uplink_rep_enable(), tearing state down in roughly the opposite
 * order it was set up.
 */
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_core_dev *mdev = priv->mdev;

	/* Detach the netdev under RTNL before dismantling offload state. */
	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	mlx5e_rep_bridge_cleanup(priv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_dcbnl_delete_app(priv);
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	mlx5e_rep_tc_disable(priv);
	mlx5_lag_remove_netdev(mdev, priv->netdev);
	/* Restore the device VXLAN port table to its default state. */
	mlx5_vxlan_reset_to_default(mdev->vxlan);
}
1044
8a236b15
VB
/* Stats groups exposed by VF/SF representor netdevs. */
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups order is opposite to the update_stats() order calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw_rep),
	&MLX5E_STATS_GRP(vport_rep),
};
1053
/* Profile .stats_grps_num callback for VF/SF representors. */
static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_rep_stats_grps);
}
1058
/* The uplink representor exposes the full NIC/port stats group set.
 * The stats groups order is opposite to the update_stats() order calls.
 */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
	&MLX5E_STATS_GRP(ipsec_hw),
#endif
};
1080
/* Profile .stats_grps_num callback for the uplink representor. */
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}
1085
/* netdev profile used for VF/SF vport representor netdevs
 * (instantiated by mlx5e_vport_vf_rep_load()).
 */
static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable			= mlx5e_rep_enable,
	.disable		= mlx5e_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats		= mlx5e_stats_update_ndo_stats,
	.rx_handlers		= &mlx5e_rx_handlers_rep,
	.max_tc			= 1,
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(REGULAR),
	.stats_grps		= mlx5e_rep_stats_grps,
	.stats_grps_num		= mlx5e_rep_stats_grps_num,
	.rx_ptp_support		= false,
};
1104
/* netdev profile swapped onto the existing uplink netdev while switchdev
 * mode is active (see mlx5e_vport_uplink_rep_load()).
 */
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init			= mlx5e_init_ul_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_ul_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_ul_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable			= mlx5e_uplink_rep_enable,
	.disable		= mlx5e_uplink_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats		= mlx5e_stats_update_ndo_stats,
	.update_carrier		= mlx5e_update_carrier,
	.rx_handlers		= &mlx5e_rx_handlers_rep,
	.max_tc			= MLX5E_MAX_NUM_TC,
	/* XSK is needed so we can replace profile with NIC netdev */
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(XSK),
	.stats_grps		= mlx5e_ul_rep_stats_grps,
	.stats_grps_num		= mlx5e_ul_rep_stats_grps_num,
	.rx_ptp_support		= false,
};
1125
1d447a39 1126/* e-Switch vport representors */
/* Load the uplink vport representor.
 *
 * The uplink does not get a new netdev: the existing uplink netdev is
 * reused and its profile is swapped to mlx5e_uplink_rep_profile, after
 * which the devlink port (if any) is marked as ethernet.
 *
 * Returns 0 on success or the error from the profile change.
 */
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct devlink_port *dl_port;
	int err;

	rpriv->netdev = priv->netdev;

	err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
					  rpriv);
	if (err)
		return err;

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_eth_set(dl_port, rpriv->netdev);

	return 0;
}
1148
1149static void
1150mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
1151{
1152 struct net_device *netdev = rpriv->netdev;
1153 struct devlink_port *dl_port;
1154 struct mlx5_core_dev *dev;
1155 struct mlx5e_priv *priv;
1156
1157 priv = netdev_priv(netdev);
1158 dev = priv->mdev;
1159
1160 dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
1161 if (dl_port)
1162 devlink_port_type_clear(dl_port);
1163 mlx5e_netdev_attach_nic_profile(priv);
1164}
1165
/* Load a VF/SF vport representor.
 *
 * Creates a dedicated netdev sized for the maximum channel count, binds
 * the representor profile to it, attaches and registers it, and finally
 * marks the matching devlink port (if any) as ethernet.
 *
 * Error handling uses goto-based unwinding in strict reverse order of
 * the setup steps. Returns 0 on success or a negative errno.
 */
static int
mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	const struct mlx5e_profile *profile;
	struct devlink_port *dl_port;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	unsigned int txqs, rxqs;
	int nch, err;

	profile = &mlx5e_rep_profile;

	/* Size the netdev queue arrays for the maximum channel count. */
	nch = mlx5e_get_max_num_channels(dev);
	txqs = nch * profile->max_tc;
	rxqs = nch * profile->rq_groups;
	netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
	if (!netdev) {
		mlx5_core_warn(dev,
			       "Failed to create representor netdev for vport %d\n",
			       rep->vport);
		return -EINVAL;
	}

	mlx5e_build_rep_netdev(netdev, dev);
	rpriv->netdev = netdev;

	priv = netdev_priv(netdev);
	priv->profile = profile;
	priv->ppriv = rpriv;
	err = profile->init(dev, netdev);
	if (err) {
		netdev_warn(netdev, "rep profile init failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		netdev_warn(netdev,
			    "Failed to attach representor netdev for vport %d\n",
			    rep->vport);
		goto err_cleanup_profile;
	}

	err = register_netdev(netdev);
	if (err) {
		netdev_warn(netdev,
			    "Failed to register representor netdev for vport %d\n",
			    rep->vport);
		goto err_detach_netdev;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_eth_set(dl_port, netdev);
	return 0;

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_cleanup_profile:
	priv->profile->cleanup(priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	return err;
}
1232
1233static int
1234mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1235{
1236 struct mlx5e_rep_priv *rpriv;
1237 int err;
1238
1239 rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
1240 if (!rpriv)
1241 return -ENOMEM;
1242
1243 /* rpriv->rep to be looked up when profile->init() is called */
1244 rpriv->rep = rep;
1245 rep->rep_data[REP_ETH].priv = rpriv;
1246 INIT_LIST_HEAD(&rpriv->vport_sqs_list);
1247
1248 if (rep->vport == MLX5_VPORT_UPLINK)
1249 err = mlx5e_vport_uplink_rep_load(dev, rep);
1250 else
1251 err = mlx5e_vport_vf_rep_load(dev, rep);
1252
1253 if (err)
1254 kfree(rpriv);
1255
26e59d80 1256 return err;
cb67b832
HHZ
1257}
1258
/* eswitch rep_ops .unload callback: inverse of mlx5e_vport_rep_load().
 *
 * For the uplink the netdev survives (only its profile is swapped back);
 * for VF/SF representors the netdev is unregistered, detached and
 * destroyed. In both cases the rep private data is freed last.
 */
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	struct devlink_port *dl_port;
	/* Saved up front: priv is freed by mlx5e_destroy_netdev() below. */
	void *ppriv = priv->ppriv;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		mlx5e_vport_uplink_rep_unload(rpriv);
		goto free_ppriv;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_clear(dl_port);
	unregister_netdev(netdev);
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	mlx5e_destroy_netdev(priv);
free_ppriv:
	kfree(ppriv); /* mlx5e_rep_priv */
}
1284
22215908
MB
1285static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
1286{
1287 struct mlx5e_rep_priv *rpriv;
1288
1289 rpriv = mlx5e_rep_to_rep_priv(rep);
1290
1291 return rpriv->netdev;
1292}
1293
898b0786
MB
1294static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep)
1295{
1296 struct mlx5e_rep_priv *rpriv;
1297 struct mlx5e_rep_sq *rep_sq;
1298
1299 rpriv = mlx5e_rep_to_rep_priv(rep);
1300 list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
1301 if (!rep_sq->send_to_vport_rule_peer)
1302 continue;
1303 mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
1304 rep_sq->send_to_vport_rule_peer = NULL;
1305 }
1306}
1307
1308static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
1309 struct mlx5_eswitch_rep *rep,
1310 struct mlx5_eswitch *peer_esw)
1311{
1312 struct mlx5_flow_handle *flow_rule;
1313 struct mlx5e_rep_priv *rpriv;
1314 struct mlx5e_rep_sq *rep_sq;
1315
1316 rpriv = mlx5e_rep_to_rep_priv(rep);
1317 list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
1318 if (rep_sq->send_to_vport_rule_peer)
1319 continue;
1320 flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);
1321 if (IS_ERR(flow_rule))
1322 goto err_out;
1323 rep_sq->send_to_vport_rule_peer = flow_rule;
1324 }
1325
1326 return 0;
1327err_out:
1328 mlx5e_vport_rep_event_unpair(rep);
1329 return PTR_ERR(flow_rule);
1330}
1331
1332static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
1333 struct mlx5_eswitch_rep *rep,
1334 enum mlx5_switchdev_event event,
1335 void *data)
1336{
1337 int err = 0;
1338
1339 if (event == MLX5_SWITCHDEV_EVENT_PAIR)
1340 err = mlx5e_vport_rep_event_pair(esw, rep, data);
1341 else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
1342 mlx5e_vport_rep_event_unpair(rep);
1343
1344 return err;
1345}
1346
8693115a
PP
/* Callbacks registered with the eswitch core for REP_ETH representors. */
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev,
	.event = mlx5e_vport_rep_event,
};
1353
912cebf4
LR
1354static int mlx5e_rep_probe(struct auxiliary_device *adev,
1355 const struct auxiliary_device_id *id)
1d447a39 1356{
912cebf4
LR
1357 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
1358 struct mlx5_core_dev *mdev = edev->mdev;
1359 struct mlx5_eswitch *esw;
1d447a39 1360
912cebf4 1361 esw = mdev->priv.eswitch;
8693115a 1362 mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
912cebf4 1363 return 0;
1d447a39
SM
1364}
1365
912cebf4 1366static void mlx5e_rep_remove(struct auxiliary_device *adev)
1d447a39 1367{
912cebf4
LR
1368 struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
1369 struct mlx5_core_dev *mdev = vdev->mdev;
1370 struct mlx5_eswitch *esw;
1d447a39 1371
912cebf4 1372 esw = mdev->priv.eswitch;
f8e8fa02 1373 mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
1d447a39 1374}
912cebf4
LR
1375
/* Match the ".eth-rep" auxiliary device exposed by the mlx5 core driver. */
static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth-rep", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);
1382
/* Auxiliary driver binding representor support to the eth-rep device. */
static struct auxiliary_driver mlx5e_rep_driver = {
	.name = "eth-rep",
	.probe = mlx5e_rep_probe,
	.remove = mlx5e_rep_remove,
	.id_table = mlx5e_rep_id_table,
};
1389
/* Register the representor auxiliary driver. Returns 0 or a negative errno. */
int mlx5e_rep_init(void)
{
	return auxiliary_driver_register(&mlx5e_rep_driver);
}
1394
/* Unregister the representor auxiliary driver; inverse of mlx5e_rep_init(). */
void mlx5e_rep_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5e_rep_driver);
}