/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/devlink.h>
#include <net/ipv6_stubs.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en/params.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/rep/bridge.h"
#include "en/devlink.h"
#include "fs_core.h"
#include "lib/mlx5.h"
#include "lib/devcom.h"
#include "lib/vxlan.h"
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"

#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
        max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
                sizeof(drvinfo->driver));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%d.%d.%04d (%.16s)",
                 fw_rev_maj(mdev), fw_rev_min(mdev),
                 fw_rev_sub(mdev), mdev->board_id);
}

static const struct counter_desc sw_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
        u64 vport_rx_packets;
        u64 vport_tx_packets;
        u64 vport_rx_bytes;
        u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

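/* Stats group callbacks for the representor profiles; the
 * MLX5E_DECLARE_STATS_GRP_OP_* macros expand to the standard
 * num_stats/fill_strings/fill_stats/update_stats callback signatures
 * of the mlx5e stats framework (see en_stats.h).
 */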
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
{
        return NUM_VPORT_REP_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
{
        int i;

        for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       sw_rep_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
{
        int i;

        for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
                                                   sw_rep_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct rtnl_link_stats64 stats64 = {};

        memset(s, 0, sizeof(*s));
        mlx5e_fold_sw_stats64(priv, &stats64);

        s->rx_packets = stats64.rx_packets;
        s->rx_bytes = stats64.rx_bytes;
        s->tx_packets = stats64.tx_packets;
        s->tx_bytes = stats64.tx_bytes;
        s->tx_queue_dropped = stats64.tx_dropped;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
{
        return NUM_VPORT_REP_HW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
{
        int i;

        for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
{
        int i;

        for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
                                                   vport_rep_stats_desc, i);
        return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct rtnl_link_stats64 *vport_stats;
        struct ifla_vf_stats vf_stats;
        int err;

        err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
        if (err) {
                netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
                            rep->vport, err);
                return;
        }

        vport_stats = &priv->stats.vf_vport;
        /* flip tx/rx as we are reporting the counters for the switch vport */
        vport_stats->rx_packets = vf_stats.tx_packets;
        vport_stats->rx_bytes = vf_stats.tx_bytes;
        vport_stats->tx_packets = vf_stats.rx_packets;
        vport_stats->tx_bytes = vf_stats.rx_bytes;
}

static void mlx5e_rep_get_strings(struct net_device *dev,
                                  u32 stringset, uint8_t *data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        switch (stringset) {
        case ETH_SS_STATS:
                mlx5e_stats_fill_strings(priv, data);
                break;
        }
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats, u64 *data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        switch (sset) {
        case ETH_SS_STATS:
                return mlx5e_stats_total_num(priv);
        default:
                return -EOPNOTSUPP;
        }
}

static void mlx5e_rep_get_ringparam(struct net_device *dev,
                                    struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        return mlx5e_ethtool_set_ringparam(priv, param);
}

static void mlx5e_rep_get_channels(struct net_device *dev,
                                   struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
                                  struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        return mlx5e_ethtool_set_channels(priv, ch);
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
                                  struct ethtool_coalesce *coal,
                                  struct kernel_ethtool_coalesce *kernel_coal,
                                  struct netlink_ext_ack *extack)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_coalesce(priv, coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
                                  struct ethtool_coalesce *coal,
                                  struct kernel_ethtool_coalesce *kernel_coal,
                                  struct netlink_ext_ack *extack)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_coalesce(priv, coal);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_MAX_FRAMES |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE,
        .get_drvinfo       = mlx5e_rep_get_drvinfo,
        .get_link          = ethtool_op_get_link,
        .get_strings       = mlx5e_rep_get_strings,
        .get_sset_count    = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
        .get_ringparam     = mlx5e_rep_get_ringparam,
        .set_ringparam     = mlx5e_rep_set_ringparam,
        .get_channels      = mlx5e_rep_get_channels,
        .set_channels      = mlx5e_rep_set_channels,
        .get_coalesce      = mlx5e_rep_get_coalesce,
        .set_coalesce      = mlx5e_rep_set_coalesce,
        .get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
        .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

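/* Tear down the send-to-vport (SQ re-inject) rules installed by
 * mlx5e_sqs2vport_start(), including any mirrored rules on a paired
 * peer eswitch.
 */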
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_sq *rep_sq, *tmp;
        struct mlx5e_rep_priv *rpriv;

        if (esw->mode != MLX5_ESWITCH_OFFLOADS)
                return;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
                mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
                if (rep_sq->send_to_vport_rule_peer)
                        mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
                list_del(&rep_sq->list);
                kfree(rep_sq);
        }
}

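/* Install a send-to-vport rule for each of the representor's SQs so
 * traffic sent on them is steered to the represented vport. If the
 * eswitch is devcom-paired (e.g. under lag), mirror each rule on the
 * peer eswitch as well.
 */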
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 u32 *sqns_array, int sqns_num)
{
        struct mlx5_eswitch *peer_esw = NULL;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_rep_sq *rep_sq;
        int err;
        int i;

        if (esw->mode != MLX5_ESWITCH_OFFLOADS)
                return 0;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
                peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
                                                     MLX5_DEVCOM_ESW_OFFLOADS);

        for (i = 0; i < sqns_num; i++) {
                rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
                if (!rep_sq) {
                        err = -ENOMEM;
                        goto out_err;
                }

                /* Add re-inject rule to the PF/representor sqs */
                flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
                                                                sqns_array[i]);
                if (IS_ERR(flow_rule)) {
                        err = PTR_ERR(flow_rule);
                        kfree(rep_sq);
                        goto out_err;
                }
                rep_sq->send_to_vport_rule = flow_rule;
                rep_sq->sqn = sqns_array[i];

                if (peer_esw) {
                        flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
                                                                        rep, sqns_array[i]);
                        if (IS_ERR(flow_rule)) {
                                err = PTR_ERR(flow_rule);
                                mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
                                kfree(rep_sq);
                                goto out_err;
                        }
                        rep_sq->send_to_vport_rule_peer = flow_rule;
                }

                list_add(&rep_sq->list, &rpriv->vport_sqs_list);
        }

        if (peer_esw)
                mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

        return 0;

out_err:
        mlx5e_sqs2vport_stop(esw, rep);

        if (peer_esw)
                mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);

        return err;
}

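/* Collect the SQ numbers of all open channels/TCs and install the
 * corresponding send-to-vport rules.
 */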
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5e_channel *c;
        int n, tc, num_sqs = 0;
        int err = -ENOMEM;
        u32 *sqs;

        sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params),
                      sizeof(*sqs), GFP_KERNEL);
        if (!sqs)
                goto out;

        for (n = 0; n < priv->channels.num; n++) {
                c = priv->channels.c[n];
                for (tc = 0; tc < c->num_tc; tc++)
                        sqs[num_sqs++] = c->sq[tc].sqn;
        }

        err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
        kfree(sqs);

out:
        if (err)
                netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
        return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        mlx5e_sqs2vport_stop(esw, rep);
}

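/* ndo_open/ndo_stop for a vport representor: open/close the channels
 * and move the represented vport's admin state up/down. The carrier is
 * turned on only if the vport state change succeeded.
 */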
static int mlx5e_rep_open(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(dev);
        if (err)
                goto unlock;

        if (!mlx5_modify_vport_admin_state(priv->mdev,
                                           MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                           rep->vport, 1,
                                           MLX5_VPORT_ADMIN_STATE_UP))
                netif_carrier_on(dev);

unlock:
        mutex_unlock(&priv->state_lock);
        return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        mutex_lock(&priv->state_lock);
        mlx5_modify_vport_admin_state(priv->mdev,
                                      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                      rep->vport, 1,
                                      MLX5_VPORT_ADMIN_STATE_DOWN);
        ret = mlx5e_close_locked(dev);
        mutex_unlock(&priv->state_lock);
        return ret;
}

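/* True only for the uplink representor's mlx5e instance; non-rep mlx5e
 * instances have no rpriv and always return false.
 */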
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;

        if (!rpriv) /* non vport rep mlx5e instances don't use this field */
                return false;

        rep = rpriv->rep;
        return (rep->vport == MLX5_VPORT_UPLINK);
}

bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return true;
        }

        return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
                     struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_fold_sw_stats64(priv, stats);
        return 0;
}

int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
                                void *sp)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return mlx5e_get_sw_stats64(dev, sp);
        }

        return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        /* update HW stats in background for next time */
        mlx5e_queue_update_stats(priv);
        memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
        return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_core_dev *dev = priv->mdev;

        return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
}

static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int err;

        if (new_carrier) {
                err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                                    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
                if (err)
                        return err;
                netif_carrier_on(dev);
        } else {
                err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                                    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
                if (err)
                        return err;
                netif_carrier_off(dev);
        }
        return 0;
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
        .ndo_open                = mlx5e_rep_open,
        .ndo_stop                = mlx5e_rep_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_setup_tc            = mlx5e_rep_setup_tc,
        .ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
        .ndo_get_stats64         = mlx5e_rep_get_stats,
        .ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
        .ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
        .ndo_change_mtu          = mlx5e_rep_change_mtu,
        .ndo_change_carrier      = mlx5e_rep_change_carrier,
};

bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
        return netdev->netdev_ops == &mlx5e_netdev_ops &&
               mlx5e_is_uplink_rep(netdev_priv(netdev));
}

bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
        return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}

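/* Build the default channel parameters for a representor netdev:
 * a single channel, and a reduced SQ size for everything but the
 * uplink representor.
 */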
static void mlx5e_build_rep_params(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_params *params;

        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                                        MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                        MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

        params = &priv->channels.params;

        params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
        params->hard_mtu = MLX5E_ETH_HARD_MTU;
        params->sw_mtu = netdev->mtu;

        /* SQ */
        if (rep->vport == MLX5_VPORT_UPLINK)
                params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        else
                params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

        /* RQ */
        mlx5e_build_rq_params(mdev, params);

        /* CQ moderation params */
        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

        params->mqprio.num_tc = 1;
        params->tunneled_offload_en = false;

        /* Set an initial non-zero value, so that mlx5e_select_queue won't
         * divide by zero if called before first activating channels.
         */
        priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;

        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev,
                                   struct mlx5_core_dev *mdev)
{
        SET_NETDEV_DEV(netdev, mdev->device);
        netdev->netdev_ops = &mlx5e_netdev_ops_rep;
        eth_hw_addr_random(netdev);
        netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

        netdev->watchdog_timeo = 15 * HZ;

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
        netdev->hw_features |= NETIF_F_HW_TC;
#endif
        netdev->hw_features |= NETIF_F_SG;
        netdev->hw_features |= NETIF_F_IP_CSUM;
        netdev->hw_features |= NETIF_F_IPV6_CSUM;
        netdev->hw_features |= NETIF_F_GRO;
        netdev->hw_features |= NETIF_F_TSO;
        netdev->hw_features |= NETIF_F_TSO6;
        netdev->hw_features |= NETIF_F_RXCSUM;

        netdev->features |= netdev->hw_features;
        netdev->features |= NETIF_F_NETNS_LOCAL;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
                          struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5e_build_rep_params(netdev);
        mlx5e_timestamp_init(priv);

        return 0;
}

static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
                             struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        err = mlx5e_ipsec_init(priv);
        if (err)
                mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);

        mlx5e_vxlan_set_netdev_info(priv);
        return mlx5e_init_rep(mdev, netdev);
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
        mlx5e_ipsec_cleanup(priv);
}

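/* Create the representor's traffic type classifier (TTC) table in the
 * kernel flow namespace; the vport RX rule (via the root ft) eventually
 * lands here.
 */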
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct ttc_params ttc_params = {};
        int err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        /* The inner_ttc in the ttc params is intentionally not set */
        mlx5e_set_ttc_params(priv, &ttc_params, false);

        if (rep->vport != MLX5_VPORT_UPLINK)
                /* To give the uplink rep TTC a lower level for chaining from the root ft */
                ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;

        priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
        if (IS_ERR(priv->fs.ttc)) {
                err = PTR_ERR(priv->fs.ttc);
                netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
                           err);
                return err;
        }
        return 0;
}

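/* For the uplink rep, create an empty root table in the offloads
 * namespace whose miss path chains to the next table (ethtool or ttc);
 * other reps use their ttc table directly as the root.
 */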
static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_flow_namespace *ns;
        int err = 0;

        if (rep->vport != MLX5_VPORT_UPLINK) {
                /* Non-uplink reps will skip any bypass tables and go directly to
                 * their own ttc
                 */
                rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
                return 0;
        }

        /* The uplink root ft is used to auto-chain to the ethtool or ttc tables */
        ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_OFFLOADS);
        if (!ns) {
                netdev_err(priv->netdev, "Failed to get reps offloads namespace\n");
                return -EOPNOTSUPP;
        }

        ft_attr.max_fte = 0; /* Empty table, miss rule will always point to next table */
        ft_attr.prio = 1;
        ft_attr.level = 1;

        rpriv->root_ft = mlx5_create_flow_table(ns, &ft_attr);
        if (IS_ERR(rpriv->root_ft)) {
                err = PTR_ERR(rpriv->root_ft);
                rpriv->root_ft = NULL;
        }

        return err;
}

static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        if (rep->vport != MLX5_VPORT_UPLINK)
                return;
        mlx5_destroy_flow_table(rpriv->root_ft);
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_destination dest;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft = rpriv->root_ft;

        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
        if (IS_ERR(flow_rule))
                return PTR_ERR(flow_rule);
        rpriv->vport_rx_rule = flow_rule;
        return 0;
}

static void rep_vport_rx_rule_destroy(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        if (!rpriv->vport_rx_rule)
                return;

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        rpriv->vport_rx_rule = NULL;
}

int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
{
        rep_vport_rx_rule_destroy(priv);

        return cleanup ? 0 : mlx5e_create_rep_vport_rx_rule(priv);
}

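/* RX side init for a representor: drop RQ, RX resources (RQTs/TIRs),
 * ttc table, root flow table and the eswitch vport RX rule, unwound in
 * reverse order on failure.
 */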
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        priv->rx_res = mlx5e_rx_res_alloc();
        if (!priv->rx_res)
                return -ENOMEM;

        mlx5e_init_l2_addr(priv);

        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                return err;
        }

        err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
                                priv->max_nch, priv->drop_rq.rqn,
                                &priv->channels.params.packet_merge,
                                priv->channels.params.num_channels);
        if (err)
                goto err_close_drop_rq;

        err = mlx5e_create_rep_ttc_table(priv);
        if (err)
                goto err_destroy_rx_res;

        err = mlx5e_create_rep_root_ft(priv);
        if (err)
                goto err_destroy_ttc_table;

        err = mlx5e_create_rep_vport_rx_rule(priv);
        if (err)
                goto err_destroy_root_ft;

        mlx5e_ethtool_init_steering(priv);

        return 0;

err_destroy_root_ft:
        mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
        mlx5_destroy_ttc_table(priv->fs.ttc);
err_destroy_rx_res:
        mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
        mlx5e_rx_res_free(priv->rx_res);
        priv->rx_res = NULL;
        return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
        mlx5e_ethtool_cleanup_steering(priv);
        rep_vport_rx_rule_destroy(priv);
        mlx5e_destroy_rep_root_ft(priv);
        mlx5_destroy_ttc_table(priv->fs.ttc);
        mlx5e_rx_res_destroy(priv->rx_res);
        mlx5e_close_drop_rq(&priv->drop_rq);
        mlx5e_rx_res_free(priv->rx_res);
        priv->rx_res = NULL;
}

static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
        int err;

        mlx5e_create_q_counters(priv);
        err = mlx5e_init_rep_rx(priv);
        if (err)
                goto out;

        mlx5e_tc_int_port_init_rep_rx(priv);

out:
        return err;
}

static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
        mlx5e_tc_int_port_cleanup_rep_rx(priv);
        mlx5e_cleanup_rep_rx(priv);
        mlx5e_destroy_q_counters(priv);
}

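/* Uplink-only TX init: rep TC offload state, tunnel entropy, bonding
 * support, and the netdev notifier used by the rep TC code.
 */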
static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5_rep_uplink_priv *uplink_priv;
        struct net_device *netdev;
        struct mlx5e_priv *priv;
        int err;

        netdev = rpriv->netdev;
        priv = netdev_priv(netdev);
        uplink_priv = &rpriv->uplink_priv;

        err = mlx5e_rep_tc_init(rpriv);
        if (err)
                return err;

        mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

        mlx5e_rep_bond_init(rpriv);
        err = mlx5e_rep_tc_netdevice_event_register(rpriv);
        if (err) {
                mlx5_core_err(priv->mdev, "Failed to register netdev notifier, err: %d\n",
                              err);
                goto err_event_reg;
        }

        return 0;

err_event_reg:
        mlx5e_rep_bond_cleanup(rpriv);
        mlx5e_rep_tc_cleanup(rpriv);
        return err;
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
                return err;
        }

        if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
                err = mlx5e_init_uplink_rep_tx(rpriv);
                if (err)
                        goto destroy_tises;
        }

        return 0;

destroy_tises:
        mlx5e_destroy_tises(priv);
        return err;
}

static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
        mlx5e_rep_tc_netdevice_event_unregister(rpriv);
        mlx5e_rep_bond_cleanup(rpriv);
        mlx5e_rep_tc_cleanup(rpriv);
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        mlx5e_destroy_tises(priv);

        if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_cleanup_uplink_rep_tx(rpriv);
}

static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        mlx5e_set_netdev_mtu_boundaries(priv);
        mlx5e_rep_neigh_init(rpriv);
}

static void mlx5e_rep_disable(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        mlx5e_rep_neigh_cleanup(rpriv);
}

static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
        return 0;
}

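/* Async firmware event handler for the uplink rep: port up/down events
 * schedule a carrier update, and port-affinity events are forwarded to
 * the rep TC code.
 */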
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
        struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

        if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
                struct mlx5_eqe *eqe = data;

                switch (eqe->sub_type) {
                case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        queue_work(priv->wq, &priv->update_carrier_work);
                        break;
                default:
                        return NOTIFY_DONE;
                }

                return NOTIFY_OK;
        }

        if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
                return mlx5e_rep_tc_event_port_affinity(priv);

        return NOTIFY_DONE;
}

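/* Enable the uplink representor: derive the MTU bounds from the port
 * caps, enable TC offloads, join the lag, and register for firmware
 * events, DCB, neigh and bridge offloads before (re)attaching the
 * netdev.
 */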
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
        u16 max_mtu;

        netdev->min_mtu = ETH_MIN_MTU;
        mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
        netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
        mlx5e_set_dev_port_mtu(priv);

        mlx5e_rep_tc_enable(priv);

        if (MLX5_CAP_GEN(mdev, uplink_follow))
                mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
                                              0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
        mlx5_lag_add_netdev(mdev, netdev);
        priv->events_nb.notifier_call = uplink_rep_async_event;
        mlx5_notifier_register(mdev, &priv->events_nb);
        mlx5e_dcbnl_initialize(priv);
        mlx5e_dcbnl_init_app(priv);
        mlx5e_rep_neigh_init(rpriv);
        mlx5e_rep_bridge_init(priv);

        netdev->wanted_features |= NETIF_F_HW_TC;

        rtnl_lock();
        if (netif_running(netdev))
                mlx5e_open(netdev);
        udp_tunnel_nic_reset_ntf(priv->netdev);
        netif_device_attach(netdev);
        rtnl_unlock();
}

static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_core_dev *mdev = priv->mdev;

        rtnl_lock();
        if (netif_running(priv->netdev))
                mlx5e_close(priv->netdev);
        netif_device_detach(priv->netdev);
        rtnl_unlock();

        mlx5e_rep_bridge_cleanup(priv);
        mlx5e_rep_neigh_cleanup(rpriv);
        mlx5e_dcbnl_delete_app(priv);
        mlx5_notifier_unregister(mdev, &priv->events_nb);
        mlx5e_rep_tc_disable(priv);
        mlx5_lag_remove_netdev(mdev, priv->netdev);
        mlx5_vxlan_reset_to_default(mdev->vxlan);
}

static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
static MLX5E_DEFINE_STATS_GRP(vport_rep, MLX5E_NDO_UPDATE_STATS);

/* The stats groups' order is the opposite of the order of the update_stats() calls */
static mlx5e_stats_grp_t mlx5e_rep_stats_grps[] = {
        &MLX5E_STATS_GRP(sw_rep),
        &MLX5E_STATS_GRP(vport_rep),
};

static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv *priv)
{
        return ARRAY_SIZE(mlx5e_rep_stats_grps);
}

/* The stats groups' order is the opposite of the order of the update_stats() calls */
static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
        &MLX5E_STATS_GRP(sw),
        &MLX5E_STATS_GRP(qcnt),
        &MLX5E_STATS_GRP(vnic_env),
        &MLX5E_STATS_GRP(vport),
        &MLX5E_STATS_GRP(802_3),
        &MLX5E_STATS_GRP(2863),
        &MLX5E_STATS_GRP(2819),
        &MLX5E_STATS_GRP(phy),
        &MLX5E_STATS_GRP(eth_ext),
        &MLX5E_STATS_GRP(pcie),
        &MLX5E_STATS_GRP(per_prio),
        &MLX5E_STATS_GRP(pme),
        &MLX5E_STATS_GRP(channels),
        &MLX5E_STATS_GRP(per_port_buff_congest),
#ifdef CONFIG_MLX5_EN_IPSEC
        &MLX5E_STATS_GRP(ipsec_sw),
        &MLX5E_STATS_GRP(ipsec_hw),
#endif
};

static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
{
        return ARRAY_SIZE(mlx5e_ul_rep_stats_grps);
}

static const struct mlx5e_profile mlx5e_rep_profile = {
        .init                   = mlx5e_init_rep,
        .cleanup                = mlx5e_cleanup_rep,
        .init_rx                = mlx5e_init_rep_rx,
        .cleanup_rx             = mlx5e_cleanup_rep_rx,
        .init_tx                = mlx5e_init_rep_tx,
        .cleanup_tx             = mlx5e_cleanup_rep_tx,
        .enable                 = mlx5e_rep_enable,
        .disable                = mlx5e_rep_disable,
        .update_rx              = mlx5e_update_rep_rx,
        .update_stats           = mlx5e_stats_update_ndo_stats,
        .rx_handlers            = &mlx5e_rx_handlers_rep,
        .max_tc                 = 1,
        .rq_groups              = MLX5E_NUM_RQ_GROUPS(REGULAR),
        .stats_grps             = mlx5e_rep_stats_grps,
        .stats_grps_num         = mlx5e_rep_stats_grps_num,
        .rx_ptp_support         = false,
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
        .init                   = mlx5e_init_ul_rep,
        .cleanup                = mlx5e_cleanup_rep,
        .init_rx                = mlx5e_init_ul_rep_rx,
        .cleanup_rx             = mlx5e_cleanup_ul_rep_rx,
        .init_tx                = mlx5e_init_rep_tx,
        .cleanup_tx             = mlx5e_cleanup_rep_tx,
        .enable                 = mlx5e_uplink_rep_enable,
        .disable                = mlx5e_uplink_rep_disable,
        .update_rx              = mlx5e_update_rep_rx,
        .update_stats           = mlx5e_stats_update_ndo_stats,
        .update_carrier         = mlx5e_update_carrier,
        .rx_handlers            = &mlx5e_rx_handlers_rep,
        .max_tc                 = MLX5E_MAX_NUM_TC,
        /* XSK is needed so we can replace profile with NIC netdev */
        .rq_groups              = MLX5E_NUM_RQ_GROUPS(XSK),
        .stats_grps             = mlx5e_ul_rep_stats_grps,
        .stats_grps_num         = mlx5e_ul_rep_stats_grps_num,
        .rx_ptp_support         = false,
};

/* e-Switch vport representors */
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct devlink_port *dl_port;
        int err;

        rpriv->netdev = priv->netdev;

        err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
                                          rpriv);
        if (err)
                return err;

        dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
        if (dl_port)
                devlink_port_type_eth_set(dl_port, rpriv->netdev);

        return 0;
}

static void
mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
{
        struct net_device *netdev = rpriv->netdev;
        struct devlink_port *dl_port;
        struct mlx5_core_dev *dev;
        struct mlx5e_priv *priv;

        priv = netdev_priv(netdev);
        dev = priv->mdev;

        dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
        if (dl_port)
                devlink_port_type_clear(dl_port);
        mlx5e_netdev_attach_nic_profile(priv);
}

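/* Create, initialize, attach and register a netdev for a non-uplink
 * vport representor, and bind it to its devlink port.
 */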
static int
mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        const struct mlx5e_profile *profile;
        struct devlink_port *dl_port;
        struct net_device *netdev;
        struct mlx5e_priv *priv;
        unsigned int txqs, rxqs;
        int nch, err;

        profile = &mlx5e_rep_profile;
        nch = mlx5e_get_max_num_channels(dev);
        txqs = nch * profile->max_tc;
        rxqs = nch * profile->rq_groups;
        netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
        if (!netdev) {
                mlx5_core_warn(dev,
                               "Failed to create representor netdev for vport %d\n",
                               rep->vport);
                return -EINVAL;
        }

        mlx5e_build_rep_netdev(netdev, dev);
        rpriv->netdev = netdev;

        priv = netdev_priv(netdev);
        priv->profile = profile;
        priv->ppriv = rpriv;
        err = profile->init(dev, netdev);
        if (err) {
                netdev_warn(netdev, "rep profile init failed, %d\n", err);
                goto err_destroy_netdev;
        }

        err = mlx5e_attach_netdev(netdev_priv(netdev));
        if (err) {
                netdev_warn(netdev,
                            "Failed to attach representor netdev for vport %d\n",
                            rep->vport);
                goto err_cleanup_profile;
        }

        err = register_netdev(netdev);
        if (err) {
                netdev_warn(netdev,
                            "Failed to register representor netdev for vport %d\n",
                            rep->vport);
                goto err_detach_netdev;
        }

        dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
        if (dl_port)
                devlink_port_type_eth_set(dl_port, netdev);
        return 0;

err_detach_netdev:
        mlx5e_detach_netdev(netdev_priv(netdev));

err_cleanup_profile:
        priv->profile->cleanup(priv);

err_destroy_netdev:
        mlx5e_destroy_netdev(netdev_priv(netdev));
        return err;
}

static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv;
        int err;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return -ENOMEM;

        /* rpriv->rep to be looked up when profile->init() is called */
        rpriv->rep = rep;
        rep->rep_data[REP_ETH].priv = rpriv;
        INIT_LIST_HEAD(&rpriv->vport_sqs_list);

        if (rep->vport == MLX5_VPORT_UPLINK)
                err = mlx5e_vport_uplink_rep_load(dev, rep);
        else
                err = mlx5e_vport_vf_rep_load(dev, rep);

        if (err)
                kfree(rpriv);

        return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *dev = priv->mdev;
        struct devlink_port *dl_port;
        void *ppriv = priv->ppriv;

        if (rep->vport == MLX5_VPORT_UPLINK) {
                mlx5e_vport_uplink_rep_unload(rpriv);
                goto free_ppriv;
        }

        dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
        if (dl_port)
                devlink_port_type_clear(dl_port);
        unregister_netdev(netdev);
        mlx5e_detach_netdev(priv);
        priv->profile->cleanup(priv);
        mlx5e_destroy_netdev(priv);
free_ppriv:
        kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv;

        rpriv = mlx5e_rep_to_rep_priv(rep);

        return rpriv->netdev;
}

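/* Eswitch pair/unpair handlers: mirror (or remove) the send-to-vport
 * rule of every SQ of this representor on the peer eswitch.
 */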
static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_rep_sq *rep_sq;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
                if (!rep_sq->send_to_vport_rule_peer)
                        continue;
                mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
                rep_sq->send_to_vport_rule_peer = NULL;
        }
}

static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
                                      struct mlx5_eswitch_rep *rep,
                                      struct mlx5_eswitch *peer_esw)
{
        struct mlx5_flow_handle *flow_rule;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_rep_sq *rep_sq;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
                if (rep_sq->send_to_vport_rule_peer)
                        continue;
                flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);
                if (IS_ERR(flow_rule))
                        goto err_out;
                rep_sq->send_to_vport_rule_peer = flow_rule;
        }

        return 0;
err_out:
        mlx5e_vport_rep_event_unpair(rep);
        return PTR_ERR(flow_rule);
}

static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 enum mlx5_switchdev_event event,
                                 void *data)
{
        int err = 0;

        if (event == MLX5_SWITCHDEV_EVENT_PAIR)
                err = mlx5e_vport_rep_event_pair(esw, rep, data);
        else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
                mlx5e_vport_rep_event_unpair(rep);

        return err;
}

static const struct mlx5_eswitch_rep_ops rep_ops = {
        .load = mlx5e_vport_rep_load,
        .unload = mlx5e_vport_rep_unload,
        .get_proto_dev = mlx5e_vport_rep_get_proto_dev,
        .event = mlx5e_vport_rep_event,
};

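/* The ethernet representor functionality is exposed as an auxiliary
 * device of the core driver; probe/remove (un)register the REP_ETH ops
 * with the eswitch.
 */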
static int mlx5e_rep_probe(struct auxiliary_device *adev,
                           const struct auxiliary_device_id *id)
{
        struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
        struct mlx5_core_dev *mdev = edev->mdev;
        struct mlx5_eswitch *esw;

        esw = mdev->priv.eswitch;
        mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
        return 0;
}

static void mlx5e_rep_remove(struct auxiliary_device *adev)
{
        struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev);
        struct mlx5_core_dev *mdev = vdev->mdev;
        struct mlx5_eswitch *esw;

        esw = mdev->priv.eswitch;
        mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}

static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
        { .name = MLX5_ADEV_NAME ".eth-rep", },
        {},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);

static struct auxiliary_driver mlx5e_rep_driver = {
        .name = "eth-rep",
        .probe = mlx5e_rep_probe,
        .remove = mlx5e_rep_remove,
        .id_table = mlx5e_rep_id_table,
};

int mlx5e_rep_init(void)
{
        return auxiliary_driver_register(&mlx5e_rep_driver);
}

void mlx5e_rep_cleanup(void)
{
        auxiliary_driver_unregister(&mlx5e_rep_driver);
}