drivers/net/ethernet/mellanox/mlx5/core/en_rep.c (mirror_ubuntu-bionic-kernel.git)
net/mlx5e: Extendable vport representor netdev private data
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       sw_rep_stats_desc[i].format);
		break;
	}
}

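/* Pull this representor's HW counters from its e-switch vport; rx/tx are
 * swapped below because the counters are reported from the switch port's
 * point of view.
 */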
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes   = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes   = vf_stats.rx_bytes;
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		rq_stats = &c->rq.stats;

		s->rx_packets += rq_stats->packets;
		s->rx_bytes   += rq_stats->bytes;

		for (j = 0; j < priv->channels.params.num_tc; j++) {
			sq_stats = &c->sq[j].stats;

			s->tx_packets += sq_stats->packets;
			s->tx_bytes   += sq_stats->bytes;
		}
	}
}

static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_rep_update_sw_counters(priv);
	mlx5e_rep_update_hw_counters(priv);
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i;

	if (!data)
		return;

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_rep_update_sw_counters(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return NUM_VPORT_REP_COUNTERS;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.get_drvinfo       = mlx5e_rep_get_drvinfo,
	.get_link          = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
};

int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

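/* Register all of this representor's send queue numbers with the e-switch so
 * that traffic transmitted on the representor netdev is forwarded to the
 * represented vport.
 */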
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u16 *sqs;

	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5_eswitch_sqs2vport_stop(esw, rep);
}

static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5e_open(dev);
	if (err)
		return err;

	err = mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP);
	if (!err)
		netif_carrier_on(dev);

	return 0;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	(void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);

	return mlx5e_close(dev);
}

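/* VF vport N hosts VF N-1 (vport 0 belongs to the PF), so the physical port
 * name exposed for a VF representor is the zero-based VF index.
 */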
static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
					char *buf, size_t len)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	ret = snprintf(buf, len, "%d", rep->vport - 1);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

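/* Handle TC cls_flower offload requests issued on the representor's ingress
 * qdisc. Requests arriving via another device's egress path (tc->egress_dev)
 * are handed to the uplink netdev's ndo_setup_tc instead.
 */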
static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
				  __be16 proto, struct tc_to_netdev *tc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		return -EOPNOTSUPP;

	if (tc->egress_dev) {
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw);

		return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle,
							    proto, tc);
	}

	switch (tc->type) {
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			return mlx5e_delete_flower(priv, tc->cls_flower);
		case TC_CLSFLOWER_STATS:
			return mlx5e_stats_flower(priv, tc->cls_flower);
		}
	default:
		return -EOPNOTSUPP;
	}
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
		return false;

	rep = rpriv->rep;
	if (esw->mode == SRIOV_OFFLOADS &&
	    rep && rep->vport == FDB_UPLINK_VPORT)
		return true;

	return false;
}

static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep && rep->vport != FDB_UPLINK_VPORT)
		return true;

	return false;
}

bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
			return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes   = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes   = sstats->tx_bytes;

	stats->tx_dropped = sstats->tx_queue_dropped;

	return 0;
}

int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
			    void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
	.switchdev_port_attr_get = mlx5e_attr_get,
};

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open               = mlx5e_rep_open,
	.ndo_stop               = mlx5e_rep_close,
	.ndo_start_xmit         = mlx5e_xmit,
	.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
	.ndo_setup_tc           = mlx5e_rep_ndo_setup_tc,
	.ndo_get_stats64        = mlx5e_rep_get_stats,
	.ndo_has_offload_stats  = mlx5e_has_offload_stats,
	.ndo_get_offload_stats  = mlx5e_get_offload_stats,
};

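/* Representor netdevs are built with minimum-sized SQ/RQ rings, the
 * linked-list RQ type and a single TC.
 */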
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			    MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			    MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
	params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
	params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;

	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	params->num_tc        = 1;
	params->lro_wqe_sz    = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

#ifdef CONFIG_NET_SWITCHDEV
	netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
#endif

	netdev->features    |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
	netdev->hw_features |= NETIF_F_HW_TC;

	eth_hw_addr_random(netdev);
}

static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->mdev    = mdev;
	priv->netdev  = netdev;
	priv->profile = profile;
	priv->ppriv   = ppriv;

	mutex_init(&priv->state_lock);

	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->channels.params.num_channels = profile->max_nch(mdev);
	mlx5e_build_rep_params(mdev, &priv->channels.params);
	mlx5e_build_rep_netdev(netdev);
}

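/* RX side setup for a representor: direct RQTs/TIRs, an e-switch vport RX
 * rule that steers packets arriving from the represented vport to this
 * netdev's direct TIR, and the TC offload infrastructure.
 */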
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      priv->direct_tir[0].tirn);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto err_destroy_direct_tirs;
	}
	rep->vport_rx_rule = flow_rule;

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_del_flow_rule;

	return 0;

err_del_flow_rule:
	mlx5_del_flow_rules(rep->vport_rx_rule);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_tc_cleanup(priv);
	mlx5_del_flow_rules(rep->vport_rx_rule);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}
	return 0;
}

static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
{
#define MLX5E_PORT_REPRESENTOR_NCH 1
	return MLX5E_PORT_REPRESENTOR_NCH;
}

static struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_nic_tx,
	.update_stats		= mlx5e_rep_update_stats,
	.max_nch		= mlx5e_get_rep_max_num_channels,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
	.max_tc			= 1,
};

/* e-Switch vport representors */

static int
mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
	struct net_device *netdev = rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		return mlx5e_add_sqs_fwd_rules(priv);
	return 0;
}

static void
mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
	struct net_device *netdev = rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* clean (and re-init) existing uplink offloaded TC rules */
	mlx5e_tc_cleanup(priv);
	mlx5e_tc_init(priv);
}

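/* Create, attach and register a representor netdev when the e-switch loads a
 * VF vport representor; torn down again in mlx5e_vport_rep_unload() below.
 */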
static int
mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;
	struct net_device *netdev;
	int err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
	if (!netdev) {
		pr_warn("Failed to create representor netdev for vport %d\n",
			rep->vport);
		kfree(rpriv);
		return -EINVAL;
	}

	rep->netdev = netdev;
	rpriv->rep = rep;

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		pr_warn("Failed to attach representor netdev for vport %d\n",
			rep->vport);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		pr_warn("Failed to register representor netdev for vport %d\n",
			rep->vport);
		goto err_detach_netdev;
	}

	return 0;

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	kfree(rpriv);
	return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
	struct net_device *netdev = rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	void *ppriv = priv->ppriv;

	unregister_netdev(netdev);
	mlx5e_detach_netdev(priv);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}

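/* Register a representor for every VF vport (vports 1..total-1); each VF rep
 * advertises the PF MAC address (queried from vport 0) as its switch HW ID.
 */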
static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(mdev, 0, mac);

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep rep;

		rep.load = mlx5e_vport_rep_load;
		rep.unload = mlx5e_vport_rep_unload;
		rep.vport = vport;
		ether_addr_copy(rep.hw_id, mac);
		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
	}
}

static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);
}

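/* Register the uplink (PF) representor at e-switch index 0, backed by the
 * existing NIC netdev, then one representor per VF vport.
 */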
void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep rep;

	mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
	rep.load = mlx5e_nic_rep_load;
	rep.unload = mlx5e_nic_rep_unload;
	rep.vport = FDB_UPLINK_VPORT;
	rep.netdev = priv->netdev;
	mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport */

	mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}

void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
	mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF */
}