/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "en/tc_tun.h"
#include "fs_core.h"

#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
        max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

struct mlx5e_rep_indr_block_priv {
        struct net_device *netdev;
        struct mlx5e_rep_priv *rpriv;

        struct list_head list;
};

static void mlx5e_rep_indr_unregister_block(struct net_device *netdev);

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct counter_desc sw_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
        u64 vport_rx_packets;
        u64 vport_tx_packets;
        u64 vport_rx_bytes;
        u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
                                  u32 stringset, uint8_t *data)
{
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               sw_rep_stats_desc[i].format);
                for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               vport_rep_stats_desc[j].format);
                break;
        }
}

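/* Read the VF representor counters from the e-switch vport. The
 * counters are taken from the switch side of the vport, so rx and tx
 * are flipped before being stored against the representor netdev.
 */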
static void mlx5e_vf_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct rtnl_link_stats64 *vport_stats;
        struct ifla_vf_stats vf_stats;
        int err;

        err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
        if (err) {
                pr_warn("vport %d error %d reading stats\n", rep->vport, err);
                return;
        }

        vport_stats = &priv->stats.vf_vport;
        /* flip tx/rx as we are reporting the counters for the switch vport */
        vport_stats->rx_packets = vf_stats.tx_packets;
        vport_stats->rx_bytes = vf_stats.tx_bytes;
        vport_stats->tx_packets = vf_stats.rx_packets;
        vport_stats->tx_bytes = vf_stats.rx_bytes;
}

static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct rtnl_link_stats64 *vport_stats;

        mlx5e_grp_802_3_update_stats(priv);

        vport_stats = &priv->stats.vf_vport;

        vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
        vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
        vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
        vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}

static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        if (rep->vport == FDB_UPLINK_VPORT)
                mlx5e_uplink_rep_update_hw_counters(priv);
        else
                mlx5e_vf_rep_update_hw_counters(priv);
}

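/* Fold the per-channel RQ and per-TC SQ packet/byte counters into the
 * flat software stats summary reported to ethtool.
 */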
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        int i, j;

        memset(s, 0, sizeof(*s));
        for (i = 0; i < priv->channels.num; i++) {
                struct mlx5e_channel *c = priv->channels.c[i];

                rq_stats = c->rq.stats;

                s->rx_packets += rq_stats->packets;
                s->rx_bytes += rq_stats->bytes;

                for (j = 0; j < priv->channels.params.num_tc; j++) {
                        sq_stats = c->sq[j].stats;

                        s->tx_packets += sq_stats->packets;
                        s->tx_bytes += sq_stats->bytes;
                }
        }
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats, u64 *data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int i, j;

        if (!data)
                return;

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_rep_update_sw_counters(priv);
        mlx5e_rep_update_hw_counters(priv);
        mutex_unlock(&priv->state_lock);

        for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
                                               sw_rep_stats_desc, i);

        for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
                                               vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
        default:
                return -EOPNOTSUPP;
        }
}

static void mlx5e_rep_get_ringparam(struct net_device *dev,
                                    struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        return mlx5e_ethtool_set_ringparam(priv, param);
}

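/* Re-point the representor's vport rx rule at a new destination. The
 * replacement rule is created before the old one is deleted, so the
 * vport is never left without a steering entry.
 */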
static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
                                           struct mlx5_flow_destination *dest)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;

        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      dest);
        if (IS_ERR(flow_rule))
                return PTR_ERR(flow_rule);

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        rpriv->vport_rx_rule = flow_rule;
        return 0;
}

static void mlx5e_rep_get_channels(struct net_device *dev,
                                   struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_channels(priv, ch);
}

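/* Changing the channel count may require re-steering vport traffic:
 * with more than one channel, rx goes through the TTC flow table
 * (RSS); with a single channel it goes straight to the direct TIR of
 * channel 0.
 */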
static int mlx5e_rep_set_channels(struct net_device *dev,
                                  struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        u16 curr_channels_amount = priv->channels.params.num_channels;
        u32 new_channels_amount = ch->combined_count;
        struct mlx5_flow_destination new_dest;
        int err = 0;

        err = mlx5e_ethtool_set_channels(priv, ch);
        if (err)
                return err;

        if (curr_channels_amount == 1 && new_channels_amount > 1) {
                new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                new_dest.ft = priv->fs.ttc.ft.t;
        } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
                new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
                new_dest.tir_num = priv->direct_tir[0].tirn;
        } else {
                return 0;
        }

        err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
        if (err) {
                netdev_warn(priv->netdev, "Failed to update vport rx rule when going from (%d) channels to (%d) channels\n",
                            curr_channels_amount, new_channels_amount);
                return err;
        }

        return 0;
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
                                  struct ethtool_coalesce *coal)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_coalesce(priv, coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
                                  struct ethtool_coalesce *coal)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_coalesce(priv, coal);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
                                            struct ethtool_pauseparam *pauseparam)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
                                           struct ethtool_pauseparam *pauseparam)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
                                               struct ethtool_link_ksettings *link_ksettings)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}

static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
                                               const struct ethtool_link_ksettings *link_ksettings)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}

static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
        .get_drvinfo = mlx5e_rep_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_strings = mlx5e_rep_get_strings,
        .get_sset_count = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
        .get_ringparam = mlx5e_rep_get_ringparam,
        .set_ringparam = mlx5e_rep_set_ringparam,
        .get_channels = mlx5e_rep_get_channels,
        .set_channels = mlx5e_rep_set_channels,
        .get_coalesce = mlx5e_rep_get_coalesce,
        .set_coalesce = mlx5e_rep_set_coalesce,
        .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
        .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
        .get_drvinfo = mlx5e_rep_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_strings = mlx5e_rep_get_strings,
        .get_sset_count = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
        .get_ringparam = mlx5e_rep_get_ringparam,
        .set_ringparam = mlx5e_rep_set_ringparam,
        .get_channels = mlx5e_rep_get_channels,
        .set_channels = mlx5e_rep_set_channels,
        .get_coalesce = mlx5e_rep_get_coalesce,
        .set_coalesce = mlx5e_rep_set_coalesce,
        .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
        .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
        .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
        .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
        .get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
        .set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};

static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct net_device *uplink_upper = NULL;
        struct mlx5e_priv *uplink_priv = NULL;
        struct net_device *uplink_dev;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
        if (uplink_dev) {
                uplink_upper = netdev_master_upper_dev_get(uplink_dev);
                uplink_priv = netdev_priv(uplink_dev);
        }

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = ETH_ALEN;
                if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
                        ether_addr_copy(attr->u.ppid.id, uplink_upper->dev_addr);
                } else {
                        struct mlx5e_rep_priv *rpriv = priv->ppriv;
                        struct mlx5_eswitch_rep *rep = rpriv->rep;

                        ether_addr_copy(attr->u.ppid.id, rep->hw_id);
                }
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_sq *rep_sq, *tmp;
        struct mlx5e_rep_priv *rpriv;

        if (esw->mode != SRIOV_OFFLOADS)
                return;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
                mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
                list_del(&rep_sq->list);
                kfree(rep_sq);
        }
}

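/* Install an e-switch "send to vport" rule for each of the
 * representor's SQs so packets transmitted on them are forwarded to
 * the represented vport; on failure, tear down the rules added so far.
 */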
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 u32 *sqns_array, int sqns_num)
{
        struct mlx5_flow_handle *flow_rule;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_rep_sq *rep_sq;
        int err;
        int i;

        if (esw->mode != SRIOV_OFFLOADS)
                return 0;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        for (i = 0; i < sqns_num; i++) {
                rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
                if (!rep_sq) {
                        err = -ENOMEM;
                        goto out_err;
                }

                /* Add re-inject rule to the PF/representor sqs */
                flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
                                                                rep->vport,
                                                                sqns_array[i]);
                if (IS_ERR(flow_rule)) {
                        err = PTR_ERR(flow_rule);
                        kfree(rep_sq);
                        goto out_err;
                }
                rep_sq->send_to_vport_rule = flow_rule;
                list_add(&rep_sq->list, &rpriv->vport_sqs_list);
        }
        return 0;

out_err:
        mlx5e_sqs2vport_stop(esw, rep);
        return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5e_channel *c;
        int n, tc, num_sqs = 0;
        int err = -ENOMEM;
        u32 *sqs;

        sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
        if (!sqs)
                goto out;

        for (n = 0; n < priv->channels.num; n++) {
                c = priv->channels.c[n];
                for (tc = 0; tc < c->num_tc; tc++)
                        sqs[num_sqs++] = c->sq[tc].sqn;
        }

        err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
        kfree(sqs);

out:
        if (err)
                netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
        return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        mlx5e_sqs2vport_stop(esw, rep);
}

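/* Initialize the neigh stats sampling interval to the smaller of the
 * IPv4 ARP and IPv6 ND DELAY_PROBE_TIME values, and propagate it to
 * the flow counter sampling interval.
 */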
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
        unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
                                                DELAY_PROBE_TIME);
#else
        unsigned long ipv6_interval = ~0UL;
#endif
        unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
                                                DELAY_PROBE_TIME);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);

        rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
        mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        mlx5_fc_queue_stats_work(priv->mdev,
                                 &neigh_update->neigh_stats_work,
                                 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
        struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
                                                    neigh_update.neigh_stats_work.work);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe;

        rtnl_lock();
        if (!list_empty(&rpriv->neigh_update.neigh_list))
                mlx5e_rep_queue_neigh_stats_work(priv);

        list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
                mlx5e_tc_update_neigh_used_value(nhe);

        rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
        refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
        if (refcount_dec_and_test(&nhe->refcnt))
                kfree(nhe);
}

static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
                                   struct mlx5e_encap_entry *e,
                                   bool neigh_connected,
                                   unsigned char ha[ETH_ALEN])
{
        struct ethhdr *eth = (struct ethhdr *)e->encap_header;

        ASSERT_RTNL();

        if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
            !ether_addr_equal(e->h_dest, ha))
                mlx5e_tc_encap_flows_del(priv, e);

        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);

                mlx5e_tc_encap_flows_add(priv, e);
        }
}

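/* Work handler for a neighbour state change: snapshot the neigh state
 * under its lock, then add or remove the offloaded encap flows of
 * every encap entry whose validity or destination MAC no longer
 * matches, and drop the references taken when the work was queued.
 */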
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
        struct mlx5e_neigh_hash_entry *nhe =
                container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
        struct neighbour *n = nhe->n;
        struct mlx5e_encap_entry *e;
        unsigned char ha[ETH_ALEN];
        struct mlx5e_priv *priv;
        bool neigh_connected;
        bool encap_connected;
        u8 nud_state, dead;

        rtnl_lock();

        /* If these parameters are changed after we release the lock,
         * we'll receive another event letting us know about it.
         * We use this lock to avoid inconsistency between the neigh validity
         * and its hw address.
         */
        read_lock_bh(&n->lock);
        memcpy(ha, n->ha, ETH_ALEN);
        nud_state = n->nud_state;
        dead = n->dead;
        read_unlock_bh(&n->lock);

        neigh_connected = (nud_state & NUD_VALID) && !dead;

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
                priv = netdev_priv(e->out_dev);

                if (encap_connected != neigh_connected ||
                    !ether_addr_equal(e->h_dest, ha))
                        mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
        }
        mlx5e_rep_neigh_entry_release(nhe);
        rtnl_unlock();
        neigh_release(n);
}

static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
                                 struct net_device *netdev)
{
        struct mlx5e_rep_indr_block_priv *cb_priv;

        /* All callback list access should be protected by RTNL. */
        ASSERT_RTNL();

        list_for_each_entry(cb_priv,
                            &rpriv->uplink_priv.tc_indr_block_priv_list,
                            list)
                if (cb_priv->netdev == netdev)
                        return cb_priv;

        return NULL;
}

static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
        struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;

        list_for_each_entry_safe(cb_priv, temp, head, list) {
                mlx5e_rep_indr_unregister_block(cb_priv->netdev);
                kfree(cb_priv);
        }
}

static int
mlx5e_rep_indr_offload(struct net_device *netdev,
                       struct tc_cls_flower_offload *flower,
                       struct mlx5e_rep_indr_block_priv *indr_priv)
{
        struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
        int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
        int err = 0;

        switch (flower->command) {
        case TC_CLSFLOWER_REPLACE:
                err = mlx5e_configure_flower(netdev, priv, flower, flags);
                break;
        case TC_CLSFLOWER_DESTROY:
                err = mlx5e_delete_flower(netdev, priv, flower, flags);
                break;
        case TC_CLSFLOWER_STATS:
                err = mlx5e_stats_flower(netdev, priv, flower, flags);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
                                         void *type_data, void *indr_priv)
{
        struct mlx5e_rep_indr_block_priv *priv = indr_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
        default:
                return -EOPNOTSUPP;
        }
}

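/* Bind or unbind the ingress TC block of a non-representor device
 * (e.g. a tunnel netdev) to the uplink representor, so flower rules
 * installed on that device can be offloaded to the e-switch.
 */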
static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
                              struct mlx5e_rep_priv *rpriv,
                              struct tc_block_offload *f)
{
        struct mlx5e_rep_indr_block_priv *indr_priv;
        int err = 0;

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
                if (indr_priv)
                        return -EEXIST;

                indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
                if (!indr_priv)
                        return -ENOMEM;

                indr_priv->netdev = netdev;
                indr_priv->rpriv = rpriv;
                list_add(&indr_priv->list,
                         &rpriv->uplink_priv.tc_indr_block_priv_list);

                err = tcf_block_cb_register(f->block,
                                            mlx5e_rep_indr_setup_block_cb,
                                            netdev, indr_priv, f->extack);
                if (err) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
                }

                return err;
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block,
                                        mlx5e_rep_indr_setup_block_cb,
                                        netdev);
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
                if (indr_priv) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
                }

                return 0;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static
int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
                               enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
                                                     type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
                                         struct net_device *netdev)
{
        int err;

        err = __tc_indr_block_cb_register(netdev, rpriv,
                                          mlx5e_rep_indr_setup_tc_cb,
                                          netdev);
        if (err) {
                struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

                mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
                              netdev_name(netdev), err);
        }
        return err;
}

static void mlx5e_rep_indr_unregister_block(struct net_device *netdev)
{
        __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
                                      netdev);
}

static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
                                         unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    uplink_priv.netdevice_nb);
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

        if (!mlx5e_tc_tun_device_to_offload(priv, netdev))
                return NOTIFY_OK;

        switch (event) {
        case NETDEV_REGISTER:
                mlx5e_rep_indr_register_block(rpriv, netdev);
                break;
        case NETDEV_UNREGISTER:
                mlx5e_rep_indr_unregister_block(netdev);
                break;
        }
        return NOTIFY_OK;
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh);

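/* Atomic-context netevent handler. On NETEVENT_NEIGH_UPDATE, look up
 * the tracked hash entry for the neighbour and queue its update work;
 * on NETEVENT_DELAY_PROBE_TIME_UPDATE, tighten the flow counter
 * sampling interval if a tracked device lowered its probe time.
 * encap_lock is used instead of RTNL since this can run from softirq.
 */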
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    neigh_update.netevent_nb);
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe = NULL;
        struct mlx5e_neigh m_neigh = {};
        struct neigh_parms *p;
        struct neighbour *n;
        bool found = false;

        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
                if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
                if (n->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                m_neigh.dev = n->dev;
                m_neigh.family = n->ops->family;
                memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

                /* We are in atomic context and can't take RTNL mutex, so use
                 * spin_lock_bh to look up the neigh table. bh is used since
                 * netevent can be called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
                if (!nhe) {
                        spin_unlock_bh(&neigh_update->encap_lock);
                        return NOTIFY_DONE;
                }

                /* This assignment is valid as long as the neigh reference
                 * is taken
                 */
                nhe->n = n;

                /* Take a reference to ensure the neighbour and mlx5 encap
                 * entry won't be destructed until we drop the reference in
                 * delayed work.
                 */
                neigh_hold(n);
                mlx5e_rep_neigh_entry_hold(nhe);

                if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
                        mlx5e_rep_neigh_entry_release(nhe);
                        neigh_release(n);
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                break;

        case NETEVENT_DELAY_PROBE_TIME_UPDATE:
                p = ptr;

                /* We check that the device is present since we don't care
                 * about changes in the default table; we only care about
                 * changes done to the per-device delay probe time parameter.
                 */
#if IS_ENABLED(CONFIG_IPV6)
                if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
                if (!p->dev || p->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                /* We are in atomic context and can't take RTNL mutex,
                 * so use spin_lock_bh to walk the neigh list and look for
                 * the relevant device. bh is used since netevent can be
                 * called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
                        if (p->dev == nhe->m_neigh.dev) {
                                found = true;
                                break;
                        }
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                if (!found)
                        return NOTIFY_DONE;

                neigh_update->min_interval = min_t(unsigned long,
                                                   NEIGH_VAR(p, DELAY_PROBE_TIME),
                                                   neigh_update->min_interval);
                mlx5_fc_update_sampling_interval(priv->mdev,
                                                 neigh_update->min_interval);
                break;
        }
        return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
        .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
        .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
        .key_len = sizeof(struct mlx5e_neigh),
        .automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        int err;

        err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
        if (err)
                return err;

        INIT_LIST_HEAD(&neigh_update->neigh_list);
        spin_lock_init(&neigh_update->encap_lock);
        INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
                          mlx5e_rep_neigh_stats_work);
        mlx5e_rep_neigh_update_init_interval(rpriv);

        rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
        err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
        if (err)
                goto out_err;
        return 0;

out_err:
        rhashtable_destroy(&neigh_update->neigh_ht);
        return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

        unregister_netevent_notifier(&neigh_update->netevent_nb);

        flush_workqueue(priv->wq); /* flush neigh update works */

        cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

        rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
                                        struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;

        err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
                                     &nhe->rhash_node,
                                     mlx5e_neigh_ht_params);
        if (err)
                return err;

        list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

        return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
                                         struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        spin_lock_bh(&rpriv->neigh_update.encap_lock);

        list_del(&nhe->neigh_list);

        rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
                               &nhe->rhash_node,
                               mlx5e_neigh_ht_params);
        spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
                                      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
                                        struct mlx5e_encap_entry *e,
                                        struct mlx5e_neigh_hash_entry **nhe)
{
        int err;

        *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
        if (!*nhe)
                return -ENOMEM;

        memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
        INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
        INIT_LIST_HEAD(&(*nhe)->encap_list);
        refcount_set(&(*nhe)->refcnt, 1);

        err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
        if (err)
                goto out_free;
        return 0;

out_free:
        kfree(*nhe);
        return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
                                          struct mlx5e_neigh_hash_entry *nhe)
{
        /* The neigh hash entry must be removed from the hash table regardless
         * of the reference count value, so it won't be found by the next
         * neigh notification call. The neigh hash entry reference count is
         * incremented only during creation and neigh notification calls and
         * protects from freeing the nhe struct.
         */
        mlx5e_rep_neigh_entry_remove(priv, nhe);
        mlx5e_rep_neigh_entry_release(nhe);
}

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e)
{
        struct mlx5e_neigh_hash_entry *nhe;
        int err;

        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
        if (!nhe) {
                err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
                if (err)
                        return err;
        }
        list_add(&e->encap_list, &nhe->encap_list);
        return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
                                  struct mlx5e_encap_entry *e)
{
        struct mlx5e_neigh_hash_entry *nhe;

        list_del(&e->encap_list);
        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

        if (list_empty(&nhe->encap_list))
                mlx5e_rep_neigh_entry_destroy(priv, nhe);
}

static int mlx5e_vf_rep_open(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(dev);
        if (err)
                goto unlock;

        if (!mlx5_modify_vport_admin_state(priv->mdev,
                                           MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                           rep->vport, MLX5_VPORT_ADMIN_STATE_UP))
                netif_carrier_on(dev);

unlock:
        mutex_unlock(&priv->state_lock);
        return err;
}

static int mlx5e_vf_rep_close(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        mutex_lock(&priv->state_lock);
        mlx5_modify_vport_admin_state(priv->mdev,
                                      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                      rep->vport, MLX5_VPORT_ADMIN_STATE_DOWN);
        ret = mlx5e_close_locked(dev);
        mutex_unlock(&priv->state_lock);
        return ret;
}

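/* The phys port name of a representor is the decimal VF number; VF n
 * is attached to vport n + 1, hence the "rep->vport - 1" below.
 */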
static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
                                        char *buf, size_t len)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        ret = snprintf(buf, len, "%d", rep->vport - 1);
        if (ret >= len)
                return -EOPNOTSUPP;

        return 0;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
                              struct tc_cls_flower_offload *cls_flower, int flags)
{
        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
                return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
                                              flags);
        case TC_CLSFLOWER_DESTROY:
                return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
                                           flags);
        case TC_CLSFLOWER_STATS:
                return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
                                          flags);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct mlx5e_priv *priv = cb_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
                                                     MLX5E_TC_ESW_OFFLOAD);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_block(struct net_device *dev,
                                    struct tc_block_offload *f)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
                                             priv, priv, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                              void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_setup_tc_block(dev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;

        if (!rpriv) /* non vport rep mlx5e instances don't use this field */
                return false;

        rep = rpriv->rep;
        return (rep->vport == FDB_UPLINK_VPORT);
}

static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return true;
        }

        return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
                     struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_sw_stats *sstats = &priv->stats.sw;

        mlx5e_rep_update_sw_counters(priv);

        stats->rx_packets = sstats->rx_packets;
        stats->rx_bytes = sstats->rx_bytes;
        stats->tx_packets = sstats->tx_packets;
        stats->tx_bytes = sstats->tx_bytes;

        stats->tx_dropped = sstats->tx_queue_dropped;

        return 0;
}

static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
                                       void *sp)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return mlx5e_get_sw_stats64(dev, sp);
        }

        return -EINVAL;
}

static void
mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        /* update HW stats in background for next time */
        mlx5e_queue_update_stats(priv);
        memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_vf_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
        return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
        return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
}

static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
{
        struct sockaddr *saddr = addr;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        return 0;
}

static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
        .switchdev_port_attr_get = mlx5e_attr_get,
};

static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
        .ndo_open = mlx5e_vf_rep_open,
        .ndo_stop = mlx5e_vf_rep_close,
        .ndo_start_xmit = mlx5e_xmit,
        .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
        .ndo_setup_tc = mlx5e_rep_setup_tc,
        .ndo_get_stats64 = mlx5e_vf_rep_get_stats,
        .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
        .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
        .ndo_change_mtu = mlx5e_vf_rep_change_mtu,
};

static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
        .ndo_open = mlx5e_open,
        .ndo_stop = mlx5e_close,
        .ndo_start_xmit = mlx5e_xmit,
        .ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
        .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
        .ndo_setup_tc = mlx5e_rep_setup_tc,
        .ndo_get_stats64 = mlx5e_get_stats,
        .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
        .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
        .ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
        .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
        .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
        .ndo_features_check = mlx5e_features_check,
        .ndo_set_vf_mac = mlx5e_set_vf_mac,
        .ndo_set_vf_rate = mlx5e_set_vf_rate,
        .ndo_get_vf_config = mlx5e_get_vf_config,
        .ndo_get_vf_stats = mlx5e_get_vf_stats,
};

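/* Set up the channel parameters of a representor netdev: default-size
 * SQs for the uplink representor, smaller SQs for VF representors,
 * plus the common RQ, CQ moderation and RSS defaults.
 */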
static void mlx5e_build_rep_params(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_params *params;

        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

        params = &priv->channels.params;
        params->hard_mtu = MLX5E_ETH_HARD_MTU;
        params->sw_mtu = netdev->mtu;

        /* SQ */
        if (rep->vport == FDB_UPLINK_VPORT)
                params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        else
                params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;

        /* RQ */
        mlx5e_build_rq_params(mdev, params);

        /* CQ moderation params */
        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

        params->num_tc = 1;

        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

        /* RSS */
        mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_core_dev *mdev = priv->mdev;

        if (rep->vport == FDB_UPLINK_VPORT) {
                SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev);
                netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
                /* we want a persistent mac for the uplink rep */
                mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
                netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
                if (MLX5_CAP_GEN(mdev, qos))
                        netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
        } else {
                netdev->netdev_ops = &mlx5e_netdev_ops_vf_rep;
                eth_hw_addr_random(netdev);
                netdev->ethtool_ops = &mlx5e_vf_rep_ethtool_ops;
        }

        netdev->watchdog_timeo = 15 * HZ;

        netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;

        netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
        netdev->hw_features |= NETIF_F_HW_TC;

        netdev->hw_features |= NETIF_F_SG;
        netdev->hw_features |= NETIF_F_IP_CSUM;
        netdev->hw_features |= NETIF_F_IPV6_CSUM;
        netdev->hw_features |= NETIF_F_GRO;
        netdev->hw_features |= NETIF_F_TSO;
        netdev->hw_features |= NETIF_F_TSO6;
        netdev->hw_features |= NETIF_F_RXCSUM;

        netdev->features |= netdev->hw_features;
}

static int mlx5e_rep_get_default_num_channels(struct mlx5_eswitch_rep *rep,
                                              struct net_device *netdev)
{
        if (rep->vport == FDB_UPLINK_VPORT)
                return mlx5e_get_netdev_max_channels(netdev);
        else
                return 1;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
                          struct net_device *netdev,
                          const struct mlx5e_profile *profile,
                          void *ppriv)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = ppriv;
        int err;

        err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
        if (err)
                return err;

        priv->channels.params.num_channels =
                mlx5e_rep_get_default_num_channels(rpriv->rep, netdev);

        mlx5e_build_rep_params(netdev);
        mlx5e_build_rep_netdev(netdev);

        mlx5e_timestamp_init(priv);

        return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
        mlx5e_netdev_cleanup(priv->netdev, priv);
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
        struct ttc_params ttc_params = {};
        int tt, err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        /* The inner_ttc in the ttc params is intentionally not set */
        ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
        mlx5e_set_ttc_ft_params(&ttc_params);
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

        err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
                return err;
        }
        return 0;
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_destination dest;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = priv->direct_tir[0].tirn;
        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      &dest);
        if (IS_ERR(flow_rule))
                return PTR_ERR(flow_rule);
        rpriv->vport_rx_rule = flow_rule;
        return 0;
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        mlx5e_init_l2_addr(priv);

        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                return err;
        }

        err = mlx5e_create_indirect_rqt(priv);
        if (err)
                goto err_close_drop_rq;

        err = mlx5e_create_direct_rqts(priv);
        if (err)
                goto err_destroy_indirect_rqts;

        err = mlx5e_create_indirect_tirs(priv, false);
        if (err)
                goto err_destroy_direct_rqts;

        err = mlx5e_create_direct_tirs(priv);
        if (err)
                goto err_destroy_indirect_tirs;

        err = mlx5e_create_rep_ttc_table(priv);
        if (err)
                goto err_destroy_direct_tirs;

        err = mlx5e_create_rep_vport_rx_rule(priv);
        if (err)
                goto err_destroy_ttc_table;

        return 0;

err_destroy_ttc_table:
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
        mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
        return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_indirect_tirs(priv, false);
        mlx5e_destroy_direct_rqts(priv);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
}

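/* TX side init for a representor: create the TISes; for the uplink
 * representor also initialize the shared tc flow table and register
 * the netdevice notifier used for indirect (tunnel netdev) TC block
 * offload.
 */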
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv;
        int tc, err;

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
                return err;
        }

        if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
                uplink_priv = &rpriv->uplink_priv;

                /* init shared tc flow table */
                err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
                if (err)
                        goto destroy_tises;

                /* init indirect block notifications */
                INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
                uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
                err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
                if (err) {
                        mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
                        goto tc_esw_cleanup;
                }
        }

        return 0;

tc_esw_cleanup:
        mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
        for (tc = 0; tc < priv->profile->max_tc; tc++)
                mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
        return err;
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int tc;

        for (tc = 0; tc < priv->profile->max_tc; tc++)
                mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

        if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
                /* clean indirect TC block notifications */
                unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
                mlx5e_rep_indr_clean_block_privs(rpriv);

                /* delete shared tc flow table */
                mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
        }
}

static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
        u16 max_mtu;

        netdev->min_mtu = ETH_MIN_MTU;
        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
        netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
}

static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
        struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
        struct mlx5_eqe *eqe = data;

        if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
                return NOTIFY_DONE;

        switch (eqe->sub_type) {
        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                queue_work(priv->wq, &priv->update_carrier_work);
                break;
        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
        u16 max_mtu;

        netdev->min_mtu = ETH_MIN_MTU;
        mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
        netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
        mlx5e_set_dev_port_mtu(priv);

        mlx5_lag_add(mdev, netdev);
        priv->events_nb.notifier_call = uplink_rep_async_event;
        mlx5_notifier_register(mdev, &priv->events_nb);
#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_initialize(priv);
        mlx5e_dcbnl_init_app(priv);
#endif
}

static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_delete_app(priv);
#endif
        mlx5_notifier_unregister(mdev, &priv->events_nb);
        mlx5_lag_remove(mdev);
}

static const struct mlx5e_profile mlx5e_vf_rep_profile = {
        .init = mlx5e_init_rep,
        .cleanup = mlx5e_cleanup_rep,
        .init_rx = mlx5e_init_rep_rx,
        .cleanup_rx = mlx5e_cleanup_rep_rx,
        .init_tx = mlx5e_init_rep_tx,
        .cleanup_tx = mlx5e_cleanup_rep_tx,
        .enable = mlx5e_vf_rep_enable,
        .update_stats = mlx5e_vf_rep_update_hw_counters,
        .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc = 1,
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
        .init = mlx5e_init_rep,
        .cleanup = mlx5e_cleanup_rep,
        .init_rx = mlx5e_init_rep_rx,
        .cleanup_rx = mlx5e_cleanup_rep_rx,
        .init_tx = mlx5e_init_rep_tx,
        .cleanup_tx = mlx5e_cleanup_rep_tx,
        .enable = mlx5e_uplink_rep_enable,
        .disable = mlx5e_uplink_rep_disable,
        .update_stats = mlx5e_uplink_rep_update_hw_counters,
        .update_carrier = mlx5e_update_carrier,
        .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc = MLX5E_MAX_NUM_TC,
};

/* e-Switch vport representors */
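/* Allocate the per-rep private data and a netdev for the given vport,
 * using the uplink profile for the uplink vport and the VF profile
 * otherwise, then attach it, set up neighbour tracking and register it.
 */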
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        const struct mlx5e_profile *profile;
        struct mlx5e_rep_priv *rpriv;
        struct net_device *netdev;
        int nch, err;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return -ENOMEM;

        /* rpriv->rep to be looked up when profile->init() is called */
        rpriv->rep = rep;

        nch = mlx5e_get_max_num_channels(dev);
        profile = (rep->vport == FDB_UPLINK_VPORT) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
        netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
        if (!netdev) {
                pr_warn("Failed to create representor netdev for vport %d\n",
                        rep->vport);
                kfree(rpriv);
                return -EINVAL;
        }

        rpriv->netdev = netdev;
        rep->rep_if[REP_ETH].priv = rpriv;
        INIT_LIST_HEAD(&rpriv->vport_sqs_list);

        if (rep->vport == FDB_UPLINK_VPORT) {
                err = mlx5e_create_mdev_resources(dev);
                if (err)
                        goto err_destroy_netdev;
        }

        err = mlx5e_attach_netdev(netdev_priv(netdev));
        if (err) {
                pr_warn("Failed to attach representor netdev for vport %d\n",
                        rep->vport);
                goto err_destroy_mdev_resources;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err) {
                pr_warn("Failed to initialize neighbour handling for vport %d\n",
                        rep->vport);
                goto err_detach_netdev;
        }

        err = register_netdev(netdev);
        if (err) {
                pr_warn("Failed to register representor netdev for vport %d\n",
                        rep->vport);
                goto err_neigh_cleanup;
        }

        return 0;

err_neigh_cleanup:
        mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
        mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_mdev_resources:
        if (rep->vport == FDB_UPLINK_VPORT)
                mlx5e_destroy_mdev_resources(dev);

err_destroy_netdev:
        mlx5e_destroy_netdev(netdev_priv(netdev));
        kfree(rpriv);
        return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        void *ppriv = priv->ppriv;

        unregister_netdev(netdev);
        mlx5e_rep_neigh_cleanup(rpriv);
        mlx5e_detach_netdev(priv);
        if (rep->vport == FDB_UPLINK_VPORT)
                mlx5e_destroy_mdev_resources(priv->mdev);
        mlx5e_destroy_netdev(priv);
        kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv;

        rpriv = mlx5e_rep_to_rep_priv(rep);

        return rpriv->netdev;
}

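/* Register an ethernet (REP_ETH) representor interface, with its
 * load/unload/get_proto_dev callbacks, for every vport.
 */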
void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;

        for (vport = 0; vport < total_vfs; vport++) {
                struct mlx5_eswitch_rep_if rep_if = {};

                rep_if.load = mlx5e_vport_rep_load;
                rep_if.unload = mlx5e_vport_rep_unload;
                rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
                mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
        }
}

void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;

        for (vport = total_vfs - 1; vport >= 0; vport--)
                mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}