/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "fs_core.h"

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       sw_rep_stats_desc[i].format);
		break;
	}
}

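/* Fetch this representor's vport counters from the e-switch and expose
 * them as the netdev's HW stats; TX and RX are swapped because the
 * representor reports the switch-side view of the vport's traffic.
 */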
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes = vf_stats.rx_bytes;
}

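/* Sum the per-channel RQ and per-TC SQ software counters into the
 * representor's aggregate software stats.
 */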
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		rq_stats = &c->rq.stats;

		s->rx_packets += rq_stats->packets;
		s->rx_bytes += rq_stats->bytes;

		for (j = 0; j < priv->channels.params.num_tc; j++) {
			sq_stats = &c->sq[j].stats;

			s->tx_packets += sq_stats->packets;
			s->tx_bytes += sq_stats->bytes;
		}
	}
}

static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_rep_update_sw_counters(priv);
	mlx5e_rep_update_hw_counters(priv);
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i;

	if (!data)
		return;

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_rep_update_sw_counters(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return NUM_VPORT_REP_COUNTERS;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings	   = mlx5e_rep_get_strings,
	.get_sset_count	   = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
};

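/* switchdev op: report the e-switch HW ID as the port parent ID, so user
 * space can tell that all representors of this e-switch belong to one
 * physical switch.
 */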
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

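/* 'Send to vport' rules steer traffic that the host transmits on the
 * representor's SQs through the e-switch to the vport this representor
 * stands for. mlx5e_sqs2vport_start() installs one rule per SQ and
 * mlx5e_sqs2vport_stop() tears them all down.
 */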
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}

static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);
	return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u16 *sqs;

	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}

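/* Neighbour 'used' state has to be refreshed at least as often as the
 * shortest DELAY_PROBE_TIME configured for IPv4/IPv6, so take the
 * minimum of the two as the flow counter sampling interval.
 */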
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
	unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms,
						DELAY_PROBE_TIME);
#else
	unsigned long ipv6_interval = ~0UL;
#endif
	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
						DELAY_PROBE_TIME);
	struct net_device *netdev = rpriv->rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	mlx5_fc_queue_stats_work(priv->mdev,
				 &neigh_update->neigh_stats_work,
				 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
						    neigh_update.neigh_stats_work.work);
	struct net_device *netdev = rpriv->rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe;

	rtnl_lock();
	if (!list_empty(&rpriv->neigh_update.neigh_list))
		mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
		mlx5e_tc_update_neigh_used_value(nhe);

	rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
	refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
	if (refcount_dec_and_test(&nhe->refcnt))
		kfree(nhe);
}

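/* Offload or tear down the encap flows hanging off this encap entry when
 * the neighbour's reachability or hardware address changes.
 */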
static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   bool neigh_connected,
				   unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;

	ASSERT_RTNL();

	if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
	    !ether_addr_equal(e->h_dest, ha))
		mlx5e_tc_encap_flows_del(priv, e);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);

		mlx5e_tc_encap_flows_add(priv, e);
	}
}

static void mlx5e_rep_neigh_update(struct work_struct *work)
{
	struct mlx5e_neigh_hash_entry *nhe =
		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
	struct neighbour *n = nhe->n;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	bool neigh_connected;
	bool encap_connected;
	u8 nud_state, dead;

	rtnl_lock();

	/* If these parameters are changed after we release the lock,
	 * we'll receive another event letting us know about it.
	 * We use this lock to avoid inconsistency between the neigh validity
	 * and its hw address.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	neigh_connected = (nud_state & NUD_VALID) && !dead;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
		priv = netdev_priv(e->out_dev);

		if (encap_connected != neigh_connected ||
		    !ether_addr_equal(e->h_dest, ha))
			mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
	}
	mlx5e_rep_neigh_entry_release(nhe);
	rtnl_unlock();
	neigh_release(n);
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh);

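/* Netevent notifier callback. It runs in atomic (softirq) context, so
 * the neighbour hash table is searched under a bh spinlock and the
 * actual flow update is deferred to the driver workqueue.
 */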
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		/* We are in atomic context and can't take RTNL mutex, so use
		 * spin_lock_bh to lookup the neigh table. bh is used since
		 * netevent can be called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		if (!nhe) {
			spin_unlock_bh(&neigh_update->encap_lock);
			return NOTIFY_DONE;
		}

		/* This assignment is valid as long as the neigh reference
		 * is taken
		 */
		nhe->n = n;

		/* Take a reference to ensure the neighbour and mlx5 encap
		 * entry won't be destructed until we drop the reference in
		 * delayed work.
		 */
		neigh_hold(n);
		mlx5e_rep_neigh_entry_hold(nhe);

		if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
			mlx5e_rep_neigh_entry_release(nhe);
			neigh_release(n);
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check the device is present since we don't care about
		 * changes in the default table, we only care about changes
		 * done to the per-device delay probe time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use spin_lock_bh to walk the neigh list and look for
		 * the relevant device. bh is used since netevent can be
		 * called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		if (!found)
			return NOTIFY_DONE;

		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};

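/* Set up per-representor neighbour tracking: the neighbour hash table,
 * the periodic stats work that refreshes 'used' state, and the netevent
 * notifier that reacts to kernel neighbour changes.
 */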
static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	spin_lock_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
					struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
				     &nhe->rhash_node,
				     mlx5e_neigh_ht_params);
	if (err)
		return err;

	list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

	return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
					 struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	spin_lock_bh(&rpriv->neigh_update.encap_lock);

	list_del(&nhe->neigh_list);

	rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
			       &nhe->rhash_node,
			       mlx5e_neigh_ht_params);
	spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
				      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
					struct mlx5e_encap_entry *e,
					struct mlx5e_neigh_hash_entry **nhe)
{
	int err;

	*nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
	if (!*nhe)
		return -ENOMEM;

	memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
	INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
	INIT_LIST_HEAD(&(*nhe)->encap_list);
	refcount_set(&(*nhe)->refcnt, 1);

	err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
	if (err)
		goto out_free;
	return 0;

out_free:
	kfree(*nhe);
	return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
					  struct mlx5e_neigh_hash_entry *nhe)
{
	/* The neigh hash entry must be removed from the hash table regardless
	 * of the reference count value, so it won't be found by the next
	 * neigh notification call. The neigh hash entry reference count is
	 * incremented only during creation and neigh notification calls and
	 * protects from freeing the nhe struct.
	 */
	mlx5e_rep_neigh_entry_remove(priv, nhe);
	mlx5e_rep_neigh_entry_release(nhe);
}

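/* Encap entries that resolve to the same neighbour share one hash entry:
 * attach creates the entry on first use, detach destroys it once the
 * encap list drains.
 */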
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e)
{
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
		if (err)
			return err;
	}
	list_add(&e->encap_list, &nhe->encap_list);
	return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_neigh_hash_entry *nhe;

	list_del(&e->encap_list);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

	if (list_empty(&nhe->encap_list))
		mlx5e_rep_neigh_entry_destroy(priv, nhe);
}

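/* ndo_open/ndo_stop: besides opening/closing the channels, drive the
 * e-switch vport admin state so the represented vport's link state
 * tracks the representor's.
 */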
static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_eswitch_set_vport_state(esw, rep->vport,
					  MLX5_ESW_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	mutex_lock(&priv->state_lock);
	(void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
					char *buf, size_t len)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	ret = snprintf(buf, len, "%d", rep->vport - 1);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

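/* TC offload plumbing: flower classifier commands arrive through a TC
 * block callback and are translated into mlx5 flow rule add/del/stats
 * operations.
 */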
static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
			      struct tc_cls_flower_offload *cls_flower)
{
	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	if (!tc_can_offload(priv->netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_block(struct net_device *dev,
				    struct tc_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
					     priv, priv);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

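/* A representor is the uplink representor when the e-switch is in
 * offloads mode and its vport is FDB_UPLINK_VPORT; any other vport is a
 * VF representor.
 */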
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
		return false;

	rep = rpriv->rep;
	if (esw->mode == SRIOV_OFFLOADS &&
	    rep && rep->vport == FDB_UPLINK_VPORT)
		return true;

	return false;
}

static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep && rep->vport != FDB_UPLINK_VPORT)
		return true;

	return false;
}

bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
			return true;
	}

	return false;
}

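/* IFLA_OFFLOAD_XSTATS_CPU_HIT exposes the representor's own software
 * counters, i.e. the traffic that reached the CPU instead of being
 * forwarded in hardware.
 */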
static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes = sstats->tx_bytes;

	stats->tx_dropped = sstats->tx_queue_dropped;

	return 0;
}

int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
			    void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
	.switchdev_port_attr_get	= mlx5e_attr_get,
};

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open                = mlx5e_rep_open,
	.ndo_stop                = mlx5e_rep_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
	.ndo_setup_tc            = mlx5e_rep_setup_tc,
	.ndo_get_stats64         = mlx5e_rep_get_stats,
	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
};

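/* Representor netdevs run with the minimum SQ/RQ sizes and a single TC,
 * with the CQ moderation mode picked from the device capabilities.
 */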
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
	params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
	params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;

	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	params->num_tc        = 1;
	params->lro_wqe_sz    = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

#ifdef CONFIG_NET_SWITCHDEV
	netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
#endif

	netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
	netdev->hw_features |= NETIF_F_HW_TC;

	eth_hw_addr_random(netdev);
}

static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->mdev    = mdev;
	priv->netdev  = netdev;
	priv->profile = profile;
	priv->ppriv   = ppriv;

	mutex_init(&priv->state_lock);

	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->channels.params.num_channels = profile->max_nch(mdev);

	priv->hard_mtu = MLX5E_ETH_HARD_MTU;

	mlx5e_build_rep_params(mdev, &priv->channels.params);
	mlx5e_build_rep_netdev(netdev);
}

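/* RX side init: create the direct RQTs and TIRs, then install an
 * e-switch rule that steers traffic arriving for this vport's
 * representor into its first direct TIR.
 */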
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      priv->direct_tir[0].tirn);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto err_destroy_direct_tirs;
	}
	rep->vport_rx_rule = flow_rule;

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_del_flow_rule;

	return 0;

err_del_flow_rule:
	mlx5_del_flow_rules(rep->vport_rx_rule);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_tc_cleanup(priv);
	mlx5_del_flow_rules(rep->vport_rx_rule);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}
	return 0;
}

static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
{
#define MLX5E_PORT_REPRESENTOR_NCH 1
	return MLX5E_PORT_REPRESENTOR_NCH;
}

static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_nic_tx,
	.update_stats		= mlx5e_rep_update_stats,
	.max_nch		= mlx5e_get_rep_max_num_channels,
	.update_carrier		= NULL,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
	.max_tc			= 1,
};

/* e-Switch vport representors */

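/* The PF (vport 0) representor piggybacks on the existing NIC netdev:
 * loading it only adds the SQ forwarding rules and neighbour tracking,
 * and unloading reverses that and resets the offloaded TC rules.
 */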
static int
mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_add_sqs_fwd_rules(priv);
		if (err)
			return err;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err)
		goto err_remove_sqs;

	return 0;

err_remove_sqs:
	mlx5e_remove_sqs_fwd_rules(priv);
	return err;
}

static void
mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* clean (and re-init) existing uplink offloaded TC rules */
	mlx5e_tc_cleanup(priv);
	mlx5e_tc_init(priv);

	mlx5e_rep_neigh_cleanup(rpriv);
}

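/* VF representor load: create a dedicated netdev with the representor
 * profile, attach it to the device, wire up neighbour tracking and the
 * TC egress-device callback, then register the netdev.
 */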
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;
	struct net_device *netdev;
	struct mlx5e_priv *upriv;
	int err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
	if (!netdev) {
		pr_warn("Failed to create representor netdev for vport %d\n",
			rep->vport);
		kfree(rpriv);
		return -EINVAL;
	}

	rep->netdev = netdev;
	rpriv->rep = rep;

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		pr_warn("Failed to attach representor netdev for vport %d\n",
			rep->vport);
		goto err_destroy_netdev;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err) {
		pr_warn("Failed to initialize neighbours handling for vport %d\n",
			rep->vport);
		goto err_detach_netdev;
	}

	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(dev->priv.eswitch));
	err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
					 upriv);
	if (err)
		goto err_neigh_cleanup;

	err = register_netdev(netdev);
	if (err) {
		pr_warn("Failed to register representor netdev for vport %d\n",
			rep->vport);
		goto err_egdev_cleanup;
	}

	return 0;

err_egdev_cleanup:
	tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
				     upriv);

err_neigh_cleanup:
	mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	kfree(rpriv);
	return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct net_device *netdev = rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	void *ppriv = priv->ppriv;
	struct mlx5e_priv *upriv;

	unregister_netdev(rep->netdev);
	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(priv->mdev->priv.eswitch));
	tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
				     upriv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_detach_netdev(priv);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}

static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep rep;

		rep.load = mlx5e_vport_rep_load;
		rep.unload = mlx5e_vport_rep_unload;
		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
	}
}

static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);
}

void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep rep;

	rep.load = mlx5e_nic_rep_load;
	rep.unload = mlx5e_nic_rep_unload;
	rep.netdev = priv->netdev;
	mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/

	mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}

void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
	mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/
}

void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return NULL;

	rpriv->rep = &esw->offloads.vport_reps[0];
	return rpriv;
}