]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
Merge tag 'omap-for-v5.0/fixes-rc7-signed' of git://git.kernel.org/pub/scm/linux...
[mirror_ubuntu-eoan-kernel.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_rep.c
CommitLineData
cb67b832
HHZ
1/*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <generated/utsrelease.h>
34#include <linux/mlx5/fs.h>
35#include <net/switchdev.h>
d957b4e3 36#include <net/pkt_cls.h>
717503b9 37#include <net/act_api.h>
232c0013
HHZ
38#include <net/netevent.h>
39#include <net/arp.h>
cb67b832
HHZ
40
41#include "eswitch.h"
42#include "en.h"
1d447a39 43#include "en_rep.h"
adb4c123 44#include "en_tc.h"
101f4de9 45#include "en/tc_tun.h"
f6dfb4c3 46#include "fs_core.h"
cb67b832 47
/* Representors use a small default SQ: at least 0x7, but never below the
 * driver-wide minimum log SQ size.
 */
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
/* Representor netdevs default to a single channel. */
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

/* Per tunnel-netdev state for indirect TC block offload; entries are
 * linked into rpriv->uplink_priv.tc_indr_block_priv_list (RTNL protected).
 */
struct mlx5e_rep_indr_block_priv {
	struct net_device *netdev;
	struct mlx5e_rep_priv *rpriv;

	struct list_head list;
};

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
					    struct net_device *netdev);
/* ethtool .get_drvinfo: report the representor driver name and the kernel
 * release as the driver version.
 */
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
71
/* Software (per-ring, driver-maintained) counters exposed for representors. */
static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

/* Layout mirror of the vport counters read from hardware; field order here
 * defines the order of the "vport_*" ethtool strings below.
 */
struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};

/* Hardware (e-switch vport) counters exposed for representors. */
static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
cb67b832
HHZ
95
96static void mlx5e_rep_get_strings(struct net_device *dev,
97 u32 stringset, uint8_t *data)
98{
a228060a 99 int i, j;
cb67b832
HHZ
100
101 switch (stringset) {
102 case ETH_SS_STATS:
a228060a 103 for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
cb67b832
HHZ
104 strcpy(data + (i * ETH_GSTRING_LEN),
105 sw_rep_stats_desc[i].format);
a228060a
OG
106 for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
107 strcpy(data + (i * ETH_GSTRING_LEN),
108 vport_rep_stats_desc[j].format);
cb67b832
HHZ
109 break;
110 }
111}
112
/* Refresh priv->stats.vf_vport from the e-switch vport counters of the VF
 * this netdev represents.
 */
static void mlx5e_vf_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		/* Keep the previously cached values on failure. */
		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes   = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes   = vf_stats.rx_bytes;
}
135
/* Refresh priv->stats.vf_vport for the uplink representor from the physical
 * port's IEEE 802.3 counters (no flip needed: the uplink faces the wire).
 */
static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct rtnl_link_stats64 *vport_stats;

	mlx5e_grp_802_3_update_stats(priv);

	vport_stats = &priv->stats.vf_vport;

	vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
	vport_stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
	vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
	vport_stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}
150
/* Dispatch the HW-counter refresh to the uplink or VF variant depending on
 * which vport this representor belongs to.
 */
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep->vport == FDB_UPLINK_VPORT)
		mlx5e_uplink_rep_update_hw_counters(priv);
	else
		mlx5e_vf_rep_update_hw_counters(priv);
}
161
/* Aggregate per-channel RQ/SQ software counters into priv->stats.sw.
 * Caller holds priv->state_lock so priv->channels is stable.
 */
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	int i, j;

	/* Recompute from scratch each time; totals are sums over channels. */
	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		rq_stats = c->rq.stats;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;

		for (j = 0; j < priv->channels.params.num_tc; j++) {
			sq_stats = c->sq[j].stats;

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_queue_dropped	+= sq_stats->dropped;
		}
	}
}
187
/* ethtool .get_ethtool_stats: refresh SW (only while the device is open)
 * and HW counters under state_lock, then copy them out in the same order
 * as mlx5e_rep_get_strings() emits the names.
 */
static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i, j;

	if (!data)
		return;

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_rep_update_sw_counters(priv);
	mlx5e_rep_update_hw_counters(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);

	/* i keeps advancing: HW values land right after the SW values. */
	for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
					       vport_rep_stats_desc, j);
}
211
212static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
213{
214 switch (sset) {
215 case ETH_SS_STATS:
a228060a 216 return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
cb67b832
HHZ
217 default:
218 return -EOPNOTSUPP;
219 }
220}
221
/* ethtool .get_ringparam: delegate to the common mlx5e implementation. */
static void mlx5e_rep_get_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *param)
{
	mlx5e_ethtool_get_ringparam(netdev_priv(dev), param);
}
229
/* ethtool .set_ringparam: delegate to the common mlx5e implementation. */
static int mlx5e_rep_set_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *param)
{
	return mlx5e_ethtool_set_ringparam(netdev_priv(dev), param);
}
237
/* Atomically swap this representor's vport RX steering rule for one that
 * forwards to @dest. The new rule is installed before the old one is
 * removed so traffic is never left without a rule; on failure the old
 * rule stays in place.
 */
static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
					   struct mlx5_flow_destination *dest)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}
256
/* ethtool .get_channels: delegate to the common mlx5e implementation. */
static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	mlx5e_ethtool_get_channels(netdev_priv(dev), ch);
}
264
/* ethtool .set_channels: apply the channel change, then retarget the vport
 * RX rule when crossing the single/multi-channel boundary — multi-channel
 * needs RSS via the TTC flow table, single channel can go straight to the
 * first direct TIR.
 */
static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	u16 curr_channels_amount = priv->channels.params.num_channels;
	u32 new_channels_amount = ch->combined_count;
	struct mlx5_flow_destination new_dest;
	int err = 0;

	err = mlx5e_ethtool_set_channels(priv, ch);
	if (err)
		return err;

	if (curr_channels_amount == 1 && new_channels_amount > 1) {
		/* 1 -> many: spread RX via the TTC flow table (RSS). */
		new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		new_dest.ft = priv->fs.ttc.ft.t;
	} else if (new_channels_amount == 1 && curr_channels_amount > 1) {
		/* many -> 1: steer RX directly to the single TIR. */
		new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		new_dest.tir_num = priv->direct_tir[0].tirn;
	} else {
		/* Boundary not crossed; the existing rule is still valid. */
		return 0;
	}

	err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
	if (err) {
		netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n",
			    curr_channels_amount, new_channels_amount);
		return err;
	}

	return 0;
}
297
/* ethtool .get_coalesce: delegate to the common mlx5e implementation. */
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	return mlx5e_ethtool_get_coalesce(netdev_priv(netdev), coal);
}
305
/* ethtool .set_coalesce: delegate to the common mlx5e implementation. */
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	return mlx5e_ethtool_set_coalesce(netdev_priv(netdev), coal);
}
313
84a09733
GT
314static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
315{
316 struct mlx5e_priv *priv = netdev_priv(netdev);
317
318 return mlx5e_ethtool_get_rxfh_key_size(priv);
319}
320
321static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
322{
323 struct mlx5e_priv *priv = netdev_priv(netdev);
324
325 return mlx5e_ethtool_get_rxfh_indir_size(priv);
326}
327
/* ethtool .get_pauseparam (uplink rep only): delegate to common mlx5e. */
static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
					    struct ethtool_pauseparam *pauseparam)
{
	mlx5e_ethtool_get_pauseparam(netdev_priv(netdev), pauseparam);
}
335
/* ethtool .set_pauseparam (uplink rep only): delegate to common mlx5e. */
static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
					   struct ethtool_pauseparam *pauseparam)
{
	return mlx5e_ethtool_set_pauseparam(netdev_priv(netdev), pauseparam);
}
343
/* ethtool .get_link_ksettings (uplink rep only): delegate to common mlx5e. */
static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
					       struct ethtool_link_ksettings *link_ksettings)
{
	return mlx5e_ethtool_get_link_ksettings(netdev_priv(netdev),
						link_ksettings);
}
351
/* ethtool .set_link_ksettings (uplink rep only): delegate to common mlx5e. */
static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
					       const struct ethtool_link_ksettings *link_ksettings)
{
	return mlx5e_ethtool_set_link_ksettings(netdev_priv(netdev),
						link_ksettings);
}
359
/* ethtool ops for VF representors: stats, ring/channel/coalesce config and
 * RSS sizes only — no link/pause control (that belongs to the uplink rep).
 */
static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
375
/* ethtool ops for the uplink representor: VF-rep ops plus physical-port
 * controls (link ksettings and pause parameters).
 */
static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings       = mlx5e_rep_get_strings,
	.get_sset_count    = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam     = mlx5e_rep_get_ringparam,
	.set_ringparam     = mlx5e_rep_set_ringparam,
	.get_channels      = mlx5e_rep_get_channels,
	.set_channels      = mlx5e_rep_set_channels,
	.get_coalesce      = mlx5e_rep_get_coalesce,
	.set_coalesce      = mlx5e_rep_set_coalesce,
	.get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
	.set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
	.get_rxfh_key_size   = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
	.get_pauseparam      = mlx5e_uplink_rep_get_pauseparam,
	.set_pauseparam      = mlx5e_uplink_rep_set_pauseparam,
};
395
/* switchdev attr getter: report the parent switch ID so user space can
 * group all representors of one e-switch under the same physical switch.
 */
static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *uplink_upper = NULL;
	struct mlx5e_priv *uplink_priv = NULL;
	struct net_device *uplink_dev;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
	if (uplink_dev) {
		uplink_upper = netdev_master_upper_dev_get(uplink_dev);
		uplink_priv = netdev_priv(uplink_dev);
	}

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		/* Under SRIOV LAG use the bond master's address so both
		 * bonded devices report the same switch ID.
		 */
		if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
			ether_addr_copy(attr->u.ppid.id, uplink_upper->dev_addr);
		} else {
			struct mlx5e_rep_priv *rpriv = priv->ppriv;
			struct mlx5_eswitch_rep *rep = rpriv->rep;

			ether_addr_copy(attr->u.ppid.id, rep->hw_id);
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
431
/* Tear down all send-to-vport re-inject rules previously installed for
 * @rep's SQs and free the bookkeeping entries. No-op outside offloads mode.
 */
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	/* _safe variant: entries are deleted while walking the list. */
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}
448
/* Install a send-to-vport re-inject rule for each SQ in @sqns_array so
 * packets sent on the representor's SQs reach @rep's vport. Each rule is
 * tracked in rpriv->vport_sqs_list. On any failure all rules installed so
 * far are rolled back via mlx5e_sqs2vport_stop(). No-op outside offloads
 * mode. Returns 0 or a negative errno.
 */
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}
	return 0;

out_err:
	/* Roll back everything added by earlier iterations. */
	mlx5e_sqs2vport_stop(esw, rep);
	return err;
}
488
/* Collect the SQ numbers of every TC SQ on every open channel and install
 * the matching send-to-vport forwarding rules for this representor.
 * Returns 0 or a negative errno (logged as a netdev warning).
 */
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	/* One slot per (channel, tc) pair. */
	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}
517
cb67b832
HHZ
518void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
519{
520 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1d447a39
SM
521 struct mlx5e_rep_priv *rpriv = priv->ppriv;
522 struct mlx5_eswitch_rep *rep = rpriv->rep;
cb67b832 523
f7a68945 524 mlx5e_sqs2vport_stop(esw, rep);
cb67b832
HHZ
525}
526
/* Seed the neigh-stats polling interval with the smaller of the IPv4 and
 * IPv6 default DELAY_PROBE_TIME values and propagate it to the flow-counter
 * sampling machinery. With IPv6 disabled, the IPv6 interval degenerates to
 * ~0UL so only the ARP value matters.
 */
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
	unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
						DELAY_PROBE_TIME);
#else
	unsigned long ipv6_interval = ~0UL;
#endif
	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
						DELAY_PROBE_TIME);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}
543
544void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
545{
546 struct mlx5e_rep_priv *rpriv = priv->ppriv;
547 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
548
549 mlx5_fc_queue_stats_work(priv->mdev,
550 &neigh_update->neigh_stats_work,
551 neigh_update->min_interval);
552}
553
/* Periodic work: under RTNL, refresh the "used" state of every tracked
 * neighbour from its flow counters, and re-queue itself as long as any
 * neighbour entries remain.
 */
static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
						    neigh_update.neigh_stats_work.work);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe;

	rtnl_lock();
	/* Re-queue first so a long update pass cannot stall the cadence. */
	if (!list_empty(&rpriv->neigh_update.neigh_list))
		mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
		mlx5e_tc_update_neigh_used_value(nhe);

	rtnl_unlock();
}
571
/* Take a reference on a neigh hash entry. */
static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
	refcount_inc(&nhe->refcnt);
}

/* Drop a reference on a neigh hash entry; frees it on the last put. */
static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
	if (refcount_dec_and_test(&nhe->refcnt))
		kfree(nhe);
}
582
/* Reconcile the offloaded encap flows of @e with the neighbour's state:
 * tear down valid flows when the neighbour went away or its MAC changed,
 * and (re)install flows with the new destination MAC when it is connected
 * but the encap entry is not yet valid. Runs under RTNL.
 */
static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   bool neigh_connected,
				   unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;

	ASSERT_RTNL();

	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
		mlx5e_tc_encap_flows_del(priv, e);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);
		/* Update the encap source mac, in case that we delete
		 * the flows when encap source mac changed.
		 */
		ether_addr_copy(eth->h_source, e->route_dev->dev_addr);

		mlx5e_tc_encap_flows_add(priv, e);
	}
}
607
/* Deferred neighbour-update handler (queued from netevent context).
 * Snapshots the neighbour's HW address and validity under its lock, then
 * updates every encap entry hanging off the hash entry whose state no
 * longer matches. Drops the references taken when the work was queued.
 */
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
	struct mlx5e_neigh_hash_entry *nhe =
		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
	struct neighbour *n = nhe->n;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	bool neigh_connected;
	bool encap_connected;
	u8 nud_state, dead;

	rtnl_lock();

	/* If these parameters are changed after we release the lock,
	 * we'll receive another event letting us know about it.
	 * We use this lock to avoid inconsistency between the neigh validity
	 * and it's hw address.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	neigh_connected = (nud_state & NUD_VALID) && !dead;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
		priv = netdev_priv(e->out_dev);

		/* Only touch flows whose offload state diverged. */
		if (encap_connected != neigh_connected ||
		    !ether_addr_equal(e->h_dest, ha))
			mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
	}
	mlx5e_rep_neigh_entry_release(nhe);
	rtnl_unlock();
	neigh_release(n);
}
647
/* Find the indirect-block private entry registered for @netdev, or NULL.
 * The list is only ever touched under RTNL.
 */
static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
				 struct net_device *netdev)
{
	struct mlx5e_rep_indr_block_priv *cb_priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv,
			    &rpriv->uplink_priv.tc_indr_block_priv_list,
			    list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}
665
/* Unregister and free every remaining indirect-block entry; used on
 * uplink representor teardown.
 */
static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
	struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;

	list_for_each_entry_safe(cb_priv, temp, head, list) {
		mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
		kfree(cb_priv);
	}
}
676
/* Handle a cls_flower command arriving through an indirect (tunnel-device)
 * TC block and forward it to the mlx5e TC machinery as an egress e-switch
 * offload. Returns 0 or a negative errno.
 */
static int
mlx5e_rep_indr_offload(struct net_device *netdev,
		       struct tc_cls_flower_offload *flower,
		       struct mlx5e_rep_indr_block_priv *indr_priv)
{
	struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
	int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
	int err = 0;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		err = mlx5e_configure_flower(netdev, priv, flower, flags);
		break;
	case TC_CLSFLOWER_DESTROY:
		err = mlx5e_delete_flower(netdev, priv, flower, flags);
		break;
	case TC_CLSFLOWER_STATS:
		err = mlx5e_stats_flower(netdev, priv, flower, flags);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}
702
703static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
704 void *type_data, void *indr_priv)
705{
706 struct mlx5e_rep_indr_block_priv *priv = indr_priv;
707
708 switch (type) {
709 case TC_SETUP_CLSFLOWER:
710 return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
711 default:
712 return -EOPNOTSUPP;
713 }
714}
715
/* Bind/unbind an indirect TC block on @netdev: on BIND, allocate and list
 * a per-netdev priv and register the flower callback on the block; on
 * UNBIND, undo both. Only clsact ingress blocks are accepted.
 */
static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
			      struct mlx5e_rep_priv *rpriv,
			      struct tc_block_offload *f)
{
	struct mlx5e_rep_indr_block_priv *indr_priv;
	int err = 0;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		/* Refuse double-binding the same netdev. */
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->rpriv = rpriv;
		list_add(&indr_priv->list,
			 &rpriv->uplink_priv.tc_indr_block_priv_list);

		err = tcf_block_cb_register(f->block,
					    mlx5e_rep_indr_setup_block_cb,
					    indr_priv, indr_priv, f->extack);
		if (err) {
			/* Roll back the list insertion on failure. */
			list_del(&indr_priv->list);
			kfree(indr_priv);
		}

		return err;
	case TC_BLOCK_UNBIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (!indr_priv)
			return -ENOENT;

		tcf_block_cb_unregister(f->block,
					mlx5e_rep_indr_setup_block_cb,
					indr_priv);
		list_del(&indr_priv->list);
		kfree(indr_priv);

		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
768
769static
770int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
771 enum tc_setup_type type, void *type_data)
772{
773 switch (type) {
774 case TC_SETUP_BLOCK:
775 return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
776 type_data);
777 default:
778 return -EOPNOTSUPP;
779 }
780}
781
/* Register an indirect TC block callback for tunnel device @netdev so its
 * filters can be offloaded through this uplink rep. Logs on failure.
 */
static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
					 struct net_device *netdev)
{
	int err;

	err = __tc_indr_block_cb_register(netdev, rpriv,
					  mlx5e_rep_indr_setup_tc_cb,
					  rpriv);
	if (err) {
		struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

		mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
			      netdev_name(netdev), err);
	}
	return err;
}
798
/* Undo mlx5e_rep_indr_register_block() for @netdev. */
static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
					    struct net_device *netdev)
{
	__tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
				      rpriv);
}
805
/* netdevice notifier for the uplink rep: (un)register indirect TC blocks
 * as offloadable tunnel devices come and go.
 */
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    uplink_priv.netdevice_nb);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	/* Ignore devices we cannot offload tunnels for. */
	if (!mlx5e_tc_tun_device_to_offload(priv, netdev))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_REGISTER:
		mlx5e_rep_indr_register_block(rpriv, netdev);
		break;
	case NETDEV_UNREGISTER:
		mlx5e_rep_indr_unregister_block(rpriv, netdev);
		break;
	}
	return NOTIFY_OK;
}
827
/* Forward declaration: defined below, needed by the netevent handler. */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh);
831
/* netevent notifier (atomic context): reacts to neighbour updates by
 * queueing deferred work on the matching hash entry, and to per-device
 * DELAY_PROBE_TIME changes by shrinking the counter sampling interval.
 */
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
		/* Only ARP and (if enabled) IPv6 ND tables are of interest. */
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		/* We are in atomic context and can't take RTNL mutex, so use
		 * spin_lock_bh to lookup the neigh table. bh is used since
		 * netevent can be called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		if (!nhe) {
			spin_unlock_bh(&neigh_update->encap_lock);
			return NOTIFY_DONE;
		}

		/* This assignment is valid as long as the the neigh reference
		 * is taken
		 */
		nhe->n = n;

		/* Take a reference to ensure the neighbour and mlx5 encap
		 * entry won't be destructed until we drop the reference in
		 * delayed work.
		 */
		neigh_hold(n);
		mlx5e_rep_neigh_entry_hold(nhe);

		if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
			/* Work was already pending; drop our extra refs. */
			mlx5e_rep_neigh_entry_release(nhe);
			neigh_release(n);
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check the device is present since we don't care about
		 * changes in the default table, we only care about changes
		 * done per device delay prob time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use spin_lock_bh to walk the neigh list and look for
		 * the relevant device. bh is used since netevent can be
		 * called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		if (!found)
			return NOTIFY_DONE;

		/* The interval only ever shrinks toward the fastest device. */
		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}
929
/* rhashtable layout for neigh hash entries: keyed by the mlx5e_neigh tuple
 * embedded in each entry.
 */
static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};
936
/* Initialize the representor's neighbour-update machinery: hash table,
 * tracking list, encap lock, stats work, sampling interval, and the
 * netevent notifier. Unwinds the hash table on notifier failure.
 */
static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	spin_lock_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}
962
/* Tear down the neighbour-update machinery in reverse order of init:
 * stop new events, drain queued update works, cancel the stats work,
 * then destroy the hash table.
 */
static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	rhashtable_destroy(&neigh_update->neigh_ht);
}
976
977static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
978 struct mlx5e_neigh_hash_entry *nhe)
979{
980 struct mlx5e_rep_priv *rpriv = priv->ppriv;
981 int err;
982
983 err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
984 &nhe->rhash_node,
985 mlx5e_neigh_ht_params);
986 if (err)
987 return err;
988
989 list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
990
991 return err;
992}
993
994static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
995 struct mlx5e_neigh_hash_entry *nhe)
996{
997 struct mlx5e_rep_priv *rpriv = priv->ppriv;
998
232c0013
HHZ
999 spin_lock_bh(&rpriv->neigh_update.encap_lock);
1000
37b498ff
HHZ
1001 list_del(&nhe->neigh_list);
1002
1003 rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
1004 &nhe->rhash_node,
1005 mlx5e_neigh_ht_params);
232c0013 1006 spin_unlock_bh(&rpriv->neigh_update.encap_lock);
37b498ff
HHZ
1007}
1008
232c0013
HHZ
1009/* This function must only be called under RTNL lock or under the
1010 * representor's encap_lock in case RTNL mutex can't be held.
1011 */
37b498ff
HHZ
1012static struct mlx5e_neigh_hash_entry *
1013mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
1014 struct mlx5e_neigh *m_neigh)
1015{
1016 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1017 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
1018
1019 return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
1020 mlx5e_neigh_ht_params);
1021}
1022
232c0013
HHZ
1023static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
1024 struct mlx5e_encap_entry *e,
1025 struct mlx5e_neigh_hash_entry **nhe)
1026{
1027 int err;
1028
1029 *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
1030 if (!*nhe)
1031 return -ENOMEM;
1032
1033 memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
1034 INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
1035 INIT_LIST_HEAD(&(*nhe)->encap_list);
1036 refcount_set(&(*nhe)->refcnt, 1);
1037
1038 err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
1039 if (err)
1040 goto out_free;
1041 return 0;
1042
1043out_free:
1044 kfree(*nhe);
1045 return err;
1046}
1047
static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
					  struct mlx5e_neigh_hash_entry *nhe)
{
	/* The neigh hash entry must be removed from the hash table regardless
	 * of the reference count value, so it won't be found by the next
	 * neigh notification call. The neigh hash entry reference count is
	 * incremented only during creation and neigh notification calls and
	 * protects from freeing the nhe struct.
	 */
	mlx5e_rep_neigh_entry_remove(priv, nhe);
	mlx5e_rep_neigh_entry_release(nhe);
}
1060
1061int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
1062 struct mlx5e_encap_entry *e)
1063{
1064 struct mlx5e_neigh_hash_entry *nhe;
1065 int err;
1066
1067 nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
1068 if (!nhe) {
1069 err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
1070 if (err)
1071 return err;
1072 }
1073 list_add(&e->encap_list, &nhe->encap_list);
1074 return 0;
1075}
1076
1077void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
1078 struct mlx5e_encap_entry *e)
1079{
1080 struct mlx5e_neigh_hash_entry *nhe;
1081
1082 list_del(&e->encap_list);
1083 nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
1084
1085 if (list_empty(&nhe->encap_list))
1086 mlx5e_rep_neigh_entry_destroy(priv, nhe);
1087}
1088
d9ee0491 1089static int mlx5e_vf_rep_open(struct net_device *dev)
20a1ea67
OG
1090{
1091 struct mlx5e_priv *priv = netdev_priv(dev);
1d447a39
SM
1092 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1093 struct mlx5_eswitch_rep *rep = rpriv->rep;
20a1ea67
OG
1094 int err;
1095
63bfd399
EBE
1096 mutex_lock(&priv->state_lock);
1097 err = mlx5e_open_locked(dev);
20a1ea67 1098 if (err)
63bfd399 1099 goto unlock;
20a1ea67 1100
84c9c8f2 1101 if (!mlx5_modify_vport_admin_state(priv->mdev,
cc9c82a8
EBE
1102 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1103 rep->vport, MLX5_VPORT_ADMIN_STATE_UP))
20a1ea67
OG
1104 netif_carrier_on(dev);
1105
63bfd399
EBE
1106unlock:
1107 mutex_unlock(&priv->state_lock);
1108 return err;
20a1ea67
OG
1109}
1110
d9ee0491 1111static int mlx5e_vf_rep_close(struct net_device *dev)
20a1ea67
OG
1112{
1113 struct mlx5e_priv *priv = netdev_priv(dev);
1d447a39
SM
1114 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1115 struct mlx5_eswitch_rep *rep = rpriv->rep;
63bfd399 1116 int ret;
20a1ea67 1117
63bfd399 1118 mutex_lock(&priv->state_lock);
84c9c8f2 1119 mlx5_modify_vport_admin_state(priv->mdev,
cc9c82a8
EBE
1120 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1121 rep->vport, MLX5_VPORT_ADMIN_STATE_DOWN);
63bfd399
EBE
1122 ret = mlx5e_close_locked(dev);
1123 mutex_unlock(&priv->state_lock);
1124 return ret;
20a1ea67
OG
1125}
1126
cb67b832
HHZ
1127static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
1128 char *buf, size_t len)
1129{
1130 struct mlx5e_priv *priv = netdev_priv(dev);
1d447a39
SM
1131 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1132 struct mlx5_eswitch_rep *rep = rpriv->rep;
c12ecc23
OG
1133 int ret, pf_num;
1134
1135 ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num);
1136 if (ret)
1137 return ret;
1138
1139 if (rep->vport == FDB_UPLINK_VPORT)
1140 ret = snprintf(buf, len, "p%d", pf_num);
1141 else
1142 ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
cb67b832 1143
cb67b832
HHZ
1144 if (ret >= len)
1145 return -EOPNOTSUPP;
1146
1147 return 0;
1148}
1149
de4784ca 1150static int
855afa09 1151mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
60bd4af8 1152 struct tc_cls_flower_offload *cls_flower, int flags)
d957b4e3 1153{
8c818c27
JP
1154 switch (cls_flower->command) {
1155 case TC_CLSFLOWER_REPLACE:
71d82d2a
OS
1156 return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
1157 flags);
8c818c27 1158 case TC_CLSFLOWER_DESTROY:
71d82d2a
OS
1159 return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
1160 flags);
8c818c27 1161 case TC_CLSFLOWER_STATS:
71d82d2a
OS
1162 return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
1163 flags);
60bd4af8
OG
1164 default:
1165 return -EOPNOTSUPP;
1166 }
1167}
1168
855afa09
JP
1169static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
1170 void *cb_priv)
1171{
1172 struct mlx5e_priv *priv = cb_priv;
1173
1174 switch (type) {
1175 case TC_SETUP_CLSFLOWER:
d9ee0491
OG
1176 return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
1177 MLX5E_TC_ESW_OFFLOAD);
855afa09
JP
1178 default:
1179 return -EOPNOTSUPP;
1180 }
1181}
1182
1183static int mlx5e_rep_setup_tc_block(struct net_device *dev,
1184 struct tc_block_offload *f)
1185{
1186 struct mlx5e_priv *priv = netdev_priv(dev);
1187
1188 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1189 return -EOPNOTSUPP;
1190
1191 switch (f->command) {
1192 case TC_BLOCK_BIND:
1193 return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
60513bd8 1194 priv, priv, f->extack);
855afa09
JP
1195 case TC_BLOCK_UNBIND:
1196 tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
1197 return 0;
1198 default:
1199 return -EOPNOTSUPP;
1200 }
1201}
1202
8c818c27 1203static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
de4784ca 1204 void *type_data)
8c818c27 1205{
2572ac53 1206 switch (type) {
855afa09
JP
1207 case TC_SETUP_BLOCK:
1208 return mlx5e_rep_setup_tc_block(dev, type_data);
d957b4e3
OG
1209 default:
1210 return -EOPNOTSUPP;
1211 }
1212}
1213
370bad0f
OG
1214bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
1215{
1d447a39
SM
1216 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1217 struct mlx5_eswitch_rep *rep;
1218
733d3e54 1219 if (!MLX5_ESWITCH_MANAGER(priv->mdev))
1d447a39 1220 return false;
370bad0f 1221
d9ee0491
OG
1222 if (!rpriv) /* non vport rep mlx5e instances don't use this field */
1223 return false;
370bad0f 1224
d9ee0491
OG
1225 rep = rpriv->rep;
1226 return (rep->vport == FDB_UPLINK_VPORT);
370bad0f
OG
1227}
1228
13e509a4 1229static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
370bad0f 1230{
370bad0f
OG
1231 switch (attr_id) {
1232 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
370bad0f
OG
1233 return true;
1234 }
1235
1236 return false;
1237}
1238
1239static int
1240mlx5e_get_sw_stats64(const struct net_device *dev,
1241 struct rtnl_link_stats64 *stats)
1242{
1243 struct mlx5e_priv *priv = netdev_priv(dev);
1244 struct mlx5e_sw_stats *sstats = &priv->stats.sw;
1245
868a01a2
SL
1246 mlx5e_rep_update_sw_counters(priv);
1247
370bad0f
OG
1248 stats->rx_packets = sstats->rx_packets;
1249 stats->rx_bytes = sstats->rx_bytes;
1250 stats->tx_packets = sstats->tx_packets;
1251 stats->tx_bytes = sstats->tx_bytes;
1252
1253 stats->tx_dropped = sstats->tx_queue_dropped;
1254
1255 return 0;
1256}
1257
13e509a4
OG
1258static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
1259 void *sp)
370bad0f
OG
1260{
1261 switch (attr_id) {
1262 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1263 return mlx5e_get_sw_stats64(dev, sp);
1264 }
1265
1266 return -EINVAL;
1267}
1268
bc1f4470 1269static void
d9ee0491 1270mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
370bad0f
OG
1271{
1272 struct mlx5e_priv *priv = netdev_priv(dev);
1273
ed56c519 1274 /* update HW stats in background for next time */
cdeef2b1 1275 mlx5e_queue_update_stats(priv);
370bad0f 1276 memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
370bad0f
OG
1277}
1278
d9ee0491
OG
1279static int mlx5e_vf_rep_change_mtu(struct net_device *netdev, int new_mtu)
1280{
1281 return mlx5e_change_mtu(netdev, new_mtu, NULL);
1282}
1283
b36cdb42 1284static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
d9ee0491 1285{
b36cdb42 1286 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
d9ee0491
OG
1287}
1288
b36cdb42 1289static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
d9ee0491 1290{
b36cdb42
OG
1291 struct sockaddr *saddr = addr;
1292
1293 if (!is_valid_ether_addr(saddr->sa_data))
1294 return -EADDRNOTAVAIL;
1295
1296 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
1297 return 0;
d9ee0491
OG
1298}
1299
6ce966fd
OG
1300static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
1301 __be16 vlan_proto)
1302{
1303 netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");
1304
1305 if (vlan != 0)
1306 return -EOPNOTSUPP;
1307
1308 /* allow setting 0-vid for compatibility with libvirt */
1309 return 0;
1310}
1311
cb67b832
HHZ
1312static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
1313 .switchdev_port_attr_get = mlx5e_attr_get,
1314};
1315
d9ee0491
OG
1316static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
1317 .ndo_open = mlx5e_vf_rep_open,
1318 .ndo_stop = mlx5e_vf_rep_close,
1319 .ndo_start_xmit = mlx5e_xmit,
1320 .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
1321 .ndo_setup_tc = mlx5e_rep_setup_tc,
1322 .ndo_get_stats64 = mlx5e_vf_rep_get_stats,
13e509a4
OG
1323 .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
1324 .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
d9ee0491
OG
1325 .ndo_change_mtu = mlx5e_vf_rep_change_mtu,
1326};
250a42b6 1327
d9ee0491 1328static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
b36cdb42 1329 .ndo_open = mlx5e_open,
d9ee0491 1330 .ndo_stop = mlx5e_close,
cb67b832 1331 .ndo_start_xmit = mlx5e_xmit,
b36cdb42 1332 .ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
cb67b832 1333 .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
8c818c27 1334 .ndo_setup_tc = mlx5e_rep_setup_tc,
d9ee0491 1335 .ndo_get_stats64 = mlx5e_get_stats,
13e509a4
OG
1336 .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
1337 .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
d9ee0491 1338 .ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
073caf50
OG
1339 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
1340 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
1341 .ndo_features_check = mlx5e_features_check,
1342 .ndo_set_vf_mac = mlx5e_set_vf_mac,
1343 .ndo_set_vf_rate = mlx5e_set_vf_rate,
1344 .ndo_get_vf_config = mlx5e_get_vf_config,
1345 .ndo_get_vf_stats = mlx5e_get_vf_stats,
6ce966fd 1346 .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
cb67b832
HHZ
1347};
1348
a0646c88
EB
1349bool mlx5e_eswitch_rep(struct net_device *netdev)
1350{
1351 if (netdev->netdev_ops == &mlx5e_netdev_ops_vf_rep ||
1352 netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
1353 return true;
1354
1355 return false;
1356}
1357
025380b2 1358static void mlx5e_build_rep_params(struct net_device *netdev)
cb67b832 1359{
025380b2 1360 struct mlx5e_priv *priv = netdev_priv(netdev);
d9ee0491
OG
1361 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1362 struct mlx5_eswitch_rep *rep = rpriv->rep;
025380b2
OG
1363 struct mlx5_core_dev *mdev = priv->mdev;
1364 struct mlx5e_params *params;
1365
cb67b832
HHZ
1366 u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
1367 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
1368 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1369
025380b2 1370 params = &priv->channels.params;
472a1e44 1371 params->hard_mtu = MLX5E_ETH_HARD_MTU;
025380b2 1372 params->sw_mtu = netdev->mtu;
d9ee0491
OG
1373
1374 /* SQ */
1375 if (rep->vport == FDB_UPLINK_VPORT)
1376 params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
1377 else
5d1f7354 1378 params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
cb67b832 1379
749359f4
GT
1380 /* RQ */
1381 mlx5e_build_rq_params(mdev, params);
1382
1383 /* CQ moderation params */
9a317425 1384 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
6a9764ef 1385 mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
cb67b832 1386
6a9764ef 1387 params->num_tc = 1;
5f195c2c
CM
1388
1389 mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
84a09733
GT
1390
1391 /* RSS */
025380b2 1392 mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
cb67b832
HHZ
1393}
1394
1395static void mlx5e_build_rep_netdev(struct net_device *netdev)
1396{
250a42b6 1397 struct mlx5e_priv *priv = netdev_priv(netdev);
d9ee0491
OG
1398 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1399 struct mlx5_eswitch_rep *rep = rpriv->rep;
250a42b6 1400 struct mlx5_core_dev *mdev = priv->mdev;
250a42b6 1401
d9ee0491
OG
1402 if (rep->vport == FDB_UPLINK_VPORT) {
1403 SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev);
1404 netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
1405 /* we want a persistent mac for the uplink rep */
1406 mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
ff9b85de 1407 netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
b36cdb42
OG
1408#ifdef CONFIG_MLX5_CORE_EN_DCB
1409 if (MLX5_CAP_GEN(mdev, qos))
1410 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
1411#endif
d9ee0491
OG
1412 } else {
1413 netdev->netdev_ops = &mlx5e_netdev_ops_vf_rep;
1414 eth_hw_addr_random(netdev);
ff9b85de 1415 netdev->ethtool_ops = &mlx5e_vf_rep_ethtool_ops;
d9ee0491 1416 }
cb67b832
HHZ
1417
1418 netdev->watchdog_timeo = 15 * HZ;
1419
cb67b832 1420
cb67b832 1421 netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
cb67b832 1422
1ee4457c 1423 netdev->features |= NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
d957b4e3 1424 netdev->hw_features |= NETIF_F_HW_TC;
cb67b832 1425
dabeb3b0
GT
1426 netdev->hw_features |= NETIF_F_SG;
1427 netdev->hw_features |= NETIF_F_IP_CSUM;
1428 netdev->hw_features |= NETIF_F_IPV6_CSUM;
1429 netdev->hw_features |= NETIF_F_GRO;
1430 netdev->hw_features |= NETIF_F_TSO;
1431 netdev->hw_features |= NETIF_F_TSO6;
1432 netdev->hw_features |= NETIF_F_RXCSUM;
1433
1ee4457c
OG
1434 if (rep->vport != FDB_UPLINK_VPORT)
1435 netdev->features |= NETIF_F_VLAN_CHALLENGED;
1436
dabeb3b0 1437 netdev->features |= netdev->hw_features;
cb67b832
HHZ
1438}
1439
182570b2
FD
1440static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
1441 struct net_device *netdev,
1442 const struct mlx5e_profile *profile,
1443 void *ppriv)
cb67b832 1444{
6a9764ef 1445 struct mlx5e_priv *priv = netdev_priv(netdev);
182570b2 1446 int err;
6a9764ef 1447
519a0bf5 1448 err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
182570b2
FD
1449 if (err)
1450 return err;
6a9764ef 1451
8956f001 1452 priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
c139dbfd 1453
025380b2 1454 mlx5e_build_rep_params(netdev);
cb67b832 1455 mlx5e_build_rep_netdev(netdev);
237f258c
FD
1456
1457 mlx5e_timestamp_init(priv);
182570b2
FD
1458
1459 return 0;
1460}
1461
1462static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
1463{
1464 mlx5e_netdev_cleanup(priv->netdev, priv);
cb67b832
HHZ
1465}
1466
84a09733
GT
1467static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
1468{
1469 struct ttc_params ttc_params = {};
1470 int tt, err;
1471
1472 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
1473 MLX5_FLOW_NAMESPACE_KERNEL);
1474
1475 /* The inner_ttc in the ttc params is intentionally not set */
1476 ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
1477 mlx5e_set_ttc_ft_params(&ttc_params);
1478 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
1479 ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
1480
1481 err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
1482 if (err) {
1483 netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
1484 return err;
1485 }
1486 return 0;
1487}
1488
092297e0 1489static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
cb67b832
HHZ
1490{
1491 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1d447a39
SM
1492 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1493 struct mlx5_eswitch_rep *rep = rpriv->rep;
74491de9 1494 struct mlx5_flow_handle *flow_rule;
c966f7d5 1495 struct mlx5_flow_destination dest;
092297e0 1496
c966f7d5
GT
1497 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1498 dest.tir_num = priv->direct_tir[0].tirn;
092297e0
GT
1499 flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
1500 rep->vport,
c966f7d5 1501 &dest);
092297e0
GT
1502 if (IS_ERR(flow_rule))
1503 return PTR_ERR(flow_rule);
1504 rpriv->vport_rx_rule = flow_rule;
1505 return 0;
1506}
1507
1508static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
1509{
1510 struct mlx5_core_dev *mdev = priv->mdev;
cb67b832 1511 int err;
cb67b832 1512
2c3b5bee
SM
1513 mlx5e_init_l2_addr(priv);
1514
1462e48d
RD
1515 err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
1516 if (err) {
1517 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
1518 return err;
1519 }
1520
84a09733 1521 err = mlx5e_create_indirect_rqt(priv);
8f493ffd 1522 if (err)
1462e48d 1523 goto err_close_drop_rq;
cb67b832 1524
84a09733
GT
1525 err = mlx5e_create_direct_rqts(priv);
1526 if (err)
1527 goto err_destroy_indirect_rqts;
1528
1529 err = mlx5e_create_indirect_tirs(priv, false);
8f493ffd 1530 if (err)
cb67b832 1531 goto err_destroy_direct_rqts;
cb67b832 1532
84a09733
GT
1533 err = mlx5e_create_direct_tirs(priv);
1534 if (err)
1535 goto err_destroy_indirect_tirs;
1536
1537 err = mlx5e_create_rep_ttc_table(priv);
092297e0 1538 if (err)
cb67b832 1539 goto err_destroy_direct_tirs;
cb67b832 1540
84a09733
GT
1541 err = mlx5e_create_rep_vport_rx_rule(priv);
1542 if (err)
1543 goto err_destroy_ttc_table;
1544
cb67b832
HHZ
1545 return 0;
1546
84a09733
GT
1547err_destroy_ttc_table:
1548 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
cb67b832
HHZ
1549err_destroy_direct_tirs:
1550 mlx5e_destroy_direct_tirs(priv);
84a09733
GT
1551err_destroy_indirect_tirs:
1552 mlx5e_destroy_indirect_tirs(priv, false);
cb67b832 1553err_destroy_direct_rqts:
8f493ffd 1554 mlx5e_destroy_direct_rqts(priv);
84a09733
GT
1555err_destroy_indirect_rqts:
1556 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
1462e48d
RD
1557err_close_drop_rq:
1558 mlx5e_close_drop_rq(&priv->drop_rq);
cb67b832
HHZ
1559 return err;
1560}
1561
1562static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
1563{
1d447a39 1564 struct mlx5e_rep_priv *rpriv = priv->ppriv;
cb67b832 1565
5ed99fb4 1566 mlx5_del_flow_rules(rpriv->vport_rx_rule);
84a09733 1567 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
cb67b832 1568 mlx5e_destroy_direct_tirs(priv);
84a09733 1569 mlx5e_destroy_indirect_tirs(priv, false);
8f493ffd 1570 mlx5e_destroy_direct_rqts(priv);
84a09733 1571 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
1462e48d 1572 mlx5e_close_drop_rq(&priv->drop_rq);
cb67b832
HHZ
1573}
1574
1575static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
1576{
d9ee0491
OG
1577 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1578 struct mlx5_rep_uplink_priv *uplink_priv;
1579 int tc, err;
cb67b832
HHZ
1580
1581 err = mlx5e_create_tises(priv);
1582 if (err) {
1583 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
1584 return err;
1585 }
d9ee0491
OG
1586
1587 if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
1588 uplink_priv = &rpriv->uplink_priv;
1589
1590 /* init shared tc flow table */
1591 err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
1592 if (err)
1593 goto destroy_tises;
1594
1595 /* init indirect block notifications */
1596 INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
1597 uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
1598 err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
1599 if (err) {
1600 mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
1601 goto tc_esw_cleanup;
1602 }
1603 }
1604
cb67b832 1605 return 0;
d9ee0491
OG
1606
1607tc_esw_cleanup:
1608 mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
1609destroy_tises:
1610 for (tc = 0; tc < priv->profile->max_tc; tc++)
1611 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
1612 return err;
1613}
1614
1615static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
1616{
1617 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1618 int tc;
1619
1620 for (tc = 0; tc < priv->profile->max_tc; tc++)
1621 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
1622
1623 if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
1624 /* clean indirect TC block notifications */
1625 unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
1626 mlx5e_rep_indr_clean_block_privs(rpriv);
1627
1628 /* delete shared tc flow table */
1629 mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
1630 }
cb67b832
HHZ
1631}
1632
b36cdb42
OG
1633static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
1634{
1635 struct net_device *netdev = priv->netdev;
1636 struct mlx5_core_dev *mdev = priv->mdev;
1637 u16 max_mtu;
1638
1639 netdev->min_mtu = ETH_MIN_MTU;
1640 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
1641 netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
1642}
1643
1644static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
1645{
1646 struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
1647 struct mlx5_eqe *eqe = data;
1648
1649 if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
1650 return NOTIFY_DONE;
1651
1652 switch (eqe->sub_type) {
1653 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
1654 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
1655 queue_work(priv->wq, &priv->update_carrier_work);
1656 break;
1657 default:
1658 return NOTIFY_DONE;
1659 }
1660
1661 return NOTIFY_OK;
1662}
1663
1664static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
1665{
1666 struct net_device *netdev = priv->netdev;
1667 struct mlx5_core_dev *mdev = priv->mdev;
1668 u16 max_mtu;
1669
1670 netdev->min_mtu = ETH_MIN_MTU;
1671 mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
1672 netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
1673 mlx5e_set_dev_port_mtu(priv);
1674
1675 mlx5_lag_add(mdev, netdev);
1676 priv->events_nb.notifier_call = uplink_rep_async_event;
1677 mlx5_notifier_register(mdev, &priv->events_nb);
1678#ifdef CONFIG_MLX5_CORE_EN_DCB
1679 mlx5e_dcbnl_initialize(priv);
1680 mlx5e_dcbnl_init_app(priv);
1681#endif
1682}
1683
1684static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
1685{
1686 struct mlx5_core_dev *mdev = priv->mdev;
1687
1688#ifdef CONFIG_MLX5_CORE_EN_DCB
1689 mlx5e_dcbnl_delete_app(priv);
1690#endif
1691 mlx5_notifier_unregister(mdev, &priv->events_nb);
1692 mlx5_lag_remove(mdev);
1693}
1694
1695static const struct mlx5e_profile mlx5e_vf_rep_profile = {
cb67b832 1696 .init = mlx5e_init_rep,
182570b2 1697 .cleanup = mlx5e_cleanup_rep,
cb67b832
HHZ
1698 .init_rx = mlx5e_init_rep_rx,
1699 .cleanup_rx = mlx5e_cleanup_rep_rx,
1700 .init_tx = mlx5e_init_rep_tx,
d9ee0491 1701 .cleanup_tx = mlx5e_cleanup_rep_tx,
b36cdb42
OG
1702 .enable = mlx5e_vf_rep_enable,
1703 .update_stats = mlx5e_vf_rep_update_hw_counters,
20fd0c19 1704 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
749359f4 1705 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
cb67b832
HHZ
1706 .max_tc = 1,
1707};
1708
b36cdb42
OG
1709static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
1710 .init = mlx5e_init_rep,
1711 .cleanup = mlx5e_cleanup_rep,
1712 .init_rx = mlx5e_init_rep_rx,
1713 .cleanup_rx = mlx5e_cleanup_rep_rx,
1714 .init_tx = mlx5e_init_rep_tx,
1715 .cleanup_tx = mlx5e_cleanup_rep_tx,
1716 .enable = mlx5e_uplink_rep_enable,
1717 .disable = mlx5e_uplink_rep_disable,
1718 .update_stats = mlx5e_uplink_rep_update_hw_counters,
1719 .update_carrier = mlx5e_update_carrier,
1720 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
1721 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
1722 .max_tc = MLX5E_MAX_NUM_TC,
1723};
1724
1d447a39 1725/* e-Switch vport representors */
1d447a39 1726static int
4c66df01 1727mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1d447a39 1728{
b36cdb42 1729 const struct mlx5e_profile *profile;
1d447a39 1730 struct mlx5e_rep_priv *rpriv;
26e59d80 1731 struct net_device *netdev;
779d986d 1732 int nch, err;
26e59d80 1733
1d447a39
SM
1734 rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
1735 if (!rpriv)
1736 return -ENOMEM;
1737
d9ee0491
OG
1738 /* rpriv->rep to be looked up when profile->init() is called */
1739 rpriv->rep = rep;
1740
779d986d 1741 nch = mlx5e_get_max_num_channels(dev);
b36cdb42
OG
1742 profile = (rep->vport == FDB_UPLINK_VPORT) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
1743 netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
26e59d80
MHY
1744 if (!netdev) {
1745 pr_warn("Failed to create representor netdev for vport %d\n",
1746 rep->vport);
1d447a39 1747 kfree(rpriv);
cb67b832
HHZ
1748 return -EINVAL;
1749 }
26e59d80 1750
5ed99fb4 1751 rpriv->netdev = netdev;
a4b97ab4 1752 rep->rep_if[REP_ETH].priv = rpriv;
5ed99fb4 1753 INIT_LIST_HEAD(&rpriv->vport_sqs_list);
26e59d80 1754
aec002f6
OG
1755 if (rep->vport == FDB_UPLINK_VPORT) {
1756 err = mlx5e_create_mdev_resources(dev);
1757 if (err)
1758 goto err_destroy_netdev;
1759 }
1760
2c3b5bee 1761 err = mlx5e_attach_netdev(netdev_priv(netdev));
26e59d80
MHY
1762 if (err) {
1763 pr_warn("Failed to attach representor netdev for vport %d\n",
1764 rep->vport);
aec002f6 1765 goto err_destroy_mdev_resources;
26e59d80
MHY
1766 }
1767
37b498ff
HHZ
1768 err = mlx5e_rep_neigh_init(rpriv);
1769 if (err) {
1770 pr_warn("Failed to initialized neighbours handling for vport %d\n",
1771 rep->vport);
1772 goto err_detach_netdev;
1773 }
1774
26e59d80
MHY
1775 err = register_netdev(netdev);
1776 if (err) {
1777 pr_warn("Failed to register representor netdev for vport %d\n",
1778 rep->vport);
ef381359 1779 goto err_neigh_cleanup;
26e59d80
MHY
1780 }
1781
cb67b832 1782 return 0;
26e59d80 1783
37b498ff
HHZ
1784err_neigh_cleanup:
1785 mlx5e_rep_neigh_cleanup(rpriv);
1786
26e59d80 1787err_detach_netdev:
2c3b5bee 1788 mlx5e_detach_netdev(netdev_priv(netdev));
26e59d80 1789
aec002f6
OG
1790err_destroy_mdev_resources:
1791 if (rep->vport == FDB_UPLINK_VPORT)
1792 mlx5e_destroy_mdev_resources(dev);
1793
26e59d80 1794err_destroy_netdev:
2c3b5bee 1795 mlx5e_destroy_netdev(netdev_priv(netdev));
1d447a39 1796 kfree(rpriv);
26e59d80 1797 return err;
cb67b832
HHZ
1798}
1799
1d447a39 1800static void
4c66df01 1801mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
cb67b832 1802{
5ed99fb4
MB
1803 struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
1804 struct net_device *netdev = rpriv->netdev;
1d447a39
SM
1805 struct mlx5e_priv *priv = netdev_priv(netdev);
1806 void *ppriv = priv->ppriv;
cb67b832 1807
5ed99fb4 1808 unregister_netdev(netdev);
37b498ff 1809 mlx5e_rep_neigh_cleanup(rpriv);
1d447a39 1810 mlx5e_detach_netdev(priv);
aec002f6
OG
1811 if (rep->vport == FDB_UPLINK_VPORT)
1812 mlx5e_destroy_mdev_resources(priv->mdev);
1d447a39
SM
1813 mlx5e_destroy_netdev(priv);
1814 kfree(ppriv); /* mlx5e_rep_priv */
1815}
1816
22215908
MB
1817static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
1818{
1819 struct mlx5e_rep_priv *rpriv;
1820
1821 rpriv = mlx5e_rep_to_rep_priv(rep);
1822
1823 return rpriv->netdev;
1824}
1825
aec002f6 1826void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
1d447a39 1827{
aec002f6 1828 struct mlx5_eswitch *esw = mdev->priv.eswitch;
1d447a39
SM
1829 int total_vfs = MLX5_TOTAL_VPORTS(mdev);
1830 int vport;
1d447a39 1831
d9ee0491 1832 for (vport = 0; vport < total_vfs; vport++) {
a4b97ab4 1833 struct mlx5_eswitch_rep_if rep_if = {};
1d447a39 1834
a4b97ab4
MB
1835 rep_if.load = mlx5e_vport_rep_load;
1836 rep_if.unload = mlx5e_vport_rep_unload;
22215908 1837 rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
a4b97ab4 1838 mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
1d447a39
SM
1839 }
1840}
1841
aec002f6 1842void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
1d447a39 1843{
1d447a39
SM
1844 struct mlx5_eswitch *esw = mdev->priv.eswitch;
1845 int total_vfs = MLX5_TOTAL_VPORTS(mdev);
1846 int vport;
1847
d9ee0491 1848 for (vport = total_vfs - 1; vport >= 0; vport--)
a4b97ab4 1849 mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
1d447a39 1850}