/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"

#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
        max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

struct mlx5e_rep_indr_block_priv {
        struct net_device *netdev;
        struct mlx5e_rep_priv *rpriv;

        struct list_head list;
};

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
                                            struct net_device *netdev);

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct counter_desc sw_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
        u64 vport_rx_packets;
        u64 vport_tx_packets;
        u64 vport_rx_bytes;
        u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

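/* Ethtool statistics are laid out as the SW counters followed by the
 * per-vport HW counters; get_strings, get_sset_count and
 * get_ethtool_stats below must walk both arrays in the same order.
 */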
static void mlx5e_rep_get_strings(struct net_device *dev,
                                  u32 stringset, uint8_t *data)
{
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               sw_rep_stats_desc[i].format);
                for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               vport_rep_stats_desc[j].format);
                break;
        }
}

static void mlx5e_vf_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct rtnl_link_stats64 *vport_stats;
        struct ifla_vf_stats vf_stats;
        int err;

        err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
        if (err) {
                pr_warn("vport %d error %d reading stats\n", rep->vport, err);
                return;
        }

        vport_stats = &priv->stats.vf_vport;
        /* flip tx/rx as we are reporting the counters for the switch vport */
        vport_stats->rx_packets = vf_stats.tx_packets;
        vport_stats->rx_bytes = vf_stats.tx_bytes;
        vport_stats->tx_packets = vf_stats.rx_packets;
        vport_stats->tx_bytes = vf_stats.rx_bytes;
}

static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct rtnl_link_stats64 *vport_stats;

        mlx5e_grp_802_3_update_stats(priv);

        vport_stats = &priv->stats.vf_vport;

        vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
        vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
        vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
        vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}

static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        if (rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_uplink_rep_update_hw_counters(priv);
        else
                mlx5e_vf_rep_update_hw_counters(priv);
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct rtnl_link_stats64 stats64 = {};

        memset(s, 0, sizeof(*s));
        mlx5e_fold_sw_stats64(priv, &stats64);

        s->rx_packets = stats64.rx_packets;
        s->rx_bytes = stats64.rx_bytes;
        s->tx_packets = stats64.tx_packets;
        s->tx_bytes = stats64.tx_bytes;
        s->tx_queue_dropped = stats64.tx_dropped;
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats, u64 *data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int i, j;

        if (!data)
                return;

        mutex_lock(&priv->state_lock);
        mlx5e_rep_update_sw_counters(priv);
        mlx5e_rep_update_hw_counters(priv);
        mutex_unlock(&priv->state_lock);

        for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
                                               sw_rep_stats_desc, i);

        for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
                                               vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
        default:
                return -EOPNOTSUPP;
        }
}

static void mlx5e_rep_get_ringparam(struct net_device *dev,
                                    struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        return mlx5e_ethtool_set_ringparam(priv, param);
}

static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
                                           struct mlx5_flow_destination *dest)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;

        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      dest);
        if (IS_ERR(flow_rule))
                return PTR_ERR(flow_rule);

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        rpriv->vport_rx_rule = flow_rule;
        return 0;
}

static void mlx5e_rep_get_channels(struct net_device *dev,
                                   struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
                                  struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        u16 curr_channels_amount = priv->channels.params.num_channels;
        u32 new_channels_amount = ch->combined_count;
        struct mlx5_flow_destination new_dest;
        int err = 0;

        err = mlx5e_ethtool_set_channels(priv, ch);
        if (err)
                return err;

        if (curr_channels_amount == 1 && new_channels_amount > 1) {
                new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                new_dest.ft = priv->fs.ttc.ft.t;
        } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
                new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
                new_dest.tir_num = priv->direct_tir[0].tirn;
        } else {
                return 0;
        }

        err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
        if (err) {
                netdev_warn(priv->netdev, "Failed to update vport rx rule when going from (%d) channels to (%d) channels\n",
                            curr_channels_amount, new_channels_amount);
                return err;
        }

        return 0;
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
                                  struct ethtool_coalesce *coal)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_coalesce(priv, coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
                                  struct ethtool_coalesce *coal)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_coalesce(priv, coal);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
                                            struct ethtool_pauseparam *pauseparam)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
                                           struct ethtool_pauseparam *pauseparam)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
                                               struct ethtool_link_ksettings *link_ksettings)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}

static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
                                               const struct ethtool_link_ksettings *link_ksettings)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}

static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
        .get_drvinfo = mlx5e_rep_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_strings = mlx5e_rep_get_strings,
        .get_sset_count = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
        .get_ringparam = mlx5e_rep_get_ringparam,
        .set_ringparam = mlx5e_rep_set_ringparam,
        .get_channels = mlx5e_rep_get_channels,
        .set_channels = mlx5e_rep_set_channels,
        .get_coalesce = mlx5e_rep_get_coalesce,
        .set_coalesce = mlx5e_rep_set_coalesce,
        .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
        .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
        .get_drvinfo = mlx5e_rep_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_strings = mlx5e_rep_get_strings,
        .get_sset_count = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
        .get_ringparam = mlx5e_rep_get_ringparam,
        .set_ringparam = mlx5e_rep_set_ringparam,
        .get_channels = mlx5e_rep_get_channels,
        .set_channels = mlx5e_rep_set_channels,
        .get_coalesce = mlx5e_rep_get_coalesce,
        .set_coalesce = mlx5e_rep_set_coalesce,
        .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
        .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
        .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
        .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
        .get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
        .set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};

static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
                                        struct netdev_phys_item_id *ppid)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct net_device *uplink_upper = NULL;
        struct mlx5e_priv *uplink_priv = NULL;
        struct net_device *uplink_dev;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
        if (uplink_dev) {
                uplink_upper = netdev_master_upper_dev_get(uplink_dev);
                uplink_priv = netdev_priv(uplink_dev);
        }

        ppid->id_len = ETH_ALEN;
        if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
                ether_addr_copy(ppid->id, uplink_upper->dev_addr);
        } else {
                struct mlx5e_rep_priv *rpriv = priv->ppriv;
                struct mlx5_eswitch_rep *rep = rpriv->rep;

                ether_addr_copy(ppid->id, rep->hw_id);
        }

        return 0;
}

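/* "sqs2vport": traffic a representor transmits on its own SQs is steered
 * by send-to-vport e-switch rules to the vport the representor stands
 * for; these helpers create and tear down one such rule per SQ.
 */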
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_sq *rep_sq, *tmp;
        struct mlx5e_rep_priv *rpriv;

        if (esw->mode != SRIOV_OFFLOADS)
                return;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
                mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
                list_del(&rep_sq->list);
                kfree(rep_sq);
        }
}

static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 u32 *sqns_array, int sqns_num)
{
        struct mlx5_flow_handle *flow_rule;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_rep_sq *rep_sq;
        int err;
        int i;

        if (esw->mode != SRIOV_OFFLOADS)
                return 0;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        for (i = 0; i < sqns_num; i++) {
                rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
                if (!rep_sq) {
                        err = -ENOMEM;
                        goto out_err;
                }

                /* Add re-inject rule to the PF/representor sqs */
                flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
                                                                rep->vport,
                                                                sqns_array[i]);
                if (IS_ERR(flow_rule)) {
                        err = PTR_ERR(flow_rule);
                        kfree(rep_sq);
                        goto out_err;
                }
                rep_sq->send_to_vport_rule = flow_rule;
                list_add(&rep_sq->list, &rpriv->vport_sqs_list);
        }
        return 0;

out_err:
        mlx5e_sqs2vport_stop(esw, rep);
        return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5e_channel *c;
        int n, tc, num_sqs = 0;
        int err = -ENOMEM;
        u32 *sqs;

        sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
        if (!sqs)
                goto out;

        for (n = 0; n < priv->channels.num; n++) {
                c = priv->channels.c[n];
                for (tc = 0; tc < c->num_tc; tc++)
                        sqs[num_sqs++] = c->sq[tc].sqn;
        }

        err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
        kfree(sqs);

out:
        if (err)
                netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
        return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        mlx5e_sqs2vport_stop(esw, rep);
}

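/* Neighbour tracking: encap (tunnel) flows are only valid while the
 * neighbour behind the tunnel destination is reachable. The stats work
 * below samples flow counters and reports activity back to the kernel
 * neighbour so it is not expired while hardware is still using it.
 */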
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
        unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
                                                DELAY_PROBE_TIME);
#else
        unsigned long ipv6_interval = ~0UL;
#endif
        unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
                                                DELAY_PROBE_TIME);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);

        rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
        mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        mlx5_fc_queue_stats_work(priv->mdev,
                                 &neigh_update->neigh_stats_work,
                                 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
        struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
                                                    neigh_update.neigh_stats_work.work);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe;

        rtnl_lock();
        if (!list_empty(&rpriv->neigh_update.neigh_list))
                mlx5e_rep_queue_neigh_stats_work(priv);

        list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
                mlx5e_tc_update_neigh_used_value(nhe);

        rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
        refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
        if (refcount_dec_and_test(&nhe->refcnt))
                kfree(nhe);
}

static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
                                   struct mlx5e_encap_entry *e,
                                   bool neigh_connected,
                                   unsigned char ha[ETH_ALEN])
{
        struct ethhdr *eth = (struct ethhdr *)e->encap_header;

        ASSERT_RTNL();

        if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
            (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
                mlx5e_tc_encap_flows_del(priv, e);

        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);
                /* Update the encap source mac, in case the flows were
                 * deleted because the encap source mac changed.
                 */
                ether_addr_copy(eth->h_source, e->route_dev->dev_addr);

                mlx5e_tc_encap_flows_add(priv, e);
        }
}

static void mlx5e_rep_neigh_update(struct work_struct *work)
{
        struct mlx5e_neigh_hash_entry *nhe =
                container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
        struct neighbour *n = nhe->n;
        struct mlx5e_encap_entry *e;
        unsigned char ha[ETH_ALEN];
        struct mlx5e_priv *priv;
        bool neigh_connected;
        bool encap_connected;
        u8 nud_state, dead;

        rtnl_lock();

        /* If these parameters are changed after we release the lock,
         * we'll receive another event letting us know about it.
         * We use this lock to avoid inconsistency between the neigh validity
         * and its hw address.
         */
        read_lock_bh(&n->lock);
        memcpy(ha, n->ha, ETH_ALEN);
        nud_state = n->nud_state;
        dead = n->dead;
        read_unlock_bh(&n->lock);

        neigh_connected = (nud_state & NUD_VALID) && !dead;

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
                priv = netdev_priv(e->out_dev);

                if (encap_connected != neigh_connected ||
                    !ether_addr_equal(e->h_dest, ha))
                        mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
        }
        mlx5e_rep_neigh_entry_release(nhe);
        rtnl_unlock();
        neigh_release(n);
}

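/* Indirect TC block offload: tunnel and vlan netdevs do not belong to
 * mlx5, so the uplink representor registers callbacks on their TC blocks
 * in order to offload flower rules installed on those devices.
 */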
static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
                                 struct net_device *netdev)
{
        struct mlx5e_rep_indr_block_priv *cb_priv;

        /* All callback list access should be protected by RTNL. */
        ASSERT_RTNL();

        list_for_each_entry(cb_priv,
                            &rpriv->uplink_priv.tc_indr_block_priv_list,
                            list)
                if (cb_priv->netdev == netdev)
                        return cb_priv;

        return NULL;
}

static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
        struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;

        list_for_each_entry_safe(cb_priv, temp, head, list) {
                mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
                kfree(cb_priv);
        }
}

static int
mlx5e_rep_indr_offload(struct net_device *netdev,
                       struct tc_cls_flower_offload *flower,
                       struct mlx5e_rep_indr_block_priv *indr_priv)
{
        struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
        int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
        int err = 0;

        switch (flower->command) {
        case TC_CLSFLOWER_REPLACE:
                err = mlx5e_configure_flower(netdev, priv, flower, flags);
                break;
        case TC_CLSFLOWER_DESTROY:
                err = mlx5e_delete_flower(netdev, priv, flower, flags);
                break;
        case TC_CLSFLOWER_STATS:
                err = mlx5e_stats_flower(netdev, priv, flower, flags);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
                                         void *type_data, void *indr_priv)
{
        struct mlx5e_rep_indr_block_priv *priv = indr_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
        default:
                return -EOPNOTSUPP;
        }
}

static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
                              struct mlx5e_rep_priv *rpriv,
                              struct tc_block_offload *f)
{
        struct mlx5e_rep_indr_block_priv *indr_priv;
        int err = 0;

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
                if (indr_priv)
                        return -EEXIST;

                indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
                if (!indr_priv)
                        return -ENOMEM;

                indr_priv->netdev = netdev;
                indr_priv->rpriv = rpriv;
                list_add(&indr_priv->list,
                         &rpriv->uplink_priv.tc_indr_block_priv_list);

                err = tcf_block_cb_register(f->block,
                                            mlx5e_rep_indr_setup_block_cb,
                                            indr_priv, indr_priv, f->extack);
                if (err) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
                }

                return err;
        case TC_BLOCK_UNBIND:
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
                if (!indr_priv)
                        return -ENOENT;

                tcf_block_cb_unregister(f->block,
                                        mlx5e_rep_indr_setup_block_cb,
                                        indr_priv);
                list_del(&indr_priv->list);
                kfree(indr_priv);

                return 0;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static
int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
                               enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
                                                     type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
                                         struct net_device *netdev)
{
        int err;

        err = __tc_indr_block_cb_register(netdev, rpriv,
                                          mlx5e_rep_indr_setup_tc_cb,
                                          rpriv);
        if (err) {
                struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

                mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
                              netdev_name(netdev), err);
        }
        return err;
}

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
                                            struct net_device *netdev)
{
        __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
                                      rpriv);
}

static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
                                         unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    uplink_priv.netdevice_nb);
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

        if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
            !is_vlan_dev(netdev))
                return NOTIFY_OK;

        switch (event) {
        case NETDEV_REGISTER:
                mlx5e_rep_indr_register_block(rpriv, netdev);
                break;
        case NETDEV_UNREGISTER:
                mlx5e_rep_indr_unregister_block(rpriv, netdev);
                break;
        }
        return NOTIFY_OK;
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh);

static int mlx5e_rep_netevent_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    neigh_update.netevent_nb);
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe = NULL;
        struct mlx5e_neigh m_neigh = {};
        struct neigh_parms *p;
        struct neighbour *n;
        bool found = false;

        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
                if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
                if (n->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                m_neigh.dev = n->dev;
                m_neigh.family = n->ops->family;
                memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

                /* We are in atomic context and can't take RTNL mutex, so use
                 * spin_lock_bh to lookup the neigh table. bh is used since
                 * netevent can be called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
                if (!nhe) {
                        spin_unlock_bh(&neigh_update->encap_lock);
                        return NOTIFY_DONE;
                }

                /* This assignment is valid as long as the neigh reference
                 * is taken
                 */
                nhe->n = n;

                /* Take a reference to ensure the neighbour and mlx5 encap
                 * entry won't be destructed until we drop the reference in
                 * delayed work.
                 */
                neigh_hold(n);
                mlx5e_rep_neigh_entry_hold(nhe);

                if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
                        mlx5e_rep_neigh_entry_release(nhe);
                        neigh_release(n);
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                break;

        case NETEVENT_DELAY_PROBE_TIME_UPDATE:
                p = ptr;

                /* We check the device is present since we don't care about
                 * changes in the default table, we only care about changes
                 * done per device delay probe time parameter.
                 */
#if IS_ENABLED(CONFIG_IPV6)
                if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
                if (!p->dev || p->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                /* We are in atomic context and can't take RTNL mutex,
                 * so use spin_lock_bh to walk the neigh list and look for
                 * the relevant device. bh is used since netevent can be
                 * called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
                        if (p->dev == nhe->m_neigh.dev) {
                                found = true;
                                break;
                        }
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                if (!found)
                        return NOTIFY_DONE;

                neigh_update->min_interval = min_t(unsigned long,
                                                   NEIGH_VAR(p, DELAY_PROBE_TIME),
                                                   neigh_update->min_interval);
                mlx5_fc_update_sampling_interval(priv->mdev,
                                                 neigh_update->min_interval);
                break;
        }
        return NOTIFY_DONE;
}

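/* Neighbour entries are hashed by the full mlx5e_neigh key (device,
 * family, destination IP), matching the lookups done from the netevent
 * handler above.
 */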
static const struct rhashtable_params mlx5e_neigh_ht_params = {
        .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
        .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
        .key_len = sizeof(struct mlx5e_neigh),
        .automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        int err;

        err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
        if (err)
                return err;

        INIT_LIST_HEAD(&neigh_update->neigh_list);
        spin_lock_init(&neigh_update->encap_lock);
        INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
                          mlx5e_rep_neigh_stats_work);
        mlx5e_rep_neigh_update_init_interval(rpriv);

        rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
        err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
        if (err)
                goto out_err;
        return 0;

out_err:
        rhashtable_destroy(&neigh_update->neigh_ht);
        return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

        unregister_netevent_notifier(&neigh_update->netevent_nb);

        flush_workqueue(priv->wq); /* flush neigh update works */

        cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

        rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
                                        struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;

        err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
                                     &nhe->rhash_node,
                                     mlx5e_neigh_ht_params);
        if (err)
                return err;

        list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

        return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
                                         struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        spin_lock_bh(&rpriv->neigh_update.encap_lock);

        list_del(&nhe->neigh_list);

        rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
                               &nhe->rhash_node,
                               mlx5e_neigh_ht_params);
        spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
                                      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
                                        struct mlx5e_encap_entry *e,
                                        struct mlx5e_neigh_hash_entry **nhe)
{
        int err;

        *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
        if (!*nhe)
                return -ENOMEM;

        memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
        INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
        INIT_LIST_HEAD(&(*nhe)->encap_list);
        refcount_set(&(*nhe)->refcnt, 1);

        err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
        if (err)
                goto out_free;
        return 0;

out_free:
        kfree(*nhe);
        return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
                                          struct mlx5e_neigh_hash_entry *nhe)
{
        /* The neigh hash entry must be removed from the hash table regardless
         * of the reference count value, so it won't be found by the next
         * neigh notification call. The neigh hash entry reference count is
         * incremented only during creation and neigh notification calls and
         * protects from freeing the nhe struct.
         */
        mlx5e_rep_neigh_entry_remove(priv, nhe);
        mlx5e_rep_neigh_entry_release(nhe);
}

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
        struct mlx5e_neigh_hash_entry *nhe;
        int err;

        err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
        if (err)
                return err;
        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
        if (!nhe) {
                err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
                if (err) {
                        mlx5_tun_entropy_refcount_dec(tun_entropy,
                                                      e->reformat_type);
                        return err;
                }
        }
        list_add(&e->encap_list, &nhe->encap_list);
        return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
                                  struct mlx5e_encap_entry *e)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
        struct mlx5e_neigh_hash_entry *nhe;

        list_del(&e->encap_list);
        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

        if (list_empty(&nhe->encap_list))
                mlx5e_rep_neigh_entry_destroy(priv, nhe);
        mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

static int mlx5e_vf_rep_open(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(dev);
        if (err)
                goto unlock;

        if (!mlx5_modify_vport_admin_state(priv->mdev,
                                           MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                           rep->vport, 1,
                                           MLX5_VPORT_ADMIN_STATE_UP))
                netif_carrier_on(dev);

unlock:
        mutex_unlock(&priv->state_lock);
        return err;
}

static int mlx5e_vf_rep_close(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        mutex_lock(&priv->state_lock);
        mlx5_modify_vport_admin_state(priv->mdev,
                                      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                      rep->vport, 1,
                                      MLX5_VPORT_ADMIN_STATE_DOWN);
        ret = mlx5e_close_locked(dev);
        mutex_unlock(&priv->state_lock);
        return ret;
}

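/* Switchdev port naming: the uplink representor reports "p<pf>", a VF
 * representor reports "pf<pf>vf<vf>" (vport number 1 maps to VF 0).
 */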
static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
                                        char *buf, size_t len)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        unsigned int fn;
        int ret;

        fn = PCI_FUNC(priv->mdev->pdev->devfn);
        if (fn >= MLX5_MAX_PORTS)
                return -EOPNOTSUPP;

        if (rep->vport == MLX5_VPORT_UPLINK)
                ret = snprintf(buf, len, "p%d", fn);
        else
                ret = snprintf(buf, len, "pf%dvf%d", fn, rep->vport - 1);

        if (ret >= len)
                return -EOPNOTSUPP;

        return 0;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
                              struct tc_cls_flower_offload *cls_flower, int flags)
{
        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
                return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
                                              flags);
        case TC_CLSFLOWER_DESTROY:
                return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
                                           flags);
        case TC_CLSFLOWER_STATS:
                return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
                                          flags);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct mlx5e_priv *priv = cb_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
                                                     MLX5E_TC_ESW_OFFLOAD);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_block(struct net_device *dev,
                                    struct tc_block_offload *f)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
                                             priv, priv, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                              void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_setup_tc_block(dev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;

        if (!rpriv) /* non vport rep mlx5e instances don't use this field */
                return false;

        rep = rpriv->rep;
        return (rep->vport == MLX5_VPORT_UPLINK);
}

static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return true;
        }

        return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
                     struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_fold_sw_stats64(priv, stats);
        return 0;
}

static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
                                       void *sp)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return mlx5e_get_sw_stats64(dev, sp);
        }

        return -EINVAL;
}

static void
mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        /* update HW stats in background for next time */
        mlx5e_queue_update_stats(priv);
        memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_vf_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
        return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
        return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
}

static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
{
        struct sockaddr *saddr = addr;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        return 0;
}

static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
                                        __be16 vlan_proto)
{
        netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");

        if (vlan != 0)
                return -EOPNOTSUPP;

        /* allow setting 0-vid for compatibility with libvirt */
        return 0;
}

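/* VF representors expose a minimal ndo set; the uplink representor
 * additionally owns the physical port (MAC address, UDP tunnel ports and
 * legacy VF configuration).
 */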
static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
        .ndo_open = mlx5e_vf_rep_open,
        .ndo_stop = mlx5e_vf_rep_close,
        .ndo_start_xmit = mlx5e_xmit,
        .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
        .ndo_setup_tc = mlx5e_rep_setup_tc,
        .ndo_get_stats64 = mlx5e_vf_rep_get_stats,
        .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
        .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
        .ndo_change_mtu = mlx5e_vf_rep_change_mtu,
        .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
};

static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
        .ndo_open = mlx5e_open,
        .ndo_stop = mlx5e_close,
        .ndo_start_xmit = mlx5e_xmit,
        .ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
        .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
        .ndo_setup_tc = mlx5e_rep_setup_tc,
        .ndo_get_stats64 = mlx5e_get_stats,
        .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
        .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
        .ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
        .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
        .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
        .ndo_features_check = mlx5e_features_check,
        .ndo_set_vf_mac = mlx5e_set_vf_mac,
        .ndo_set_vf_rate = mlx5e_set_vf_rate,
        .ndo_get_vf_config = mlx5e_get_vf_config,
        .ndo_get_vf_stats = mlx5e_get_vf_stats,
        .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
        .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id,
};

bool mlx5e_eswitch_rep(struct net_device *netdev)
{
        if (netdev->netdev_ops == &mlx5e_netdev_ops_vf_rep ||
            netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
                return true;

        return false;
}

static void mlx5e_build_rep_params(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_params *params;

        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

        params = &priv->channels.params;
        params->hard_mtu = MLX5E_ETH_HARD_MTU;
        params->sw_mtu = netdev->mtu;

        /* SQ */
        if (rep->vport == MLX5_VPORT_UPLINK)
                params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        else
                params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

        /* RQ */
        mlx5e_build_rq_params(mdev, params);

        /* CQ moderation params */
        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

        params->num_tc = 1;
        params->tunneled_offload_en = false;

        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

        /* RSS */
        mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_core_dev *mdev = priv->mdev;

        if (rep->vport == MLX5_VPORT_UPLINK) {
                SET_NETDEV_DEV(netdev, mdev->device);
                netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
                /* we want a persistent mac for the uplink rep */
                mlx5_query_mac_address(mdev, netdev->dev_addr);
                netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
                if (MLX5_CAP_GEN(mdev, qos))
                        netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
        } else {
                netdev->netdev_ops = &mlx5e_netdev_ops_vf_rep;
                eth_hw_addr_random(netdev);
                netdev->ethtool_ops = &mlx5e_vf_rep_ethtool_ops;
        }

        netdev->watchdog_timeo = 15 * HZ;

        netdev->features |= NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
        netdev->hw_features |= NETIF_F_HW_TC;

        netdev->hw_features |= NETIF_F_SG;
        netdev->hw_features |= NETIF_F_IP_CSUM;
        netdev->hw_features |= NETIF_F_IPV6_CSUM;
        netdev->hw_features |= NETIF_F_GRO;
        netdev->hw_features |= NETIF_F_TSO;
        netdev->hw_features |= NETIF_F_TSO6;
        netdev->hw_features |= NETIF_F_RXCSUM;

        if (rep->vport != MLX5_VPORT_UPLINK)
                netdev->features |= NETIF_F_VLAN_CHALLENGED;

        netdev->features |= netdev->hw_features;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
                          struct net_device *netdev,
                          const struct mlx5e_profile *profile,
                          void *ppriv)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
        if (err)
                return err;

        priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

        mlx5e_build_rep_params(netdev);
        mlx5e_build_rep_netdev(netdev);

        mlx5e_timestamp_init(priv);

        return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
        mlx5e_netdev_cleanup(priv->netdev, priv);
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
        struct ttc_params ttc_params = {};
        int tt, err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        /* The inner_ttc in the ttc params is intentionally not set */
        ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
        mlx5e_set_ttc_ft_params(&ttc_params);
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

        err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
                return err;
        }
        return 0;
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_destination dest;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = priv->direct_tir[0].tirn;
        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      &dest);
        if (IS_ERR(flow_rule))
                return PTR_ERR(flow_rule);
        rpriv->vport_rx_rule = flow_rule;
        return 0;
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        mlx5e_init_l2_addr(priv);

        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                return err;
        }

        err = mlx5e_create_indirect_rqt(priv);
        if (err)
                goto err_close_drop_rq;

        err = mlx5e_create_direct_rqts(priv);
        if (err)
                goto err_destroy_indirect_rqts;

        err = mlx5e_create_indirect_tirs(priv, false);
        if (err)
                goto err_destroy_direct_rqts;

        err = mlx5e_create_direct_tirs(priv);
        if (err)
                goto err_destroy_indirect_tirs;

        err = mlx5e_create_rep_ttc_table(priv);
        if (err)
                goto err_destroy_direct_tirs;

        err = mlx5e_create_rep_vport_rx_rule(priv);
        if (err)
                goto err_destroy_ttc_table;

        return 0;

err_destroy_ttc_table:
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
        mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
        return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_indirect_tirs(priv, false);
        mlx5e_destroy_direct_rqts(priv);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv;
        int tc, err;

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
                return err;
        }

        if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
                uplink_priv = &rpriv->uplink_priv;

                INIT_LIST_HEAD(&uplink_priv->unready_flows);

                /* init shared tc flow table */
                err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
                if (err)
                        goto destroy_tises;

                mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

                /* init indirect block notifications */
                INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
                uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
                err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
                if (err) {
                        mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
                        goto tc_esw_cleanup;
                }
        }

        return 0;

tc_esw_cleanup:
        mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
        for (tc = 0; tc < priv->profile->max_tc; tc++)
                mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
        return err;
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int tc;

        for (tc = 0; tc < priv->profile->max_tc; tc++)
                mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

        if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
                /* clean indirect TC block notifications */
                unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
                mlx5e_rep_indr_clean_block_privs(rpriv);

                /* delete shared tc flow table */
                mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
        }
}

static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
{
        mlx5e_set_netdev_mtu_boundaries(priv);
}

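/* The uplink representor reacts to firmware port-change events (to update
 * carrier) and to LAG port-affinity changes (to re-offload flows).
 */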
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
        struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

        if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
                struct mlx5_eqe *eqe = data;

                switch (eqe->sub_type) {
                case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        queue_work(priv->wq, &priv->update_carrier_work);
                        break;
                default:
                        return NOTIFY_DONE;
                }

                return NOTIFY_OK;
        }

        if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
                struct mlx5e_rep_priv *rpriv = priv->ppriv;

                queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        u16 max_mtu;

        netdev->min_mtu = ETH_MIN_MTU;
        mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
        netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
        mlx5e_set_dev_port_mtu(priv);

        INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
                  mlx5e_tc_reoffload_flows_work);

        mlx5_lag_add(mdev, netdev);
        priv->events_nb.notifier_call = uplink_rep_async_event;
        mlx5_notifier_register(mdev, &priv->events_nb);
#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_initialize(priv);
        mlx5e_dcbnl_init_app(priv);
#endif
}

1684
1685static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
1686{
1687 struct mlx5_core_dev *mdev = priv->mdev;
b4a23329 1688 struct mlx5e_rep_priv *rpriv = priv->ppriv;
b36cdb42
OG
1689
1690#ifdef CONFIG_MLX5_CORE_EN_DCB
1691 mlx5e_dcbnl_delete_app(priv);
1692#endif
1693 mlx5_notifier_unregister(mdev, &priv->events_nb);
b4a23329 1694 cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
b36cdb42
OG
1695 mlx5_lag_remove(mdev);
1696}
1697
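/* The two profiles share most callbacks; the uplink profile additionally
 * manages the physical port: MTU bounds, carrier, DCB and async events.
 */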
static const struct mlx5e_profile mlx5e_vf_rep_profile = {
        .init = mlx5e_init_rep,
        .cleanup = mlx5e_cleanup_rep,
        .init_rx = mlx5e_init_rep_rx,
        .cleanup_rx = mlx5e_cleanup_rep_rx,
        .init_tx = mlx5e_init_rep_tx,
        .cleanup_tx = mlx5e_cleanup_rep_tx,
        .enable = mlx5e_vf_rep_enable,
        .update_stats = mlx5e_vf_rep_update_hw_counters,
        .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc = 1,
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
        .init = mlx5e_init_rep,
        .cleanup = mlx5e_cleanup_rep,
        .init_rx = mlx5e_init_rep_rx,
        .cleanup_rx = mlx5e_cleanup_rep_rx,
        .init_tx = mlx5e_init_rep_tx,
        .cleanup_tx = mlx5e_cleanup_rep_tx,
        .enable = mlx5e_uplink_rep_enable,
        .disable = mlx5e_uplink_rep_disable,
        .update_stats = mlx5e_uplink_rep_update_hw_counters,
        .update_carrier = mlx5e_update_carrier,
        .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc = MLX5E_MAX_NUM_TC,
};

/* e-Switch vport representors */
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        const struct mlx5e_profile *profile;
        struct mlx5e_rep_priv *rpriv;
        struct net_device *netdev;
        int nch, err;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return -ENOMEM;

        /* rpriv->rep to be looked up when profile->init() is called */
        rpriv->rep = rep;

        nch = mlx5e_get_max_num_channels(dev);
        profile = (rep->vport == MLX5_VPORT_UPLINK) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
        netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
        if (!netdev) {
                pr_warn("Failed to create representor netdev for vport %d\n",
                        rep->vport);
                kfree(rpriv);
                return -EINVAL;
        }

        rpriv->netdev = netdev;
        rep->rep_data[REP_ETH].priv = rpriv;
        INIT_LIST_HEAD(&rpriv->vport_sqs_list);

        if (rep->vport == MLX5_VPORT_UPLINK) {
                err = mlx5e_create_mdev_resources(dev);
                if (err)
                        goto err_destroy_netdev;
        }

        err = mlx5e_attach_netdev(netdev_priv(netdev));
        if (err) {
                pr_warn("Failed to attach representor netdev for vport %d\n",
                        rep->vport);
                goto err_destroy_mdev_resources;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err) {
                pr_warn("Failed to initialize neighbours handling for vport %d\n",
                        rep->vport);
                goto err_detach_netdev;
        }

        err = register_netdev(netdev);
        if (err) {
                pr_warn("Failed to register representor netdev for vport %d\n",
                        rep->vport);
                goto err_neigh_cleanup;
        }

        return 0;

err_neigh_cleanup:
        mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
        mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_mdev_resources:
        if (rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_destroy_mdev_resources(dev);

err_destroy_netdev:
        mlx5e_destroy_netdev(netdev_priv(netdev));
        kfree(rpriv);
        return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        void *ppriv = priv->ppriv;

        unregister_netdev(netdev);
        mlx5e_rep_neigh_cleanup(rpriv);
        mlx5e_detach_netdev(priv);
        if (rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_destroy_mdev_resources(priv->mdev);
        mlx5e_destroy_netdev(priv);
        kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv;

        rpriv = mlx5e_rep_to_rep_priv(rep);

        return rpriv->netdev;
}

static const struct mlx5_eswitch_rep_ops rep_ops = {
        .load = mlx5e_vport_rep_load,
        .unload = mlx5e_vport_rep_unload,
        .get_proto_dev = mlx5e_vport_rep_get_proto_dev
};

void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;

        mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
}

void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;

        mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}