/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <net/devlink.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"

#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
        max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

struct mlx5e_rep_indr_block_priv {
        struct net_device *netdev;
        struct mlx5e_rep_priv *rpriv;

        struct list_head list;
};

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
                                            struct net_device *netdev);

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;

        strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%d.%d.%04d (%.16s)",
                 fw_rev_maj(mdev), fw_rev_min(mdev),
                 fw_rev_sub(mdev), mdev->board_id);
}

static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
                                         struct ethtool_drvinfo *drvinfo)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_rep_get_drvinfo(dev, drvinfo);
        strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
                sizeof(drvinfo->bus_info));
}

static const struct counter_desc sw_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
        u64 vport_rx_packets;
        u64 vport_tx_packets;
        u64 vport_rx_bytes;
        u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
                                  u32 stringset, uint8_t *data)
{
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               sw_rep_stats_desc[i].format);
                for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               vport_rep_stats_desc[j].format);
                break;
        }
}

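/* The representor's "HW" counters are the e-switch vport counters of the
 * VF/PF vport it represents; rx and tx are therefore reported swapped
 * relative to the vport (what the vport transmits, the rep receives).
 */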
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct rtnl_link_stats64 *vport_stats;
        struct ifla_vf_stats vf_stats;
        int err;

        err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
        if (err) {
                pr_warn("vport %d error %d reading stats\n", rep->vport, err);
                return;
        }

        vport_stats = &priv->stats.vf_vport;
        /* flip tx/rx as we are reporting the counters for the switch vport */
        vport_stats->rx_packets = vf_stats.tx_packets;
        vport_stats->rx_bytes = vf_stats.tx_bytes;
        vport_stats->tx_packets = vf_stats.rx_packets;
        vport_stats->tx_bytes = vf_stats.rx_bytes;
}

static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct rtnl_link_stats64 *vport_stats;

        mlx5e_grp_802_3_update_stats(priv);

        vport_stats = &priv->stats.vf_vport;

        vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
        vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
        vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
        vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct rtnl_link_stats64 stats64 = {};

        memset(s, 0, sizeof(*s));
        mlx5e_fold_sw_stats64(priv, &stats64);

        s->rx_packets = stats64.rx_packets;
        s->rx_bytes = stats64.rx_bytes;
        s->tx_packets = stats64.tx_packets;
        s->tx_bytes = stats64.tx_bytes;
        s->tx_queue_dropped = stats64.tx_dropped;
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats, u64 *data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int i, j;

        if (!data)
                return;

        mutex_lock(&priv->state_lock);
        mlx5e_rep_update_sw_counters(priv);
        priv->profile->update_stats(priv);
        mutex_unlock(&priv->state_lock);

        for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
                                               sw_rep_stats_desc, i);

        for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
                                               vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
        default:
                return -EOPNOTSUPP;
        }
}

static void mlx5e_rep_get_ringparam(struct net_device *dev,
                                    struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *param)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        return mlx5e_ethtool_set_ringparam(priv, param);
}

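/* Swap the representor's vport RX steering rule for one pointing at a new
 * destination: the replacement rule is created before the old rule is
 * deleted, so vport RX traffic keeps being steered during the switch.
 */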
static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
                                           struct mlx5_flow_destination *dest)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;

        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      dest);
        if (IS_ERR(flow_rule))
                return PTR_ERR(flow_rule);

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        rpriv->vport_rx_rule = flow_rule;
        return 0;
}

static void mlx5e_rep_get_channels(struct net_device *dev,
                                   struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_ethtool_get_channels(priv, ch);
}

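/* Moving between one channel and several (or back) changes where vport RX
 * traffic must land: the TTC flow table when RSS over multiple channels is
 * needed, or the single direct TIR; the vport RX rule is replaced to match.
 */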
static int mlx5e_rep_set_channels(struct net_device *dev,
                                  struct ethtool_channels *ch)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        u16 curr_channels_amount = priv->channels.params.num_channels;
        u32 new_channels_amount = ch->combined_count;
        struct mlx5_flow_destination new_dest;
        int err = 0;

        err = mlx5e_ethtool_set_channels(priv, ch);
        if (err)
                return err;

        if (curr_channels_amount == 1 && new_channels_amount > 1) {
                new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                new_dest.ft = priv->fs.ttc.ft.t;
        } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
                new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
                new_dest.tir_num = priv->direct_tir[0].tirn;
        } else {
                return 0;
        }

        err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
        if (err) {
                netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n",
                            curr_channels_amount, new_channels_amount);
                return err;
        }

        return 0;
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
                                  struct ethtool_coalesce *coal)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_coalesce(priv, coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
                                  struct ethtool_coalesce *coal)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_coalesce(priv, coal);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
                                            struct ethtool_pauseparam *pauseparam)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
                                           struct ethtool_pauseparam *pauseparam)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
                                               struct ethtool_link_ksettings *link_ksettings)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}

static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
                                               const struct ethtool_link_ksettings *link_ksettings)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
        .get_drvinfo = mlx5e_rep_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_strings = mlx5e_rep_get_strings,
        .get_sset_count = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
        .get_ringparam = mlx5e_rep_get_ringparam,
        .set_ringparam = mlx5e_rep_set_ringparam,
        .get_channels = mlx5e_rep_get_channels,
        .set_channels = mlx5e_rep_set_channels,
        .get_coalesce = mlx5e_rep_get_coalesce,
        .set_coalesce = mlx5e_rep_set_coalesce,
        .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
        .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
        .get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_strings = mlx5e_rep_get_strings,
        .get_sset_count = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
        .get_ringparam = mlx5e_rep_get_ringparam,
        .set_ringparam = mlx5e_rep_set_ringparam,
        .get_channels = mlx5e_rep_get_channels,
        .set_channels = mlx5e_rep_set_channels,
        .get_coalesce = mlx5e_rep_get_coalesce,
        .set_coalesce = mlx5e_rep_set_coalesce,
        .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
        .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
        .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
        .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
        .get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
        .set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};

static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
                                        struct netdev_phys_item_id *ppid)
{
        struct mlx5_eswitch *esw;
        struct mlx5e_priv *priv;
        u64 parent_id;

        priv = netdev_priv(dev);
        esw = priv->mdev->priv.eswitch;

        if (esw->mode == MLX5_ESWITCH_NONE)
                return -EOPNOTSUPP;

        parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
        ppid->id_len = sizeof(parent_id);
        memcpy(ppid->id, &parent_id, sizeof(parent_id));

        return 0;
}

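/* Packets sent on a representor must be forwarded by the e-switch to the
 * vport it represents. These helpers add and remove the per-SQ
 * "send to vport" rules that implement this re-injection.
 */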
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_sq *rep_sq, *tmp;
        struct mlx5e_rep_priv *rpriv;

        if (esw->mode != MLX5_ESWITCH_OFFLOADS)
                return;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
                mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
                list_del(&rep_sq->list);
                kfree(rep_sq);
        }
}

static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 u32 *sqns_array, int sqns_num)
{
        struct mlx5_flow_handle *flow_rule;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_rep_sq *rep_sq;
        int err;
        int i;

        if (esw->mode != MLX5_ESWITCH_OFFLOADS)
                return 0;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        for (i = 0; i < sqns_num; i++) {
                rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
                if (!rep_sq) {
                        err = -ENOMEM;
                        goto out_err;
                }

                /* Add re-inject rule to the PF/representor sqs */
                flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
                                                                rep->vport,
                                                                sqns_array[i]);
                if (IS_ERR(flow_rule)) {
                        err = PTR_ERR(flow_rule);
                        kfree(rep_sq);
                        goto out_err;
                }
                rep_sq->send_to_vport_rule = flow_rule;
                list_add(&rep_sq->list, &rpriv->vport_sqs_list);
        }
        return 0;

out_err:
        mlx5e_sqs2vport_stop(esw, rep);
        return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5e_channel *c;
        int n, tc, num_sqs = 0;
        int err = -ENOMEM;
        u32 *sqs;

        sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
        if (!sqs)
                goto out;

        for (n = 0; n < priv->channels.num; n++) {
                c = priv->channels.c[n];
                for (tc = 0; tc < c->num_tc; tc++)
                        sqs[num_sqs++] = c->sq[tc].sqn;
        }

        err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
        kfree(sqs);

out:
        if (err)
                netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
        return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        mlx5e_sqs2vport_stop(esw, rep);
}

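/* Neighbour "used" state is derived from HW flow counters, so the counters
 * are sampled at least as often as the shortest DELAY_PROBE_TIME configured
 * on the ARP (and, with IPv6, ND) neighbour tables.
 */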
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
        unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
                                                DELAY_PROBE_TIME);
#else
        unsigned long ipv6_interval = ~0UL;
#endif
        unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
                                                DELAY_PROBE_TIME);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);

        rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
        mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        mlx5_fc_queue_stats_work(priv->mdev,
                                 &neigh_update->neigh_stats_work,
                                 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
        struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
                                                    neigh_update.neigh_stats_work.work);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe;

        rtnl_lock();
        if (!list_empty(&rpriv->neigh_update.neigh_list))
                mlx5e_rep_queue_neigh_stats_work(priv);

        list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
                mlx5e_tc_update_neigh_used_value(nhe);

        rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
        refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
        if (refcount_dec_and_test(&nhe->refcnt))
                kfree(nhe);
}

static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
                                   struct mlx5e_encap_entry *e,
                                   bool neigh_connected,
                                   unsigned char ha[ETH_ALEN])
{
        struct ethhdr *eth = (struct ethhdr *)e->encap_header;

        ASSERT_RTNL();

        if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
            (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
                mlx5e_tc_encap_flows_del(priv, e);

        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);
                /* Update the encap source mac, in case that we delete
                 * the flows when encap source mac changed.
                 */
                ether_addr_copy(eth->h_source, e->route_dev->dev_addr);

                mlx5e_tc_encap_flows_add(priv, e);
        }
}

static void mlx5e_rep_neigh_update(struct work_struct *work)
{
        struct mlx5e_neigh_hash_entry *nhe =
                container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
        struct neighbour *n = nhe->n;
        struct mlx5e_encap_entry *e;
        unsigned char ha[ETH_ALEN];
        struct mlx5e_priv *priv;
        bool neigh_connected;
        bool encap_connected;
        u8 nud_state, dead;

        rtnl_lock();

        /* If these parameters are changed after we release the lock,
         * we'll receive another event letting us know about it.
         * We use this lock to avoid inconsistency between the neigh validity
         * and its hw address.
         */
        read_lock_bh(&n->lock);
        memcpy(ha, n->ha, ETH_ALEN);
        nud_state = n->nud_state;
        dead = n->dead;
        read_unlock_bh(&n->lock);

        neigh_connected = (nud_state & NUD_VALID) && !dead;

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
                priv = netdev_priv(e->out_dev);

                if (encap_connected != neigh_connected ||
                    !ether_addr_equal(e->h_dest, ha))
                        mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
        }
        mlx5e_rep_neigh_entry_release(nhe);
        rtnl_unlock();
        neigh_release(n);
}

static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
                                 struct net_device *netdev)
{
        struct mlx5e_rep_indr_block_priv *cb_priv;

        /* All callback list access should be protected by RTNL. */
        ASSERT_RTNL();

        list_for_each_entry(cb_priv,
                            &rpriv->uplink_priv.tc_indr_block_priv_list,
                            list)
                if (cb_priv->netdev == netdev)
                        return cb_priv;

        return NULL;
}

static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
        struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;

        list_for_each_entry_safe(cb_priv, temp, head, list) {
                mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
                kfree(cb_priv);
        }
}

static int
mlx5e_rep_indr_offload(struct net_device *netdev,
                       struct tc_cls_flower_offload *flower,
                       struct mlx5e_rep_indr_block_priv *indr_priv)
{
        struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
        int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
        int err = 0;

        switch (flower->command) {
        case TC_CLSFLOWER_REPLACE:
                err = mlx5e_configure_flower(netdev, priv, flower, flags);
                break;
        case TC_CLSFLOWER_DESTROY:
                err = mlx5e_delete_flower(netdev, priv, flower, flags);
                break;
        case TC_CLSFLOWER_STATS:
                err = mlx5e_stats_flower(netdev, priv, flower, flags);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
                                         void *type_data, void *indr_priv)
{
        struct mlx5e_rep_indr_block_priv *priv = indr_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
        default:
                return -EOPNOTSUPP;
        }
}

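/* Bind or unbind an indirect TC block offload for a tunnel/vlan netdev:
 * on bind, allocate a per-netdev private entry, track it on the uplink's
 * list and register the flower callback; on unbind, tear all of it down.
 */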
static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
                              struct mlx5e_rep_priv *rpriv,
                              struct tc_block_offload *f)
{
        struct mlx5e_rep_indr_block_priv *indr_priv;
        int err = 0;

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
                if (indr_priv)
                        return -EEXIST;

                indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
                if (!indr_priv)
                        return -ENOMEM;

                indr_priv->netdev = netdev;
                indr_priv->rpriv = rpriv;
                list_add(&indr_priv->list,
                         &rpriv->uplink_priv.tc_indr_block_priv_list);

                err = tcf_block_cb_register(f->block,
                                            mlx5e_rep_indr_setup_block_cb,
                                            indr_priv, indr_priv, f->extack);
                if (err) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
                }

                return err;
        case TC_BLOCK_UNBIND:
                indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
                if (!indr_priv)
                        return -ENOENT;

                tcf_block_cb_unregister(f->block,
                                        mlx5e_rep_indr_setup_block_cb,
                                        indr_priv);
                list_del(&indr_priv->list);
                kfree(indr_priv);

                return 0;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static
int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
                               enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
                                                     type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
                                         struct net_device *netdev)
{
        int err;

        err = __tc_indr_block_cb_register(netdev, rpriv,
                                          mlx5e_rep_indr_setup_tc_cb,
                                          rpriv);
        if (err) {
                struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

                mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
                              netdev_name(netdev), err);
        }
        return err;
}

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
                                            struct net_device *netdev)
{
        __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
                                      rpriv);
}

static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
                                         unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    uplink_priv.netdevice_nb);
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

        if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
            !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
                return NOTIFY_OK;

        switch (event) {
        case NETDEV_REGISTER:
                mlx5e_rep_indr_register_block(rpriv, netdev);
                break;
        case NETDEV_UNREGISTER:
                mlx5e_rep_indr_unregister_block(rpriv, netdev);
                break;
        }
        return NOTIFY_OK;
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh);

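/* Netevent handler: on neighbour updates, queue update work for entries we
 * track; on per-device DELAY_PROBE_TIME changes, tighten the flow counter
 * sampling interval accordingly. Runs in atomic (softirq) context.
 */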
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    neigh_update.netevent_nb);
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe = NULL;
        struct mlx5e_neigh m_neigh = {};
        struct neigh_parms *p;
        struct neighbour *n;
        bool found = false;

        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
                if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
                if (n->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                m_neigh.dev = n->dev;
                m_neigh.family = n->ops->family;
                memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

                /* We are in atomic context and can't take RTNL mutex, so use
                 * spin_lock_bh to lookup the neigh table. bh is used since
                 * netevent can be called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
                if (!nhe) {
                        spin_unlock_bh(&neigh_update->encap_lock);
                        return NOTIFY_DONE;
                }

                /* This assignment is valid as long as the neigh reference
                 * is taken
                 */
                nhe->n = n;

                /* Take a reference to ensure the neighbour and mlx5 encap
                 * entry won't be destructed until we drop the reference in
                 * delayed work.
                 */
                neigh_hold(n);
                mlx5e_rep_neigh_entry_hold(nhe);

                if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
                        mlx5e_rep_neigh_entry_release(nhe);
                        neigh_release(n);
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                break;

        case NETEVENT_DELAY_PROBE_TIME_UPDATE:
                p = ptr;

                /* We check the device is present since we don't care about
                 * changes in the default table, we only care about changes
                 * done to the per-device delay probe time parameter.
                 */
#if IS_ENABLED(CONFIG_IPV6)
                if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
                if (!p->dev || p->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                /* We are in atomic context and can't take RTNL mutex,
                 * so use spin_lock_bh to walk the neigh list and look for
                 * the relevant device. bh is used since netevent can be
                 * called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
                        if (p->dev == nhe->m_neigh.dev) {
                                found = true;
                                break;
                        }
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                if (!found)
                        return NOTIFY_DONE;

                neigh_update->min_interval = min_t(unsigned long,
                                                   NEIGH_VAR(p, DELAY_PROBE_TIME),
                                                   neigh_update->min_interval);
                mlx5_fc_update_sampling_interval(priv->mdev,
                                                 neigh_update->min_interval);
                break;
        }
        return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
        .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
        .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
        .key_len = sizeof(struct mlx5e_neigh),
        .automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        int err;

        err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
        if (err)
                return err;

        INIT_LIST_HEAD(&neigh_update->neigh_list);
        spin_lock_init(&neigh_update->encap_lock);
        INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
                          mlx5e_rep_neigh_stats_work);
        mlx5e_rep_neigh_update_init_interval(rpriv);

        rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
        err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
        if (err)
                goto out_err;
        return 0;

out_err:
        rhashtable_destroy(&neigh_update->neigh_ht);
        return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

        unregister_netevent_notifier(&neigh_update->netevent_nb);

        flush_workqueue(priv->wq); /* flush neigh update works */

        cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

        rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
                                        struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;

        err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
                                     &nhe->rhash_node,
                                     mlx5e_neigh_ht_params);
        if (err)
                return err;

        list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

        return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
                                         struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        spin_lock_bh(&rpriv->neigh_update.encap_lock);

        list_del(&nhe->neigh_list);

        rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
                               &nhe->rhash_node,
                               mlx5e_neigh_ht_params);
        spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
                                      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
                                        struct mlx5e_encap_entry *e,
                                        struct mlx5e_neigh_hash_entry **nhe)
{
        int err;

        *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
        if (!*nhe)
                return -ENOMEM;

        memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
        INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
        INIT_LIST_HEAD(&(*nhe)->encap_list);
        refcount_set(&(*nhe)->refcnt, 1);

        err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
        if (err)
                goto out_free;
        return 0;

out_free:
        kfree(*nhe);
        return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
                                          struct mlx5e_neigh_hash_entry *nhe)
{
        /* The neigh hash entry must be removed from the hash table regardless
         * of the reference count value, so it won't be found by the next
         * neigh notification call. The neigh hash entry reference count is
         * incremented only during creation and neigh notification calls and
         * protects from freeing the nhe struct.
         */
        mlx5e_rep_neigh_entry_remove(priv, nhe);
        mlx5e_rep_neigh_entry_release(nhe);
}

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
        struct mlx5e_neigh_hash_entry *nhe;
        int err;

        err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
        if (err)
                return err;
        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
        if (!nhe) {
                err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
                if (err) {
                        mlx5_tun_entropy_refcount_dec(tun_entropy,
                                                      e->reformat_type);
                        return err;
                }
        }
        list_add(&e->encap_list, &nhe->encap_list);
        return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
                                  struct mlx5e_encap_entry *e)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
        struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
        struct mlx5e_neigh_hash_entry *nhe;

        list_del(&e->encap_list);
        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

        if (list_empty(&nhe->encap_list))
                mlx5e_rep_neigh_entry_destroy(priv, nhe);
        mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

static int mlx5e_rep_open(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(dev);
        if (err)
                goto unlock;

        if (!mlx5_modify_vport_admin_state(priv->mdev,
                                           MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                           rep->vport, 1,
                                           MLX5_VPORT_ADMIN_STATE_UP))
                netif_carrier_on(dev);

unlock:
        mutex_unlock(&priv->state_lock);
        return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        mutex_lock(&priv->state_lock);
        mlx5_modify_vport_admin_state(priv->mdev,
                                      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                      rep->vport, 1,
                                      MLX5_VPORT_ADMIN_STATE_DOWN);
        ret = mlx5e_close_locked(dev);
        mutex_unlock(&priv->state_lock);
        return ret;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
                              struct tc_cls_flower_offload *cls_flower, int flags)
{
        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
                return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
                                              flags);
        case TC_CLSFLOWER_DESTROY:
                return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
                                           flags);
        case TC_CLSFLOWER_STATS:
                return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
                                          flags);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct mlx5e_priv *priv = cb_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
                                                     MLX5E_TC_ESW_OFFLOAD);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                              void *type_data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        switch (type) {
        case TC_SETUP_BLOCK:
                return flow_block_cb_setup_simple(type_data, NULL,
                                                  mlx5e_rep_setup_tc_cb,
                                                  priv, priv, true);
        default:
                return -EOPNOTSUPP;
        }
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;

        if (!rpriv) /* non vport rep mlx5e instances don't use this field */
                return false;

        rep = rpriv->rep;
        return (rep->vport == MLX5_VPORT_UPLINK);
}

static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return true;
        }

        return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
                     struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        mlx5e_fold_sw_stats64(priv, stats);
        return 0;
}

static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
                                       void *sp)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return mlx5e_get_sw_stats64(dev, sp);
        }

        return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        /* update HW stats in background for next time */
        mlx5e_queue_update_stats(priv);
        memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
        return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
        return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
}

static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
{
        struct sockaddr *saddr = addr;

        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;

        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        return 0;
}

static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
                                        __be16 vlan_proto)
{
        netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");

        if (vlan != 0)
                return -EOPNOTSUPP;

        /* allow setting 0-vid for compatibility with libvirt */
        return 0;
}

static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        return &rpriv->dl_port;
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
        .ndo_open = mlx5e_rep_open,
        .ndo_stop = mlx5e_rep_close,
        .ndo_start_xmit = mlx5e_xmit,
        .ndo_setup_tc = mlx5e_rep_setup_tc,
        .ndo_get_devlink_port = mlx5e_get_devlink_port,
        .ndo_get_stats64 = mlx5e_rep_get_stats,
        .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
        .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
        .ndo_change_mtu = mlx5e_rep_change_mtu,
};

static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
        .ndo_open = mlx5e_open,
        .ndo_stop = mlx5e_close,
        .ndo_start_xmit = mlx5e_xmit,
        .ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
        .ndo_setup_tc = mlx5e_rep_setup_tc,
        .ndo_get_devlink_port = mlx5e_get_devlink_port,
        .ndo_get_stats64 = mlx5e_get_stats,
        .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
        .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
        .ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
        .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
        .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
        .ndo_features_check = mlx5e_features_check,
        .ndo_set_vf_mac = mlx5e_set_vf_mac,
        .ndo_set_vf_rate = mlx5e_set_vf_rate,
        .ndo_get_vf_config = mlx5e_get_vf_config,
        .ndo_get_vf_stats = mlx5e_get_vf_stats,
        .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
        .ndo_set_features = mlx5e_set_features,
};

bool mlx5e_eswitch_rep(struct net_device *netdev)
{
        if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
            netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
                return true;

        return false;
}

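/* Representor netdevs use reduced defaults: a smaller SQ for VF reps (the
 * uplink rep keeps the full default size), a single TC, and no tunneled
 * offload; RQ, CQ moderation and RSS parameters follow the regular setup.
 */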
static void mlx5e_build_rep_params(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_params *params;

        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

        params = &priv->channels.params;
        params->hard_mtu = MLX5E_ETH_HARD_MTU;
        params->sw_mtu = netdev->mtu;

        /* SQ */
        if (rep->vport == MLX5_VPORT_UPLINK)
                params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        else
                params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

        /* RQ */
        mlx5e_build_rq_params(mdev, params);

        /* CQ moderation params */
        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

        params->num_tc = 1;
        params->tunneled_offload_en = false;

        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

        /* RSS */
        mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_core_dev *mdev = priv->mdev;

        if (rep->vport == MLX5_VPORT_UPLINK) {
                SET_NETDEV_DEV(netdev, mdev->device);
                netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
                /* we want a persistent mac for the uplink rep */
                mlx5_query_mac_address(mdev, netdev->dev_addr);
                netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
                if (MLX5_CAP_GEN(mdev, qos))
                        netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
        } else {
                netdev->netdev_ops = &mlx5e_netdev_ops_rep;
                eth_hw_addr_random(netdev);
                netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
        }

        netdev->watchdog_timeo = 15 * HZ;

        netdev->features |= NETIF_F_NETNS_LOCAL;

        netdev->hw_features |= NETIF_F_HW_TC;
        netdev->hw_features |= NETIF_F_SG;
        netdev->hw_features |= NETIF_F_IP_CSUM;
        netdev->hw_features |= NETIF_F_IPV6_CSUM;
        netdev->hw_features |= NETIF_F_GRO;
        netdev->hw_features |= NETIF_F_TSO;
        netdev->hw_features |= NETIF_F_TSO6;
        netdev->hw_features |= NETIF_F_RXCSUM;

        if (rep->vport == MLX5_VPORT_UPLINK)
                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
        else
                netdev->features |= NETIF_F_VLAN_CHALLENGED;

        netdev->features |= netdev->hw_features;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
                          struct net_device *netdev,
                          const struct mlx5e_profile *profile,
                          void *ppriv)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;

        err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
        if (err)
                return err;

        priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

        mlx5e_build_rep_params(netdev);
        mlx5e_build_rep_netdev(netdev);

        mlx5e_timestamp_init(priv);

        return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
        mlx5e_netdev_cleanup(priv->netdev, priv);
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
        struct ttc_params ttc_params = {};
        int tt, err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        /* The inner_ttc in the ttc params is intentionally not set */
        ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
        mlx5e_set_ttc_ft_params(&ttc_params);
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

        err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
                return err;
        }
        return 0;
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_destination dest;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = priv->direct_tir[0].tirn;
        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      &dest);
        if (IS_ERR(flow_rule))
                return PTR_ERR(flow_rule);
        rpriv->vport_rx_rule = flow_rule;
        return 0;
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        mlx5e_init_l2_addr(priv);

        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                return err;
        }

        err = mlx5e_create_indirect_rqt(priv);
        if (err)
                goto err_close_drop_rq;

        err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
        if (err)
                goto err_destroy_indirect_rqts;

        err = mlx5e_create_indirect_tirs(priv, false);
        if (err)
                goto err_destroy_direct_rqts;

        err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
        if (err)
                goto err_destroy_indirect_tirs;

        err = mlx5e_create_rep_ttc_table(priv);
        if (err)
                goto err_destroy_direct_tirs;

        err = mlx5e_create_rep_vport_rx_rule(priv);
        if (err)
                goto err_destroy_ttc_table;

        return 0;

err_destroy_ttc_table:
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
        mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
        return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
        mlx5e_destroy_indirect_tirs(priv, false);
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
}

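/* TX init creates the send TISes; for the uplink representor it additionally
 * initializes the shared TC flow table, port tunnel entropy and the netdev
 * notifier that drives indirect TC block (un)registration.
 */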
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_rep_uplink_priv *uplink_priv;
        int tc, err;

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
                return err;
        }

        if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
                uplink_priv = &rpriv->uplink_priv;

                INIT_LIST_HEAD(&uplink_priv->unready_flows);

                /* init shared tc flow table */
                err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
                if (err)
                        goto destroy_tises;

                mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

                /* init indirect block notifications */
                INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
                uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
                err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
                if (err) {
                        mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
                        goto tc_esw_cleanup;
                }
        }

        return 0;

tc_esw_cleanup:
        mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
        for (tc = 0; tc < priv->profile->max_tc; tc++)
                mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
        return err;
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int tc;

        for (tc = 0; tc < priv->profile->max_tc; tc++)
                mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

        if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
                /* clean indirect TC block notifications */
                unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
                mlx5e_rep_indr_clean_block_privs(rpriv);

                /* delete shared tc flow table */
                mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
        }
}

static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
        mlx5e_set_netdev_mtu_boundaries(priv);
}

static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
        return 0;
}

static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
        struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

        if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
                struct mlx5_eqe *eqe = data;

                switch (eqe->sub_type) {
                case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        queue_work(priv->wq, &priv->update_carrier_work);
                        break;
                default:
                        return NOTIFY_DONE;
                }

                return NOTIFY_OK;
        }

        if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
                struct mlx5e_rep_priv *rpriv = priv->ppriv;

                queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        u16 max_mtu;

        netdev->min_mtu = ETH_MIN_MTU;
        mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
        netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
        mlx5e_set_dev_port_mtu(priv);

        INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
                  mlx5e_tc_reoffload_flows_work);

        mlx5_lag_add(mdev, netdev);
        priv->events_nb.notifier_call = uplink_rep_async_event;
        mlx5_notifier_register(mdev, &priv->events_nb);
#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_initialize(priv);
        mlx5e_dcbnl_init_app(priv);
#endif
}

static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_delete_app(priv);
#endif
        mlx5_notifier_unregister(mdev, &priv->events_nb);
        cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
        mlx5_lag_remove(mdev);
}

static const struct mlx5e_profile mlx5e_rep_profile = {
        .init = mlx5e_init_rep,
        .cleanup = mlx5e_cleanup_rep,
        .init_rx = mlx5e_init_rep_rx,
        .cleanup_rx = mlx5e_cleanup_rep_rx,
        .init_tx = mlx5e_init_rep_tx,
        .cleanup_tx = mlx5e_cleanup_rep_tx,
        .enable = mlx5e_rep_enable,
        .update_rx = mlx5e_update_rep_rx,
        .update_stats = mlx5e_rep_update_hw_counters,
        .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc = 1,
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
        .init = mlx5e_init_rep,
        .cleanup = mlx5e_cleanup_rep,
        .init_rx = mlx5e_init_rep_rx,
        .cleanup_rx = mlx5e_cleanup_rep_rx,
        .init_tx = mlx5e_init_rep_tx,
        .cleanup_tx = mlx5e_cleanup_rep_tx,
        .enable = mlx5e_uplink_rep_enable,
        .disable = mlx5e_uplink_rep_disable,
        .update_rx = mlx5e_update_rep_rx,
        .update_stats = mlx5e_uplink_rep_update_hw_counters,
        .update_carrier = mlx5e_update_carrier,
        .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
        .max_tc = MLX5E_MAX_NUM_TC,
};

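/* devlink ports are registered for the uplink, PF and VF representors,
 * with the flavour (physical / pci_pf / pci_vf) derived from the vport and
 * the switch ID taken from the NIC's system image GUID.
 */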
static bool
is_devlink_port_supported(const struct mlx5_core_dev *dev,
                          const struct mlx5e_rep_priv *rpriv)
{
        return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
               rpriv->rep->vport == MLX5_VPORT_PF ||
               mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
}

static int register_devlink_port(struct mlx5_core_dev *dev,
                                 struct mlx5e_rep_priv *rpriv)
{
        struct devlink *devlink = priv_to_devlink(dev);
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct netdev_phys_item_id ppid = {};
        int ret;

        if (!is_devlink_port_supported(dev, rpriv))
                return 0;

        ret = mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
        if (ret)
                return ret;

        if (rep->vport == MLX5_VPORT_UPLINK)
                devlink_port_attrs_set(&rpriv->dl_port,
                                       DEVLINK_PORT_FLAVOUR_PHYSICAL,
                                       PCI_FUNC(dev->pdev->devfn), false, 0,
                                       &ppid.id[0], ppid.id_len);
        else if (rep->vport == MLX5_VPORT_PF)
                devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
                                              &ppid.id[0], ppid.id_len,
                                              dev->pdev->devfn);
        else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
                devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
                                              &ppid.id[0], ppid.id_len,
                                              dev->pdev->devfn,
                                              rep->vport - 1);

        return devlink_port_register(devlink, &rpriv->dl_port, rep->vport);
}

static void unregister_devlink_port(struct mlx5_core_dev *dev,
                                    struct mlx5e_rep_priv *rpriv)
{
        if (is_devlink_port_supported(dev, rpriv))
                devlink_port_unregister(&rpriv->dl_port);
}

/* e-Switch vport representors */
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        const struct mlx5e_profile *profile;
        struct mlx5e_rep_priv *rpriv;
        struct net_device *netdev;
        int nch, err;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return -ENOMEM;

        /* rpriv->rep to be looked up when profile->init() is called */
        rpriv->rep = rep;

        nch = mlx5e_get_max_num_channels(dev);
        profile = (rep->vport == MLX5_VPORT_UPLINK) ?
                  &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
        netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
        if (!netdev) {
                pr_warn("Failed to create representor netdev for vport %d\n",
                        rep->vport);
                kfree(rpriv);
                return -EINVAL;
        }

        rpriv->netdev = netdev;
        rep->rep_data[REP_ETH].priv = rpriv;
        INIT_LIST_HEAD(&rpriv->vport_sqs_list);

        if (rep->vport == MLX5_VPORT_UPLINK) {
                err = mlx5e_create_mdev_resources(dev);
                if (err)
                        goto err_destroy_netdev;
        }

        err = mlx5e_attach_netdev(netdev_priv(netdev));
        if (err) {
                pr_warn("Failed to attach representor netdev for vport %d\n",
                        rep->vport);
                goto err_destroy_mdev_resources;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err) {
                pr_warn("Failed to initialize neighbours handling for vport %d\n",
                        rep->vport);
                goto err_detach_netdev;
        }

        err = register_devlink_port(dev, rpriv);
        if (err) {
                esw_warn(dev, "Failed to register devlink port %d\n",
                         rep->vport);
                goto err_neigh_cleanup;
        }

        err = register_netdev(netdev);
        if (err) {
                pr_warn("Failed to register representor netdev for vport %d\n",
                        rep->vport);
                goto err_devlink_cleanup;
        }

        if (is_devlink_port_supported(dev, rpriv))
                devlink_port_type_eth_set(&rpriv->dl_port, netdev);
        return 0;

err_devlink_cleanup:
        unregister_devlink_port(dev, rpriv);

err_neigh_cleanup:
        mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
        mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_mdev_resources:
        if (rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_destroy_mdev_resources(dev);

err_destroy_netdev:
        mlx5e_destroy_netdev(netdev_priv(netdev));
        kfree(rpriv);
        return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *dev = priv->mdev;
        void *ppriv = priv->ppriv;

        if (is_devlink_port_supported(dev, rpriv))
                devlink_port_type_clear(&rpriv->dl_port);
        unregister_netdev(netdev);
        unregister_devlink_port(dev, rpriv);
        mlx5e_rep_neigh_cleanup(rpriv);
        mlx5e_detach_netdev(priv);
        if (rep->vport == MLX5_VPORT_UPLINK)
                mlx5e_destroy_mdev_resources(priv->mdev);
        mlx5e_destroy_netdev(priv);
        kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv;

        rpriv = mlx5e_rep_to_rep_priv(rep);

        return rpriv->netdev;
}

static const struct mlx5_eswitch_rep_ops rep_ops = {
        .load = mlx5e_vport_rep_load,
        .unload = mlx5e_vport_rep_unload,
        .get_proto_dev = mlx5e_vport_rep_get_proto_dev
};

void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;

        mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
}

void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;

        mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}