2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/mlx5/fs.h>
34 #include <net/switchdev.h>
35 #include <net/pkt_cls.h>
36 #include <net/act_api.h>
37 #include <net/devlink.h>
38 #include <net/ipv6_stubs.h>
45 #include "en/rep/tc.h"
46 #include "en/rep/neigh.h"
49 #define CREATE_TRACE_POINTS
50 #include "diag/en_rep_tracepoint.h"
52 #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
53 max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
54 #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
56 static const char mlx5e_rep_driver_name
[] = "mlx5e_rep";
58 static void mlx5e_rep_get_drvinfo(struct net_device
*dev
,
59 struct ethtool_drvinfo
*drvinfo
)
61 struct mlx5e_priv
*priv
= netdev_priv(dev
);
62 struct mlx5_core_dev
*mdev
= priv
->mdev
;
64 strlcpy(drvinfo
->driver
, mlx5e_rep_driver_name
,
65 sizeof(drvinfo
->driver
));
66 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
68 fw_rev_maj(mdev
), fw_rev_min(mdev
),
69 fw_rev_sub(mdev
), mdev
->board_id
);
72 static void mlx5e_uplink_rep_get_drvinfo(struct net_device
*dev
,
73 struct ethtool_drvinfo
*drvinfo
)
75 struct mlx5e_priv
*priv
= netdev_priv(dev
);
77 mlx5e_rep_get_drvinfo(dev
, drvinfo
);
78 strlcpy(drvinfo
->bus_info
, pci_name(priv
->mdev
->pdev
),
79 sizeof(drvinfo
->bus_info
));
82 static const struct counter_desc sw_rep_stats_desc
[] = {
83 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats
, rx_packets
) },
84 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats
, rx_bytes
) },
85 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats
, tx_packets
) },
86 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats
, tx_bytes
) },
96 static const struct counter_desc vport_rep_stats_desc
[] = {
97 { MLX5E_DECLARE_STAT(struct vport_stats
, vport_rx_packets
) },
98 { MLX5E_DECLARE_STAT(struct vport_stats
, vport_rx_bytes
) },
99 { MLX5E_DECLARE_STAT(struct vport_stats
, vport_tx_packets
) },
100 { MLX5E_DECLARE_STAT(struct vport_stats
, vport_tx_bytes
) },
103 #define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
104 #define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
106 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep
)
108 return NUM_VPORT_REP_SW_COUNTERS
;
111 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep
)
115 for (i
= 0; i
< NUM_VPORT_REP_SW_COUNTERS
; i
++)
116 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
117 sw_rep_stats_desc
[i
].format
);
121 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep
)
125 for (i
= 0; i
< NUM_VPORT_REP_SW_COUNTERS
; i
++)
126 data
[idx
++] = MLX5E_READ_CTR64_CPU(&priv
->stats
.sw
,
127 sw_rep_stats_desc
, i
);
131 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep
)
133 struct mlx5e_sw_stats
*s
= &priv
->stats
.sw
;
134 struct rtnl_link_stats64 stats64
= {};
136 memset(s
, 0, sizeof(*s
));
137 mlx5e_fold_sw_stats64(priv
, &stats64
);
139 s
->rx_packets
= stats64
.rx_packets
;
140 s
->rx_bytes
= stats64
.rx_bytes
;
141 s
->tx_packets
= stats64
.tx_packets
;
142 s
->tx_bytes
= stats64
.tx_bytes
;
143 s
->tx_queue_dropped
= stats64
.tx_dropped
;
146 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep
)
148 return NUM_VPORT_REP_HW_COUNTERS
;
151 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep
)
155 for (i
= 0; i
< NUM_VPORT_REP_HW_COUNTERS
; i
++)
156 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
, vport_rep_stats_desc
[i
].format
);
160 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep
)
164 for (i
= 0; i
< NUM_VPORT_REP_HW_COUNTERS
; i
++)
165 data
[idx
++] = MLX5E_READ_CTR64_CPU(&priv
->stats
.vf_vport
,
166 vport_rep_stats_desc
, i
);
170 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep
)
172 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
173 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
174 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
175 struct rtnl_link_stats64
*vport_stats
;
176 struct ifla_vf_stats vf_stats
;
179 err
= mlx5_eswitch_get_vport_stats(esw
, rep
->vport
, &vf_stats
);
181 netdev_warn(priv
->netdev
, "vport %d error %d reading stats\n",
186 vport_stats
= &priv
->stats
.vf_vport
;
187 /* flip tx/rx as we are reporting the counters for the switch vport */
188 vport_stats
->rx_packets
= vf_stats
.tx_packets
;
189 vport_stats
->rx_bytes
= vf_stats
.tx_bytes
;
190 vport_stats
->tx_packets
= vf_stats
.rx_packets
;
191 vport_stats
->tx_bytes
= vf_stats
.rx_bytes
;
194 static void mlx5e_rep_get_strings(struct net_device
*dev
,
195 u32 stringset
, uint8_t *data
)
197 struct mlx5e_priv
*priv
= netdev_priv(dev
);
201 mlx5e_stats_fill_strings(priv
, data
);
206 static void mlx5e_rep_get_ethtool_stats(struct net_device
*dev
,
207 struct ethtool_stats
*stats
, u64
*data
)
209 struct mlx5e_priv
*priv
= netdev_priv(dev
);
211 mlx5e_ethtool_get_ethtool_stats(priv
, stats
, data
);
214 static int mlx5e_rep_get_sset_count(struct net_device
*dev
, int sset
)
216 struct mlx5e_priv
*priv
= netdev_priv(dev
);
220 return mlx5e_stats_total_num(priv
);
/* ethtool get_ringparam wrapper. */
static void mlx5e_rep_get_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param);
}
/* ethtool set_ringparam wrapper. */
static int mlx5e_rep_set_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}
/* ethtool get_channels wrapper. */
static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}
/* ethtool set_channels wrapper. */
static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_channels(priv, ch);
}
/* ethtool get_coalesce wrapper. */
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal);
}
/* ethtool set_coalesce wrapper. */
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal);
}
274 static u32
mlx5e_rep_get_rxfh_key_size(struct net_device
*netdev
)
276 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
278 return mlx5e_ethtool_get_rxfh_key_size(priv
);
281 static u32
mlx5e_rep_get_rxfh_indir_size(struct net_device
*netdev
)
283 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
285 return mlx5e_ethtool_get_rxfh_indir_size(priv
);
/* ethtool pause statistics (uplink rep only). */
static void mlx5e_uplink_rep_get_pause_stats(struct net_device *netdev,
					     struct ethtool_pause_stats *stats)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_stats_pause_get(priv, stats);
}
/* ethtool get_pauseparam wrapper (uplink rep only). */
static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
					    struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}
/* ethtool set_pauseparam wrapper (uplink rep only). */
static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
					   struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}
/* ethtool get_link_ksettings wrapper (uplink rep only). */
static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
					       struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}
/* ethtool set_link_ksettings wrapper (uplink rep only). */
static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
					       const struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}
328 static const struct ethtool_ops mlx5e_rep_ethtool_ops
= {
329 .supported_coalesce_params
= ETHTOOL_COALESCE_USECS
|
330 ETHTOOL_COALESCE_MAX_FRAMES
|
331 ETHTOOL_COALESCE_USE_ADAPTIVE
,
332 .get_drvinfo
= mlx5e_rep_get_drvinfo
,
333 .get_link
= ethtool_op_get_link
,
334 .get_strings
= mlx5e_rep_get_strings
,
335 .get_sset_count
= mlx5e_rep_get_sset_count
,
336 .get_ethtool_stats
= mlx5e_rep_get_ethtool_stats
,
337 .get_ringparam
= mlx5e_rep_get_ringparam
,
338 .set_ringparam
= mlx5e_rep_set_ringparam
,
339 .get_channels
= mlx5e_rep_get_channels
,
340 .set_channels
= mlx5e_rep_set_channels
,
341 .get_coalesce
= mlx5e_rep_get_coalesce
,
342 .set_coalesce
= mlx5e_rep_set_coalesce
,
343 .get_rxfh_key_size
= mlx5e_rep_get_rxfh_key_size
,
344 .get_rxfh_indir_size
= mlx5e_rep_get_rxfh_indir_size
,
347 static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops
= {
348 .supported_coalesce_params
= ETHTOOL_COALESCE_USECS
|
349 ETHTOOL_COALESCE_MAX_FRAMES
|
350 ETHTOOL_COALESCE_USE_ADAPTIVE
,
351 .get_drvinfo
= mlx5e_uplink_rep_get_drvinfo
,
352 .get_link
= ethtool_op_get_link
,
353 .get_strings
= mlx5e_rep_get_strings
,
354 .get_sset_count
= mlx5e_rep_get_sset_count
,
355 .get_ethtool_stats
= mlx5e_rep_get_ethtool_stats
,
356 .get_ringparam
= mlx5e_rep_get_ringparam
,
357 .set_ringparam
= mlx5e_rep_set_ringparam
,
358 .get_channels
= mlx5e_rep_get_channels
,
359 .set_channels
= mlx5e_rep_set_channels
,
360 .get_coalesce
= mlx5e_rep_get_coalesce
,
361 .set_coalesce
= mlx5e_rep_set_coalesce
,
362 .get_link_ksettings
= mlx5e_uplink_rep_get_link_ksettings
,
363 .set_link_ksettings
= mlx5e_uplink_rep_set_link_ksettings
,
364 .get_rxfh_key_size
= mlx5e_rep_get_rxfh_key_size
,
365 .get_rxfh_indir_size
= mlx5e_rep_get_rxfh_indir_size
,
366 .get_rxfh
= mlx5e_get_rxfh
,
367 .set_rxfh
= mlx5e_set_rxfh
,
368 .get_rxnfc
= mlx5e_get_rxnfc
,
369 .set_rxnfc
= mlx5e_set_rxnfc
,
370 .get_pause_stats
= mlx5e_uplink_rep_get_pause_stats
,
371 .get_pauseparam
= mlx5e_uplink_rep_get_pauseparam
,
372 .set_pauseparam
= mlx5e_uplink_rep_set_pauseparam
,
375 static void mlx5e_sqs2vport_stop(struct mlx5_eswitch
*esw
,
376 struct mlx5_eswitch_rep
*rep
)
378 struct mlx5e_rep_sq
*rep_sq
, *tmp
;
379 struct mlx5e_rep_priv
*rpriv
;
381 if (esw
->mode
!= MLX5_ESWITCH_OFFLOADS
)
384 rpriv
= mlx5e_rep_to_rep_priv(rep
);
385 list_for_each_entry_safe(rep_sq
, tmp
, &rpriv
->vport_sqs_list
, list
) {
386 mlx5_eswitch_del_send_to_vport_rule(rep_sq
->send_to_vport_rule
);
387 list_del(&rep_sq
->list
);
392 static int mlx5e_sqs2vport_start(struct mlx5_eswitch
*esw
,
393 struct mlx5_eswitch_rep
*rep
,
394 u32
*sqns_array
, int sqns_num
)
396 struct mlx5_flow_handle
*flow_rule
;
397 struct mlx5e_rep_priv
*rpriv
;
398 struct mlx5e_rep_sq
*rep_sq
;
402 if (esw
->mode
!= MLX5_ESWITCH_OFFLOADS
)
405 rpriv
= mlx5e_rep_to_rep_priv(rep
);
406 for (i
= 0; i
< sqns_num
; i
++) {
407 rep_sq
= kzalloc(sizeof(*rep_sq
), GFP_KERNEL
);
413 /* Add re-inject rule to the PF/representor sqs */
414 flow_rule
= mlx5_eswitch_add_send_to_vport_rule(esw
,
417 if (IS_ERR(flow_rule
)) {
418 err
= PTR_ERR(flow_rule
);
422 rep_sq
->send_to_vport_rule
= flow_rule
;
423 list_add(&rep_sq
->list
, &rpriv
->vport_sqs_list
);
428 mlx5e_sqs2vport_stop(esw
, rep
);
432 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv
*priv
)
434 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
435 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
436 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
437 struct mlx5e_channel
*c
;
438 int n
, tc
, num_sqs
= 0;
442 sqs
= kcalloc(priv
->channels
.num
* priv
->channels
.params
.num_tc
, sizeof(*sqs
), GFP_KERNEL
);
446 for (n
= 0; n
< priv
->channels
.num
; n
++) {
447 c
= priv
->channels
.c
[n
];
448 for (tc
= 0; tc
< c
->num_tc
; tc
++)
449 sqs
[num_sqs
++] = c
->sq
[tc
].sqn
;
452 err
= mlx5e_sqs2vport_start(esw
, rep
, sqs
, num_sqs
);
457 netdev_warn(priv
->netdev
, "Failed to add SQs FWD rules %d\n", err
);
461 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv
*priv
)
463 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
464 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
465 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
467 mlx5e_sqs2vport_stop(esw
, rep
);
470 static int mlx5e_rep_open(struct net_device
*dev
)
472 struct mlx5e_priv
*priv
= netdev_priv(dev
);
473 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
474 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
477 mutex_lock(&priv
->state_lock
);
478 err
= mlx5e_open_locked(dev
);
482 if (!mlx5_modify_vport_admin_state(priv
->mdev
,
483 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT
,
485 MLX5_VPORT_ADMIN_STATE_UP
))
486 netif_carrier_on(dev
);
489 mutex_unlock(&priv
->state_lock
);
493 static int mlx5e_rep_close(struct net_device
*dev
)
495 struct mlx5e_priv
*priv
= netdev_priv(dev
);
496 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
497 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
500 mutex_lock(&priv
->state_lock
);
501 mlx5_modify_vport_admin_state(priv
->mdev
,
502 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT
,
504 MLX5_VPORT_ADMIN_STATE_DOWN
);
505 ret
= mlx5e_close_locked(dev
);
506 mutex_unlock(&priv
->state_lock
);
510 bool mlx5e_is_uplink_rep(struct mlx5e_priv
*priv
)
512 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
513 struct mlx5_eswitch_rep
*rep
;
515 if (!MLX5_ESWITCH_MANAGER(priv
->mdev
))
518 if (!rpriv
) /* non vport rep mlx5e instances don't use this field */
522 return (rep
->vport
== MLX5_VPORT_UPLINK
);
525 static bool mlx5e_rep_has_offload_stats(const struct net_device
*dev
, int attr_id
)
528 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
/* Fold per-channel software counters into an rtnl_link_stats64. */
static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}
545 static int mlx5e_rep_get_offload_stats(int attr_id
, const struct net_device
*dev
,
549 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
550 return mlx5e_get_sw_stats64(dev
, sp
);
557 mlx5e_rep_get_stats(struct net_device
*dev
, struct rtnl_link_stats64
*stats
)
559 struct mlx5e_priv
*priv
= netdev_priv(dev
);
561 /* update HW stats in background for next time */
562 mlx5e_queue_update_stats(priv
);
563 memcpy(stats
, &priv
->stats
.vf_vport
, sizeof(*stats
));
566 static int mlx5e_rep_change_mtu(struct net_device
*netdev
, int new_mtu
)
568 return mlx5e_change_mtu(netdev
, new_mtu
, NULL
);
571 static int mlx5e_uplink_rep_change_mtu(struct net_device
*netdev
, int new_mtu
)
573 return mlx5e_change_mtu(netdev
, new_mtu
, mlx5e_set_dev_port_mtu_ctx
);
576 static int mlx5e_uplink_rep_set_mac(struct net_device
*netdev
, void *addr
)
578 struct sockaddr
*saddr
= addr
;
580 if (!is_valid_ether_addr(saddr
->sa_data
))
581 return -EADDRNOTAVAIL
;
583 ether_addr_copy(netdev
->dev_addr
, saddr
->sa_data
);
587 static int mlx5e_uplink_rep_set_vf_vlan(struct net_device
*dev
, int vf
, u16 vlan
, u8 qos
,
590 netdev_warn_once(dev
, "legacy vf vlan setting isn't supported in switchdev mode\n");
595 /* allow setting 0-vid for compatibility with libvirt */
599 static struct devlink_port
*mlx5e_rep_get_devlink_port(struct net_device
*netdev
)
601 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
602 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
603 struct mlx5_core_dev
*dev
= priv
->mdev
;
605 return mlx5_esw_offloads_devlink_port(dev
->priv
.eswitch
, rpriv
->rep
->vport
);
608 static int mlx5e_rep_change_carrier(struct net_device
*dev
, bool new_carrier
)
610 struct mlx5e_priv
*priv
= netdev_priv(dev
);
611 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
612 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
616 err
= mlx5_modify_vport_admin_state(priv
->mdev
, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT
,
617 rep
->vport
, 1, MLX5_VPORT_ADMIN_STATE_UP
);
620 netif_carrier_on(dev
);
622 err
= mlx5_modify_vport_admin_state(priv
->mdev
, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT
,
623 rep
->vport
, 1, MLX5_VPORT_ADMIN_STATE_DOWN
);
626 netif_carrier_off(dev
);
631 static const struct net_device_ops mlx5e_netdev_ops_rep
= {
632 .ndo_open
= mlx5e_rep_open
,
633 .ndo_stop
= mlx5e_rep_close
,
634 .ndo_start_xmit
= mlx5e_xmit
,
635 .ndo_setup_tc
= mlx5e_rep_setup_tc
,
636 .ndo_get_devlink_port
= mlx5e_rep_get_devlink_port
,
637 .ndo_get_stats64
= mlx5e_rep_get_stats
,
638 .ndo_has_offload_stats
= mlx5e_rep_has_offload_stats
,
639 .ndo_get_offload_stats
= mlx5e_rep_get_offload_stats
,
640 .ndo_change_mtu
= mlx5e_rep_change_mtu
,
641 .ndo_change_carrier
= mlx5e_rep_change_carrier
,
644 static const struct net_device_ops mlx5e_netdev_ops_uplink_rep
= {
645 .ndo_open
= mlx5e_open
,
646 .ndo_stop
= mlx5e_close
,
647 .ndo_start_xmit
= mlx5e_xmit
,
648 .ndo_set_mac_address
= mlx5e_uplink_rep_set_mac
,
649 .ndo_setup_tc
= mlx5e_rep_setup_tc
,
650 .ndo_get_devlink_port
= mlx5e_rep_get_devlink_port
,
651 .ndo_get_stats64
= mlx5e_get_stats
,
652 .ndo_has_offload_stats
= mlx5e_rep_has_offload_stats
,
653 .ndo_get_offload_stats
= mlx5e_rep_get_offload_stats
,
654 .ndo_change_mtu
= mlx5e_uplink_rep_change_mtu
,
655 .ndo_features_check
= mlx5e_features_check
,
656 .ndo_set_vf_mac
= mlx5e_set_vf_mac
,
657 .ndo_set_vf_rate
= mlx5e_set_vf_rate
,
658 .ndo_get_vf_config
= mlx5e_get_vf_config
,
659 .ndo_get_vf_stats
= mlx5e_get_vf_stats
,
660 .ndo_set_vf_vlan
= mlx5e_uplink_rep_set_vf_vlan
,
661 .ndo_set_features
= mlx5e_set_features
,
664 bool mlx5e_eswitch_uplink_rep(struct net_device
*netdev
)
666 return netdev
->netdev_ops
== &mlx5e_netdev_ops_uplink_rep
;
669 bool mlx5e_eswitch_vf_rep(struct net_device
*netdev
)
671 return netdev
->netdev_ops
== &mlx5e_netdev_ops_rep
;
674 static void mlx5e_build_rep_params(struct net_device
*netdev
)
676 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
677 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
678 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
679 struct mlx5_core_dev
*mdev
= priv
->mdev
;
680 struct mlx5e_params
*params
;
682 u8 cq_period_mode
= MLX5_CAP_GEN(mdev
, cq_period_start_from_cqe
) ?
683 MLX5_CQ_PERIOD_MODE_START_FROM_CQE
:
684 MLX5_CQ_PERIOD_MODE_START_FROM_EQE
;
686 priv
->max_nch
= mlx5e_calc_max_nch(priv
, priv
->profile
);
687 params
= &priv
->channels
.params
;
689 params
->num_channels
= MLX5E_REP_PARAMS_DEF_NUM_CHANNELS
;
690 params
->hard_mtu
= MLX5E_ETH_HARD_MTU
;
691 params
->sw_mtu
= netdev
->mtu
;
694 if (rep
->vport
== MLX5_VPORT_UPLINK
)
695 params
->log_sq_size
= MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE
;
697 params
->log_sq_size
= MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE
;
700 mlx5e_build_rq_params(mdev
, params
);
702 /* CQ moderation params */
703 params
->rx_dim_enabled
= MLX5_CAP_GEN(mdev
, cq_moderation
);
704 mlx5e_set_rx_cq_mode_params(params
, cq_period_mode
);
707 params
->tunneled_offload_en
= false;
709 mlx5_query_min_inline(mdev
, ¶ms
->tx_min_inline_mode
);
712 mlx5e_build_rss_params(&priv
->rss_params
, params
->num_channels
);
715 static void mlx5e_build_rep_netdev(struct net_device
*netdev
,
716 struct mlx5_core_dev
*mdev
,
717 struct mlx5_eswitch_rep
*rep
)
719 SET_NETDEV_DEV(netdev
, mdev
->device
);
720 if (rep
->vport
== MLX5_VPORT_UPLINK
) {
721 netdev
->netdev_ops
= &mlx5e_netdev_ops_uplink_rep
;
722 /* we want a persistent mac for the uplink rep */
723 mlx5_query_mac_address(mdev
, netdev
->dev_addr
);
724 netdev
->ethtool_ops
= &mlx5e_uplink_rep_ethtool_ops
;
725 mlx5e_dcbnl_build_rep_netdev(netdev
);
727 netdev
->netdev_ops
= &mlx5e_netdev_ops_rep
;
728 eth_hw_addr_random(netdev
);
729 netdev
->ethtool_ops
= &mlx5e_rep_ethtool_ops
;
732 netdev
->watchdog_timeo
= 15 * HZ
;
734 netdev
->features
|= NETIF_F_NETNS_LOCAL
;
736 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
737 netdev
->hw_features
|= NETIF_F_HW_TC
;
739 netdev
->hw_features
|= NETIF_F_SG
;
740 netdev
->hw_features
|= NETIF_F_IP_CSUM
;
741 netdev
->hw_features
|= NETIF_F_IPV6_CSUM
;
742 netdev
->hw_features
|= NETIF_F_GRO
;
743 netdev
->hw_features
|= NETIF_F_TSO
;
744 netdev
->hw_features
|= NETIF_F_TSO6
;
745 netdev
->hw_features
|= NETIF_F_RXCSUM
;
747 if (rep
->vport
== MLX5_VPORT_UPLINK
)
748 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_RX
;
750 netdev
->features
|= NETIF_F_VLAN_CHALLENGED
;
752 netdev
->features
|= netdev
->hw_features
;
/* Profile .init for a representor: build params and init timestamping. */
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);

	return 0;
}
/* Profile .init for the uplink rep: also set up VXLAN netdev info. */
static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_vxlan_set_netdev_info(priv);
	return mlx5e_init_rep(mdev, netdev);
}
/* Profile .cleanup for reps: nothing to undo from mlx5e_init_rep(). */
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
}
779 static int mlx5e_create_rep_ttc_table(struct mlx5e_priv
*priv
)
781 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
782 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
783 struct ttc_params ttc_params
= {};
786 priv
->fs
.ns
= mlx5_get_flow_namespace(priv
->mdev
,
787 MLX5_FLOW_NAMESPACE_KERNEL
);
789 /* The inner_ttc in the ttc params is intentionally not set */
790 ttc_params
.any_tt_tirn
= priv
->direct_tir
[0].tirn
;
791 mlx5e_set_ttc_ft_params(&ttc_params
);
793 if (rep
->vport
!= MLX5_VPORT_UPLINK
)
794 /* To give uplik rep TTC a lower level for chaining from root ft */
795 ttc_params
.ft_attr
.level
= MLX5E_TTC_FT_LEVEL
+ 1;
797 for (tt
= 0; tt
< MLX5E_NUM_INDIR_TIRS
; tt
++)
798 ttc_params
.indir_tirn
[tt
] = priv
->indir_tir
[tt
].tirn
;
800 err
= mlx5e_create_ttc_table(priv
, &ttc_params
, &priv
->fs
.ttc
);
802 netdev_err(priv
->netdev
, "Failed to create rep ttc table, err=%d\n", err
);
808 static int mlx5e_create_rep_root_ft(struct mlx5e_priv
*priv
)
810 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
811 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
812 struct mlx5_flow_table_attr ft_attr
= {};
813 struct mlx5_flow_namespace
*ns
;
816 if (rep
->vport
!= MLX5_VPORT_UPLINK
) {
817 /* non uplik reps will skip any bypass tables and go directly to
820 rpriv
->root_ft
= priv
->fs
.ttc
.ft
.t
;
824 /* uplink root ft will be used to auto chain, to ethtool or ttc tables */
825 ns
= mlx5_get_flow_namespace(priv
->mdev
, MLX5_FLOW_NAMESPACE_OFFLOADS
);
827 netdev_err(priv
->netdev
, "Failed to get reps offloads namespace\n");
831 ft_attr
.max_fte
= 0; /* Empty table, miss rule will always point to next table */
835 rpriv
->root_ft
= mlx5_create_flow_table(ns
, &ft_attr
);
836 if (IS_ERR(rpriv
->root_ft
)) {
837 err
= PTR_ERR(rpriv
->root_ft
);
838 rpriv
->root_ft
= NULL
;
844 static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv
*priv
)
846 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
847 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
849 if (rep
->vport
!= MLX5_VPORT_UPLINK
)
851 mlx5_destroy_flow_table(rpriv
->root_ft
);
854 static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv
*priv
)
856 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
857 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
858 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
859 struct mlx5_flow_handle
*flow_rule
;
860 struct mlx5_flow_destination dest
;
862 dest
.type
= MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
;
863 dest
.ft
= rpriv
->root_ft
;
865 flow_rule
= mlx5_eswitch_create_vport_rx_rule(esw
, rep
->vport
, &dest
);
866 if (IS_ERR(flow_rule
))
867 return PTR_ERR(flow_rule
);
868 rpriv
->vport_rx_rule
= flow_rule
;
872 static void rep_vport_rx_rule_destroy(struct mlx5e_priv
*priv
)
874 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
876 if (!rpriv
->vport_rx_rule
)
879 mlx5_del_flow_rules(rpriv
->vport_rx_rule
);
880 rpriv
->vport_rx_rule
= NULL
;
883 int mlx5e_rep_bond_update(struct mlx5e_priv
*priv
, bool cleanup
)
885 rep_vport_rx_rule_destroy(priv
);
887 return cleanup
? 0 : mlx5e_create_rep_vport_rx_rule(priv
);
890 static int mlx5e_init_rep_rx(struct mlx5e_priv
*priv
)
892 struct mlx5_core_dev
*mdev
= priv
->mdev
;
895 mlx5e_init_l2_addr(priv
);
897 err
= mlx5e_open_drop_rq(priv
, &priv
->drop_rq
);
899 mlx5_core_err(mdev
, "open drop rq failed, %d\n", err
);
903 err
= mlx5e_create_indirect_rqt(priv
);
905 goto err_close_drop_rq
;
907 err
= mlx5e_create_direct_rqts(priv
, priv
->direct_tir
);
909 goto err_destroy_indirect_rqts
;
911 err
= mlx5e_create_indirect_tirs(priv
, false);
913 goto err_destroy_direct_rqts
;
915 err
= mlx5e_create_direct_tirs(priv
, priv
->direct_tir
);
917 goto err_destroy_indirect_tirs
;
919 err
= mlx5e_create_rep_ttc_table(priv
);
921 goto err_destroy_direct_tirs
;
923 err
= mlx5e_create_rep_root_ft(priv
);
925 goto err_destroy_ttc_table
;
927 err
= mlx5e_create_rep_vport_rx_rule(priv
);
929 goto err_destroy_root_ft
;
931 mlx5e_ethtool_init_steering(priv
);
936 mlx5e_destroy_rep_root_ft(priv
);
937 err_destroy_ttc_table
:
938 mlx5e_destroy_ttc_table(priv
, &priv
->fs
.ttc
);
939 err_destroy_direct_tirs
:
940 mlx5e_destroy_direct_tirs(priv
, priv
->direct_tir
);
941 err_destroy_indirect_tirs
:
942 mlx5e_destroy_indirect_tirs(priv
);
943 err_destroy_direct_rqts
:
944 mlx5e_destroy_direct_rqts(priv
, priv
->direct_tir
);
945 err_destroy_indirect_rqts
:
946 mlx5e_destroy_rqt(priv
, &priv
->indir_rqt
);
948 mlx5e_close_drop_rq(&priv
->drop_rq
);
952 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv
*priv
)
954 mlx5e_ethtool_cleanup_steering(priv
);
955 rep_vport_rx_rule_destroy(priv
);
956 mlx5e_destroy_rep_root_ft(priv
);
957 mlx5e_destroy_ttc_table(priv
, &priv
->fs
.ttc
);
958 mlx5e_destroy_direct_tirs(priv
, priv
->direct_tir
);
959 mlx5e_destroy_indirect_tirs(priv
);
960 mlx5e_destroy_direct_rqts(priv
, priv
->direct_tir
);
961 mlx5e_destroy_rqt(priv
, &priv
->indir_rqt
);
962 mlx5e_close_drop_rq(&priv
->drop_rq
);
/* Uplink rep .init_rx: additionally create queue counters. */
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_create_q_counters(priv);
	return mlx5e_init_rep_rx(priv);
}
/* Uplink rep .cleanup_rx: common RX teardown plus queue counters. */
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}
977 static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv
*rpriv
)
979 struct mlx5_rep_uplink_priv
*uplink_priv
;
980 struct net_device
*netdev
;
981 struct mlx5e_priv
*priv
;
984 netdev
= rpriv
->netdev
;
985 priv
= netdev_priv(netdev
);
986 uplink_priv
= &rpriv
->uplink_priv
;
988 err
= mlx5e_rep_tc_init(rpriv
);
992 mlx5_init_port_tun_entropy(&uplink_priv
->tun_entropy
, priv
->mdev
);
994 mlx5e_rep_bond_init(rpriv
);
995 err
= mlx5e_rep_tc_netdevice_event_register(rpriv
);
997 mlx5_core_err(priv
->mdev
, "Failed to register netdev notifier, err: %d\n",
1005 mlx5e_rep_bond_cleanup(rpriv
);
1006 mlx5e_rep_tc_cleanup(rpriv
);
1010 static int mlx5e_init_rep_tx(struct mlx5e_priv
*priv
)
1012 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
1015 err
= mlx5e_create_tises(priv
);
1017 mlx5_core_warn(priv
->mdev
, "create tises failed, %d\n", err
);
1021 if (rpriv
->rep
->vport
== MLX5_VPORT_UPLINK
) {
1022 err
= mlx5e_init_uplink_rep_tx(rpriv
);
1030 mlx5e_destroy_tises(priv
);
/* Undo mlx5e_init_uplink_rep_tx() in reverse order. */
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
}
1041 static void mlx5e_cleanup_rep_tx(struct mlx5e_priv
*priv
)
1043 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
1045 mlx5e_destroy_tises(priv
);
1047 if (rpriv
->rep
->vport
== MLX5_VPORT_UPLINK
)
1048 mlx5e_cleanup_uplink_rep_tx(rpriv
);
1051 static void mlx5e_rep_enable(struct mlx5e_priv
*priv
)
1053 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
1055 mlx5e_set_netdev_mtu_boundaries(priv
);
1056 mlx5e_rep_neigh_init(rpriv
);
1059 static void mlx5e_rep_disable(struct mlx5e_priv
*priv
)
1061 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
1063 mlx5e_rep_neigh_cleanup(rpriv
);
/* Profile .update_rx for reps: nothing to refresh. */
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}
1071 static int uplink_rep_async_event(struct notifier_block
*nb
, unsigned long event
, void *data
)
1073 struct mlx5e_priv
*priv
= container_of(nb
, struct mlx5e_priv
, events_nb
);
1075 if (event
== MLX5_EVENT_TYPE_PORT_CHANGE
) {
1076 struct mlx5_eqe
*eqe
= data
;
1078 switch (eqe
->sub_type
) {
1079 case MLX5_PORT_CHANGE_SUBTYPE_DOWN
:
1080 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE
:
1081 queue_work(priv
->wq
, &priv
->update_carrier_work
);
1090 if (event
== MLX5_DEV_EVENT_PORT_AFFINITY
)
1091 return mlx5e_rep_tc_event_port_affinity(priv
);
1096 static void mlx5e_uplink_rep_enable(struct mlx5e_priv
*priv
)
1098 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
1099 struct net_device
*netdev
= priv
->netdev
;
1100 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1103 netdev
->min_mtu
= ETH_MIN_MTU
;
1104 mlx5_query_port_max_mtu(priv
->mdev
, &max_mtu
, 1);
1105 netdev
->max_mtu
= MLX5E_HW2SW_MTU(&priv
->channels
.params
, max_mtu
);
1106 mlx5e_set_dev_port_mtu(priv
);
1108 mlx5e_rep_tc_enable(priv
);
1110 mlx5_modify_vport_admin_state(mdev
, MLX5_VPORT_STATE_OP_MOD_UPLINK
,
1111 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO
);
1112 mlx5_lag_add(mdev
, netdev
);
1113 priv
->events_nb
.notifier_call
= uplink_rep_async_event
;
1114 mlx5_notifier_register(mdev
, &priv
->events_nb
);
1115 mlx5e_dcbnl_initialize(priv
);
1116 mlx5e_dcbnl_init_app(priv
);
1117 mlx5e_rep_neigh_init(rpriv
);
1120 static void mlx5e_uplink_rep_disable(struct mlx5e_priv
*priv
)
1122 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
1123 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1125 mlx5e_rep_neigh_cleanup(rpriv
);
1126 mlx5e_dcbnl_delete_app(priv
);
1127 mlx5_notifier_unregister(mdev
, &priv
->events_nb
);
1128 mlx5e_rep_tc_disable(priv
);
1129 mlx5_lag_remove(mdev
);
1132 static MLX5E_DEFINE_STATS_GRP(sw_rep
, 0);
1133 static MLX5E_DEFINE_STATS_GRP(vport_rep
, MLX5E_NDO_UPDATE_STATS
);
1135 /* The stats groups order is opposite to the update_stats() order calls */
1136 static mlx5e_stats_grp_t mlx5e_rep_stats_grps
[] = {
1137 &MLX5E_STATS_GRP(sw_rep
),
1138 &MLX5E_STATS_GRP(vport_rep
),
1141 static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv
*priv
)
1143 return ARRAY_SIZE(mlx5e_rep_stats_grps
);
1146 /* The stats groups order is opposite to the update_stats() order calls */
1147 static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps
[] = {
1148 &MLX5E_STATS_GRP(sw
),
1149 &MLX5E_STATS_GRP(qcnt
),
1150 &MLX5E_STATS_GRP(vnic_env
),
1151 &MLX5E_STATS_GRP(vport
),
1152 &MLX5E_STATS_GRP(802_3
),
1153 &MLX5E_STATS_GRP(2863),
1154 &MLX5E_STATS_GRP(2819),
1155 &MLX5E_STATS_GRP(phy
),
1156 &MLX5E_STATS_GRP(eth_ext
),
1157 &MLX5E_STATS_GRP(pcie
),
1158 &MLX5E_STATS_GRP(per_prio
),
1159 &MLX5E_STATS_GRP(pme
),
1160 &MLX5E_STATS_GRP(channels
),
1161 &MLX5E_STATS_GRP(per_port_buff_congest
),
1164 static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv
*priv
)
1166 return ARRAY_SIZE(mlx5e_ul_rep_stats_grps
);
1169 static const struct mlx5e_profile mlx5e_rep_profile
= {
1170 .init
= mlx5e_init_rep
,
1171 .cleanup
= mlx5e_cleanup_rep
,
1172 .init_rx
= mlx5e_init_rep_rx
,
1173 .cleanup_rx
= mlx5e_cleanup_rep_rx
,
1174 .init_tx
= mlx5e_init_rep_tx
,
1175 .cleanup_tx
= mlx5e_cleanup_rep_tx
,
1176 .enable
= mlx5e_rep_enable
,
1177 .disable
= mlx5e_rep_disable
,
1178 .update_rx
= mlx5e_update_rep_rx
,
1179 .update_stats
= mlx5e_stats_update_ndo_stats
,
1180 .rx_handlers
= &mlx5e_rx_handlers_rep
,
1182 .rq_groups
= MLX5E_NUM_RQ_GROUPS(REGULAR
),
1183 .stats_grps
= mlx5e_rep_stats_grps
,
1184 .stats_grps_num
= mlx5e_rep_stats_grps_num
,
/* Profile driving the uplink (PF) representor netdev. Unlike the VF rep
 * profile it has its own init/RX hooks, carrier updates (it owns the
 * physical link), the maximum TC count and the full stats group set.
 */
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init			= mlx5e_init_ul_rep,
	.cleanup		= mlx5e_cleanup_rep,
	.init_rx		= mlx5e_init_ul_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_ul_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_rep_tx,
	.enable			= mlx5e_uplink_rep_enable,
	.disable		= mlx5e_uplink_rep_disable,
	.update_rx		= mlx5e_update_rep_rx,
	.update_stats		= mlx5e_stats_update_ndo_stats,
	.update_carrier		= mlx5e_update_carrier,
	.rx_handlers		= &mlx5e_rx_handlers_rep,
	.max_tc			= MLX5E_MAX_NUM_TC,
	.rq_groups		= MLX5E_NUM_RQ_GROUPS(REGULAR),
	.stats_grps		= mlx5e_ul_rep_stats_grps,
	.stats_grps_num		= mlx5e_ul_rep_stats_grps_num,
};
1206 /* e-Switch vport representors */
/* Load callback invoked by the e-switch core for each representor.
 *
 * Builds and registers a netdev for @rep on @dev: allocates the rep
 * private struct, picks the uplink or VF profile by vport, creates and
 * attaches the netdev, registers it, and links it to its devlink port.
 * Returns 0 on success or a negative errno; on failure everything
 * already set up is unwound via the goto ladder below.
 */
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	const struct mlx5e_profile *profile;
	struct mlx5e_rep_priv *rpriv;
	struct devlink_port *dl_port;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	unsigned int txqs, rxqs;
	int nch, err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	/* rpriv->rep to be looked up when profile->init() is called */
	rpriv->rep = rep;

	/* The uplink vport gets the richer uplink profile; every other
	 * vport (VF/SF) gets the plain rep profile.
	 */
	profile = (rep->vport == MLX5_VPORT_UPLINK) ?
		  &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;

	/* Queue counts scale with the channel count and the profile's
	 * TC / RQ-group dimensions.
	 */
	nch = mlx5e_get_max_num_channels(dev);
	txqs = nch * profile->max_tc;
	rxqs = nch * profile->rq_groups;
	netdev = mlx5e_create_netdev(dev, txqs, rxqs);
	if (!netdev) {
		mlx5_core_warn(dev,
			       "Failed to create representor netdev for vport %d\n",
			       rep->vport);
		kfree(rpriv);
		return -EINVAL;
	}

	mlx5e_build_rep_netdev(netdev, dev, rep);

	rpriv->netdev = netdev;
	rep->rep_data[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	/* Only the uplink rep owns the shared mdev resources. */
	if (rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_create_mdev_resources(dev);
		if (err)
			goto err_destroy_netdev;
	}

	priv = netdev_priv(netdev);
	priv->profile = profile;
	priv->ppriv = rpriv;
	err = profile->init(dev, netdev);
	if (err) {
		netdev_warn(netdev, "rep profile init failed, %d\n", err);
		goto err_destroy_mdev_resources;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		netdev_warn(netdev,
			    "Failed to attach representor netdev for vport %d\n",
			    rep->vport);
		goto err_cleanup_profile;
	}

	err = register_netdev(netdev);
	if (err) {
		netdev_warn(netdev,
			    "Failed to register representor netdev for vport %d\n",
			    rep->vport);
		goto err_detach_netdev;
	}

	/* Advertise the netdev on the matching devlink port, if one was
	 * created for this vport.
	 */
	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_eth_set(dl_port, netdev);
	return 0;

	/* Unwind in reverse order of construction. */
err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_cleanup_profile:
	priv->profile->cleanup(priv);

err_destroy_mdev_resources:
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(dev);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	kfree(rpriv);
	return err;
}
/* Unload callback: exact mirror of mlx5e_vport_rep_load().
 *
 * Detaches the netdev from its devlink port, unregisters and detaches
 * it, runs the profile cleanup, releases the uplink-only mdev resources,
 * destroys the netdev and finally frees the rep private struct. The
 * teardown order is deliberately the reverse of construction; ppriv is
 * saved up front because priv is gone after mlx5e_destroy_netdev().
 */
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	struct devlink_port *dl_port;
	void *ppriv = priv->ppriv;

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_clear(dl_port);
	unregister_netdev(netdev);
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	/* Shared mdev resources were created only for the uplink rep. */
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(priv->mdev);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}
1320 static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep
*rep
)
1322 struct mlx5e_rep_priv
*rpriv
;
1324 rpriv
= mlx5e_rep_to_rep_priv(rep
);
1326 return rpriv
->netdev
;
/* Callbacks the e-switch core invokes on representor lifecycle events
 * for the REP_ETH class (registered in mlx5e_rep_probe()).
 */
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev
};
1335 static int mlx5e_rep_probe(struct auxiliary_device
*adev
,
1336 const struct auxiliary_device_id
*id
)
1338 struct mlx5_adev
*edev
= container_of(adev
, struct mlx5_adev
, adev
);
1339 struct mlx5_core_dev
*mdev
= edev
->mdev
;
1340 struct mlx5_eswitch
*esw
;
1342 esw
= mdev
->priv
.eswitch
;
1343 mlx5_eswitch_register_vport_reps(esw
, &rep_ops
, REP_ETH
);
1347 static void mlx5e_rep_remove(struct auxiliary_device
*adev
)
1349 struct mlx5_adev
*vdev
= container_of(adev
, struct mlx5_adev
, adev
);
1350 struct mlx5_core_dev
*mdev
= vdev
->mdev
;
1351 struct mlx5_eswitch
*esw
;
1353 esw
= mdev
->priv
.eswitch
;
1354 mlx5_eswitch_unregister_vport_reps(esw
, REP_ETH
);
/* Match table for the auxiliary bus: bind to the "<mlx5 adev>.eth-rep"
 * sub-device. The empty entry terminates the table.
 */
static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth-rep", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);
1364 static struct auxiliary_driver mlx5e_rep_driver
= {
1366 .probe
= mlx5e_rep_probe
,
1367 .remove
= mlx5e_rep_remove
,
1368 .id_table
= mlx5e_rep_id_table
,
1371 int mlx5e_rep_init(void)
1373 return auxiliary_driver_register(&mlx5e_rep_driver
);
1376 void mlx5e_rep_cleanup(void)
1378 auxiliary_driver_unregister(&mlx5e_rep_driver
);