2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/mlx5/fs.h>
34 #include <net/switchdev.h>
35 #include <net/pkt_cls.h>
36 #include <net/act_api.h>
37 #include <net/devlink.h>
38 #include <net/ipv6_stubs.h>
43 #include "en/params.h"
46 #include "en/rep/tc.h"
47 #include "en/rep/neigh.h"
48 #include "en/rep/bridge.h"
49 #include "en/devlink.h"
52 #include "lib/devcom.h"
53 #include "lib/vxlan.h"
54 #define CREATE_TRACE_POINTS
55 #include "diag/en_rep_tracepoint.h"
56 #include "en_accel/ipsec.h"
57 #include "en/tc/int_port.h"
59 #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
60 max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
61 #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
/* Driver name reported through ethtool .get_drvinfo. */
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
65 static void mlx5e_rep_get_drvinfo(struct net_device
*dev
,
66 struct ethtool_drvinfo
*drvinfo
)
68 struct mlx5e_priv
*priv
= netdev_priv(dev
);
69 struct mlx5_core_dev
*mdev
= priv
->mdev
;
71 strlcpy(drvinfo
->driver
, mlx5e_rep_driver_name
,
72 sizeof(drvinfo
->driver
));
73 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
75 fw_rev_maj(mdev
), fw_rev_min(mdev
),
76 fw_rev_sub(mdev
), mdev
->board_id
);
79 static const struct counter_desc sw_rep_stats_desc
[] = {
80 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats
, rx_packets
) },
81 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats
, rx_bytes
) },
82 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats
, tx_packets
) },
83 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats
, tx_bytes
) },
93 static const struct counter_desc vport_rep_stats_desc
[] = {
94 { MLX5E_DECLARE_STAT(struct vport_stats
, vport_rx_packets
) },
95 { MLX5E_DECLARE_STAT(struct vport_stats
, vport_rx_bytes
) },
96 { MLX5E_DECLARE_STAT(struct vport_stats
, vport_tx_packets
) },
97 { MLX5E_DECLARE_STAT(struct vport_stats
, vport_tx_bytes
) },
100 #define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
101 #define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
103 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep
)
105 return NUM_VPORT_REP_SW_COUNTERS
;
108 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep
)
112 for (i
= 0; i
< NUM_VPORT_REP_SW_COUNTERS
; i
++)
113 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
,
114 sw_rep_stats_desc
[i
].format
);
118 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep
)
122 for (i
= 0; i
< NUM_VPORT_REP_SW_COUNTERS
; i
++)
123 data
[idx
++] = MLX5E_READ_CTR64_CPU(&priv
->stats
.sw
,
124 sw_rep_stats_desc
, i
);
128 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep
)
130 struct mlx5e_sw_stats
*s
= &priv
->stats
.sw
;
131 struct rtnl_link_stats64 stats64
= {};
133 memset(s
, 0, sizeof(*s
));
134 mlx5e_fold_sw_stats64(priv
, &stats64
);
136 s
->rx_packets
= stats64
.rx_packets
;
137 s
->rx_bytes
= stats64
.rx_bytes
;
138 s
->tx_packets
= stats64
.tx_packets
;
139 s
->tx_bytes
= stats64
.tx_bytes
;
140 s
->tx_queue_dropped
= stats64
.tx_dropped
;
143 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep
)
145 return NUM_VPORT_REP_HW_COUNTERS
;
148 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep
)
152 for (i
= 0; i
< NUM_VPORT_REP_HW_COUNTERS
; i
++)
153 strcpy(data
+ (idx
++) * ETH_GSTRING_LEN
, vport_rep_stats_desc
[i
].format
);
157 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep
)
161 for (i
= 0; i
< NUM_VPORT_REP_HW_COUNTERS
; i
++)
162 data
[idx
++] = MLX5E_READ_CTR64_CPU(&priv
->stats
.vf_vport
,
163 vport_rep_stats_desc
, i
);
167 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep
)
169 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
170 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
171 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
172 struct rtnl_link_stats64
*vport_stats
;
173 struct ifla_vf_stats vf_stats
;
176 err
= mlx5_eswitch_get_vport_stats(esw
, rep
->vport
, &vf_stats
);
178 netdev_warn(priv
->netdev
, "vport %d error %d reading stats\n",
183 vport_stats
= &priv
->stats
.vf_vport
;
184 /* flip tx/rx as we are reporting the counters for the switch vport */
185 vport_stats
->rx_packets
= vf_stats
.tx_packets
;
186 vport_stats
->rx_bytes
= vf_stats
.tx_bytes
;
187 vport_stats
->tx_packets
= vf_stats
.rx_packets
;
188 vport_stats
->tx_bytes
= vf_stats
.rx_bytes
;
191 static void mlx5e_rep_get_strings(struct net_device
*dev
,
192 u32 stringset
, uint8_t *data
)
194 struct mlx5e_priv
*priv
= netdev_priv(dev
);
198 mlx5e_stats_fill_strings(priv
, data
);
203 static void mlx5e_rep_get_ethtool_stats(struct net_device
*dev
,
204 struct ethtool_stats
*stats
, u64
*data
)
206 struct mlx5e_priv
*priv
= netdev_priv(dev
);
208 mlx5e_ethtool_get_ethtool_stats(priv
, stats
, data
);
211 static int mlx5e_rep_get_sset_count(struct net_device
*dev
, int sset
)
213 struct mlx5e_priv
*priv
= netdev_priv(dev
);
217 return mlx5e_stats_total_num(priv
);
/* ethtool .get_ringparam: delegate to the shared mlx5e helper. */
static void mlx5e_rep_get_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *param)
{
	mlx5e_ethtool_get_ringparam(netdev_priv(dev), param);
}
/* ethtool .set_ringparam: delegate to the shared mlx5e helper. */
static int mlx5e_rep_set_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *param)
{
	return mlx5e_ethtool_set_ringparam(netdev_priv(dev), param);
}
/* ethtool .get_channels: delegate to the shared mlx5e helper. */
static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	mlx5e_ethtool_get_channels(netdev_priv(dev), ch);
}
/* ethtool .set_channels: delegate to the shared mlx5e helper. */
static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	return mlx5e_ethtool_set_channels(netdev_priv(dev), ch);
}
/* ethtool .get_coalesce: kernel_coal/extack are unused by the helper. */
static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	return mlx5e_ethtool_get_coalesce(netdev_priv(netdev), coal);
}
/* ethtool .set_coalesce: kernel_coal/extack are unused by the helper. */
static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	return mlx5e_ethtool_set_coalesce(netdev_priv(netdev), coal);
}
275 static u32
mlx5e_rep_get_rxfh_key_size(struct net_device
*netdev
)
277 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
279 return mlx5e_ethtool_get_rxfh_key_size(priv
);
282 static u32
mlx5e_rep_get_rxfh_indir_size(struct net_device
*netdev
)
284 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
286 return mlx5e_ethtool_get_rxfh_indir_size(priv
);
289 static const struct ethtool_ops mlx5e_rep_ethtool_ops
= {
290 .supported_coalesce_params
= ETHTOOL_COALESCE_USECS
|
291 ETHTOOL_COALESCE_MAX_FRAMES
|
292 ETHTOOL_COALESCE_USE_ADAPTIVE
,
293 .get_drvinfo
= mlx5e_rep_get_drvinfo
,
294 .get_link
= ethtool_op_get_link
,
295 .get_strings
= mlx5e_rep_get_strings
,
296 .get_sset_count
= mlx5e_rep_get_sset_count
,
297 .get_ethtool_stats
= mlx5e_rep_get_ethtool_stats
,
298 .get_ringparam
= mlx5e_rep_get_ringparam
,
299 .set_ringparam
= mlx5e_rep_set_ringparam
,
300 .get_channels
= mlx5e_rep_get_channels
,
301 .set_channels
= mlx5e_rep_set_channels
,
302 .get_coalesce
= mlx5e_rep_get_coalesce
,
303 .set_coalesce
= mlx5e_rep_set_coalesce
,
304 .get_rxfh_key_size
= mlx5e_rep_get_rxfh_key_size
,
305 .get_rxfh_indir_size
= mlx5e_rep_get_rxfh_indir_size
,
308 static void mlx5e_sqs2vport_stop(struct mlx5_eswitch
*esw
,
309 struct mlx5_eswitch_rep
*rep
)
311 struct mlx5e_rep_sq
*rep_sq
, *tmp
;
312 struct mlx5e_rep_priv
*rpriv
;
314 if (esw
->mode
!= MLX5_ESWITCH_OFFLOADS
)
317 rpriv
= mlx5e_rep_to_rep_priv(rep
);
318 list_for_each_entry_safe(rep_sq
, tmp
, &rpriv
->vport_sqs_list
, list
) {
319 mlx5_eswitch_del_send_to_vport_rule(rep_sq
->send_to_vport_rule
);
320 if (rep_sq
->send_to_vport_rule_peer
)
321 mlx5_eswitch_del_send_to_vport_rule(rep_sq
->send_to_vport_rule_peer
);
322 list_del(&rep_sq
->list
);
327 static int mlx5e_sqs2vport_start(struct mlx5_eswitch
*esw
,
328 struct mlx5_eswitch_rep
*rep
,
329 u32
*sqns_array
, int sqns_num
)
331 struct mlx5_eswitch
*peer_esw
= NULL
;
332 struct mlx5_flow_handle
*flow_rule
;
333 struct mlx5e_rep_priv
*rpriv
;
334 struct mlx5e_rep_sq
*rep_sq
;
338 if (esw
->mode
!= MLX5_ESWITCH_OFFLOADS
)
341 rpriv
= mlx5e_rep_to_rep_priv(rep
);
342 if (mlx5_devcom_is_paired(esw
->dev
->priv
.devcom
, MLX5_DEVCOM_ESW_OFFLOADS
))
343 peer_esw
= mlx5_devcom_get_peer_data(esw
->dev
->priv
.devcom
,
344 MLX5_DEVCOM_ESW_OFFLOADS
);
346 for (i
= 0; i
< sqns_num
; i
++) {
347 rep_sq
= kzalloc(sizeof(*rep_sq
), GFP_KERNEL
);
353 /* Add re-inject rule to the PF/representor sqs */
354 flow_rule
= mlx5_eswitch_add_send_to_vport_rule(esw
, esw
, rep
,
356 if (IS_ERR(flow_rule
)) {
357 err
= PTR_ERR(flow_rule
);
361 rep_sq
->send_to_vport_rule
= flow_rule
;
362 rep_sq
->sqn
= sqns_array
[i
];
365 flow_rule
= mlx5_eswitch_add_send_to_vport_rule(peer_esw
, esw
,
367 if (IS_ERR(flow_rule
)) {
368 err
= PTR_ERR(flow_rule
);
369 mlx5_eswitch_del_send_to_vport_rule(rep_sq
->send_to_vport_rule
);
373 rep_sq
->send_to_vport_rule_peer
= flow_rule
;
376 list_add(&rep_sq
->list
, &rpriv
->vport_sqs_list
);
380 mlx5_devcom_release_peer_data(esw
->dev
->priv
.devcom
, MLX5_DEVCOM_ESW_OFFLOADS
);
385 mlx5e_sqs2vport_stop(esw
, rep
);
388 mlx5_devcom_release_peer_data(esw
->dev
->priv
.devcom
, MLX5_DEVCOM_ESW_OFFLOADS
);
393 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv
*priv
)
395 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
396 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
397 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
398 struct mlx5e_channel
*c
;
399 int n
, tc
, num_sqs
= 0;
403 sqs
= kcalloc(priv
->channels
.num
* mlx5e_get_dcb_num_tc(&priv
->channels
.params
),
404 sizeof(*sqs
), GFP_KERNEL
);
408 for (n
= 0; n
< priv
->channels
.num
; n
++) {
409 c
= priv
->channels
.c
[n
];
410 for (tc
= 0; tc
< c
->num_tc
; tc
++)
411 sqs
[num_sqs
++] = c
->sq
[tc
].sqn
;
414 err
= mlx5e_sqs2vport_start(esw
, rep
, sqs
, num_sqs
);
419 netdev_warn(priv
->netdev
, "Failed to add SQs FWD rules %d\n", err
);
423 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv
*priv
)
425 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
426 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
427 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
429 mlx5e_sqs2vport_stop(esw
, rep
);
432 static int mlx5e_rep_open(struct net_device
*dev
)
434 struct mlx5e_priv
*priv
= netdev_priv(dev
);
435 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
436 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
439 mutex_lock(&priv
->state_lock
);
440 err
= mlx5e_open_locked(dev
);
444 if (!mlx5_modify_vport_admin_state(priv
->mdev
,
445 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT
,
447 MLX5_VPORT_ADMIN_STATE_UP
))
448 netif_carrier_on(dev
);
451 mutex_unlock(&priv
->state_lock
);
455 static int mlx5e_rep_close(struct net_device
*dev
)
457 struct mlx5e_priv
*priv
= netdev_priv(dev
);
458 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
459 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
462 mutex_lock(&priv
->state_lock
);
463 mlx5_modify_vport_admin_state(priv
->mdev
,
464 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT
,
466 MLX5_VPORT_ADMIN_STATE_DOWN
);
467 ret
= mlx5e_close_locked(dev
);
468 mutex_unlock(&priv
->state_lock
);
472 bool mlx5e_is_uplink_rep(struct mlx5e_priv
*priv
)
474 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
475 struct mlx5_eswitch_rep
*rep
;
477 if (!MLX5_ESWITCH_MANAGER(priv
->mdev
))
480 if (!rpriv
) /* non vport rep mlx5e instances don't use this field */
484 return (rep
->vport
== MLX5_VPORT_UPLINK
);
487 bool mlx5e_rep_has_offload_stats(const struct net_device
*dev
, int attr_id
)
490 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
/* Fold the software (CPU-side) counters into an rtnl_link_stats64. */
static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}
507 int mlx5e_rep_get_offload_stats(int attr_id
, const struct net_device
*dev
,
511 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
512 return mlx5e_get_sw_stats64(dev
, sp
);
519 mlx5e_rep_get_stats(struct net_device
*dev
, struct rtnl_link_stats64
*stats
)
521 struct mlx5e_priv
*priv
= netdev_priv(dev
);
523 /* update HW stats in background for next time */
524 mlx5e_queue_update_stats(priv
);
525 memcpy(stats
, &priv
->stats
.vf_vport
, sizeof(*stats
));
528 static int mlx5e_rep_change_mtu(struct net_device
*netdev
, int new_mtu
)
530 return mlx5e_change_mtu(netdev
, new_mtu
, NULL
);
533 static struct devlink_port
*mlx5e_rep_get_devlink_port(struct net_device
*netdev
)
535 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
536 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
537 struct mlx5_core_dev
*dev
= priv
->mdev
;
539 return mlx5_esw_offloads_devlink_port(dev
->priv
.eswitch
, rpriv
->rep
->vport
);
542 static int mlx5e_rep_change_carrier(struct net_device
*dev
, bool new_carrier
)
544 struct mlx5e_priv
*priv
= netdev_priv(dev
);
545 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
546 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
550 err
= mlx5_modify_vport_admin_state(priv
->mdev
, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT
,
551 rep
->vport
, 1, MLX5_VPORT_ADMIN_STATE_UP
);
554 netif_carrier_on(dev
);
556 err
= mlx5_modify_vport_admin_state(priv
->mdev
, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT
,
557 rep
->vport
, 1, MLX5_VPORT_ADMIN_STATE_DOWN
);
560 netif_carrier_off(dev
);
565 static const struct net_device_ops mlx5e_netdev_ops_rep
= {
566 .ndo_open
= mlx5e_rep_open
,
567 .ndo_stop
= mlx5e_rep_close
,
568 .ndo_start_xmit
= mlx5e_xmit
,
569 .ndo_setup_tc
= mlx5e_rep_setup_tc
,
570 .ndo_get_devlink_port
= mlx5e_rep_get_devlink_port
,
571 .ndo_get_stats64
= mlx5e_rep_get_stats
,
572 .ndo_has_offload_stats
= mlx5e_rep_has_offload_stats
,
573 .ndo_get_offload_stats
= mlx5e_rep_get_offload_stats
,
574 .ndo_change_mtu
= mlx5e_rep_change_mtu
,
575 .ndo_change_carrier
= mlx5e_rep_change_carrier
,
578 bool mlx5e_eswitch_uplink_rep(const struct net_device
*netdev
)
580 return netdev
->netdev_ops
== &mlx5e_netdev_ops
&&
581 mlx5e_is_uplink_rep(netdev_priv(netdev
));
584 bool mlx5e_eswitch_vf_rep(const struct net_device
*netdev
)
586 return netdev
->netdev_ops
== &mlx5e_netdev_ops_rep
;
589 static void mlx5e_build_rep_params(struct net_device
*netdev
)
591 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
592 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
593 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
594 struct mlx5_core_dev
*mdev
= priv
->mdev
;
595 struct mlx5e_params
*params
;
597 u8 cq_period_mode
= MLX5_CAP_GEN(mdev
, cq_period_start_from_cqe
) ?
598 MLX5_CQ_PERIOD_MODE_START_FROM_CQE
:
599 MLX5_CQ_PERIOD_MODE_START_FROM_EQE
;
601 params
= &priv
->channels
.params
;
603 params
->num_channels
= MLX5E_REP_PARAMS_DEF_NUM_CHANNELS
;
604 params
->hard_mtu
= MLX5E_ETH_HARD_MTU
;
605 params
->sw_mtu
= netdev
->mtu
;
608 if (rep
->vport
== MLX5_VPORT_UPLINK
)
609 params
->log_sq_size
= MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE
;
611 params
->log_sq_size
= MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE
;
614 mlx5e_build_rq_params(mdev
, params
);
616 /* CQ moderation params */
617 params
->rx_dim_enabled
= MLX5_CAP_GEN(mdev
, cq_moderation
);
618 mlx5e_set_rx_cq_mode_params(params
, cq_period_mode
);
620 params
->mqprio
.num_tc
= 1;
621 params
->tunneled_offload_en
= false;
623 /* Set an initial non-zero value, so that mlx5e_select_queue won't
624 * divide by zero if called before first activating channels.
626 priv
->num_tc_x_num_ch
= params
->num_channels
* params
->mqprio
.num_tc
;
628 mlx5_query_min_inline(mdev
, ¶ms
->tx_min_inline_mode
);
631 static void mlx5e_build_rep_netdev(struct net_device
*netdev
,
632 struct mlx5_core_dev
*mdev
)
634 SET_NETDEV_DEV(netdev
, mdev
->device
);
635 netdev
->netdev_ops
= &mlx5e_netdev_ops_rep
;
636 eth_hw_addr_random(netdev
);
637 netdev
->ethtool_ops
= &mlx5e_rep_ethtool_ops
;
639 netdev
->watchdog_timeo
= 15 * HZ
;
641 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
642 netdev
->hw_features
|= NETIF_F_HW_TC
;
644 netdev
->hw_features
|= NETIF_F_SG
;
645 netdev
->hw_features
|= NETIF_F_IP_CSUM
;
646 netdev
->hw_features
|= NETIF_F_IPV6_CSUM
;
647 netdev
->hw_features
|= NETIF_F_GRO
;
648 netdev
->hw_features
|= NETIF_F_TSO
;
649 netdev
->hw_features
|= NETIF_F_TSO6
;
650 netdev
->hw_features
|= NETIF_F_RXCSUM
;
652 netdev
->features
|= netdev
->hw_features
;
653 netdev
->features
|= NETIF_F_NETNS_LOCAL
;
/* Profile .init for vport reps: build params and init timestamping. */
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_build_rep_params(netdev);
	mlx5e_timestamp_init(priv);

	return 0;
}
/* Profile .init for the uplink rep: IPsec init failure is logged but
 * non-fatal, then fall through to the common rep init.
 */
static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
			     struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);

	mlx5e_vxlan_set_netdev_info(priv);
	return mlx5e_init_rep(mdev, netdev);
}
/* Profile .cleanup: release IPsec resources (no-op if never initialized). */
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_cleanup(priv);
}
686 static int mlx5e_create_rep_ttc_table(struct mlx5e_priv
*priv
)
688 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
689 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
690 struct ttc_params ttc_params
= {};
693 priv
->fs
.ns
= mlx5_get_flow_namespace(priv
->mdev
,
694 MLX5_FLOW_NAMESPACE_KERNEL
);
696 /* The inner_ttc in the ttc params is intentionally not set */
697 mlx5e_set_ttc_params(priv
, &ttc_params
, false);
699 if (rep
->vport
!= MLX5_VPORT_UPLINK
)
700 /* To give uplik rep TTC a lower level for chaining from root ft */
701 ttc_params
.ft_attr
.level
= MLX5E_TTC_FT_LEVEL
+ 1;
703 priv
->fs
.ttc
= mlx5_create_ttc_table(priv
->mdev
, &ttc_params
);
704 if (IS_ERR(priv
->fs
.ttc
)) {
705 err
= PTR_ERR(priv
->fs
.ttc
);
706 netdev_err(priv
->netdev
, "Failed to create rep ttc table, err=%d\n",
713 static int mlx5e_create_rep_root_ft(struct mlx5e_priv
*priv
)
715 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
716 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
717 struct mlx5_flow_table_attr ft_attr
= {};
718 struct mlx5_flow_namespace
*ns
;
721 if (rep
->vport
!= MLX5_VPORT_UPLINK
) {
722 /* non uplik reps will skip any bypass tables and go directly to
725 rpriv
->root_ft
= mlx5_get_ttc_flow_table(priv
->fs
.ttc
);
729 /* uplink root ft will be used to auto chain, to ethtool or ttc tables */
730 ns
= mlx5_get_flow_namespace(priv
->mdev
, MLX5_FLOW_NAMESPACE_OFFLOADS
);
732 netdev_err(priv
->netdev
, "Failed to get reps offloads namespace\n");
736 ft_attr
.max_fte
= 0; /* Empty table, miss rule will always point to next table */
740 rpriv
->root_ft
= mlx5_create_flow_table(ns
, &ft_attr
);
741 if (IS_ERR(rpriv
->root_ft
)) {
742 err
= PTR_ERR(rpriv
->root_ft
);
743 rpriv
->root_ft
= NULL
;
749 static void mlx5e_destroy_rep_root_ft(struct mlx5e_priv
*priv
)
751 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
752 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
754 if (rep
->vport
!= MLX5_VPORT_UPLINK
)
756 mlx5_destroy_flow_table(rpriv
->root_ft
);
759 static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv
*priv
)
761 struct mlx5_eswitch
*esw
= priv
->mdev
->priv
.eswitch
;
762 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
763 struct mlx5_eswitch_rep
*rep
= rpriv
->rep
;
764 struct mlx5_flow_handle
*flow_rule
;
765 struct mlx5_flow_destination dest
;
767 dest
.type
= MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
;
768 dest
.ft
= rpriv
->root_ft
;
770 flow_rule
= mlx5_eswitch_create_vport_rx_rule(esw
, rep
->vport
, &dest
);
771 if (IS_ERR(flow_rule
))
772 return PTR_ERR(flow_rule
);
773 rpriv
->vport_rx_rule
= flow_rule
;
777 static void rep_vport_rx_rule_destroy(struct mlx5e_priv
*priv
)
779 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
781 if (!rpriv
->vport_rx_rule
)
784 mlx5_del_flow_rules(rpriv
->vport_rx_rule
);
785 rpriv
->vport_rx_rule
= NULL
;
788 int mlx5e_rep_bond_update(struct mlx5e_priv
*priv
, bool cleanup
)
790 rep_vport_rx_rule_destroy(priv
);
792 return cleanup
? 0 : mlx5e_create_rep_vport_rx_rule(priv
);
795 static int mlx5e_init_rep_rx(struct mlx5e_priv
*priv
)
797 struct mlx5_core_dev
*mdev
= priv
->mdev
;
800 priv
->rx_res
= mlx5e_rx_res_alloc();
804 mlx5e_init_l2_addr(priv
);
806 err
= mlx5e_open_drop_rq(priv
, &priv
->drop_rq
);
808 mlx5_core_err(mdev
, "open drop rq failed, %d\n", err
);
812 err
= mlx5e_rx_res_init(priv
->rx_res
, priv
->mdev
, 0,
813 priv
->max_nch
, priv
->drop_rq
.rqn
,
814 &priv
->channels
.params
.packet_merge
,
815 priv
->channels
.params
.num_channels
);
817 goto err_close_drop_rq
;
819 err
= mlx5e_create_rep_ttc_table(priv
);
821 goto err_destroy_rx_res
;
823 err
= mlx5e_create_rep_root_ft(priv
);
825 goto err_destroy_ttc_table
;
827 err
= mlx5e_create_rep_vport_rx_rule(priv
);
829 goto err_destroy_root_ft
;
831 mlx5e_ethtool_init_steering(priv
);
836 mlx5e_destroy_rep_root_ft(priv
);
837 err_destroy_ttc_table
:
838 mlx5_destroy_ttc_table(priv
->fs
.ttc
);
840 mlx5e_rx_res_destroy(priv
->rx_res
);
842 mlx5e_close_drop_rq(&priv
->drop_rq
);
843 mlx5e_rx_res_free(priv
->rx_res
);
848 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv
*priv
)
850 mlx5e_ethtool_cleanup_steering(priv
);
851 rep_vport_rx_rule_destroy(priv
);
852 mlx5e_destroy_rep_root_ft(priv
);
853 mlx5_destroy_ttc_table(priv
->fs
.ttc
);
854 mlx5e_rx_res_destroy(priv
->rx_res
);
855 mlx5e_close_drop_rq(&priv
->drop_rq
);
856 mlx5e_rx_res_free(priv
->rx_res
);
/* Uplink-rep .init_rx: also creates queue counters and the TC int-port
 * infrastructure on top of the common rep RX init.
 * NOTE(review): err declaration/check reconstructed — confirm.
 */
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
	int err;

	mlx5e_create_q_counters(priv);
	err = mlx5e_init_rep_rx(priv);
	if (err)
		return err;

	mlx5e_tc_int_port_init_rep_rx(priv);
	return 0;
}
/* Uplink-rep .cleanup_rx: reverse of mlx5e_init_ul_rep_rx(). */
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_int_port_cleanup_rep_rx(priv);
	mlx5e_cleanup_rep_rx(priv);
	mlx5e_destroy_q_counters(priv);
}
882 static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv
*rpriv
)
884 struct mlx5_rep_uplink_priv
*uplink_priv
;
885 struct net_device
*netdev
;
886 struct mlx5e_priv
*priv
;
889 netdev
= rpriv
->netdev
;
890 priv
= netdev_priv(netdev
);
891 uplink_priv
= &rpriv
->uplink_priv
;
893 err
= mlx5e_rep_tc_init(rpriv
);
897 mlx5_init_port_tun_entropy(&uplink_priv
->tun_entropy
, priv
->mdev
);
899 mlx5e_rep_bond_init(rpriv
);
900 err
= mlx5e_rep_tc_netdevice_event_register(rpriv
);
902 mlx5_core_err(priv
->mdev
, "Failed to register netdev notifier, err: %d\n",
910 mlx5e_rep_bond_cleanup(rpriv
);
911 mlx5e_rep_tc_cleanup(rpriv
);
915 static int mlx5e_init_rep_tx(struct mlx5e_priv
*priv
)
917 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
920 err
= mlx5e_create_tises(priv
);
922 mlx5_core_warn(priv
->mdev
, "create tises failed, %d\n", err
);
926 if (rpriv
->rep
->vport
== MLX5_VPORT_UPLINK
) {
927 err
= mlx5e_init_uplink_rep_tx(rpriv
);
935 mlx5e_destroy_tises(priv
);
/* Reverse of mlx5e_init_uplink_rep_tx(). */
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
	mlx5e_rep_bond_cleanup(rpriv);
	mlx5e_rep_tc_cleanup(rpriv);
}
946 static void mlx5e_cleanup_rep_tx(struct mlx5e_priv
*priv
)
948 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
950 mlx5e_destroy_tises(priv
);
952 if (rpriv
->rep
->vport
== MLX5_VPORT_UPLINK
)
953 mlx5e_cleanup_uplink_rep_tx(rpriv
);
956 static void mlx5e_rep_enable(struct mlx5e_priv
*priv
)
958 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
960 mlx5e_set_netdev_mtu_boundaries(priv
);
961 mlx5e_rep_neigh_init(rpriv
);
964 static void mlx5e_rep_disable(struct mlx5e_priv
*priv
)
966 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
968 mlx5e_rep_neigh_cleanup(rpriv
);
/* Profile .update_rx: nothing to refresh for representors. */
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}
976 static int uplink_rep_async_event(struct notifier_block
*nb
, unsigned long event
, void *data
)
978 struct mlx5e_priv
*priv
= container_of(nb
, struct mlx5e_priv
, events_nb
);
980 if (event
== MLX5_EVENT_TYPE_PORT_CHANGE
) {
981 struct mlx5_eqe
*eqe
= data
;
983 switch (eqe
->sub_type
) {
984 case MLX5_PORT_CHANGE_SUBTYPE_DOWN
:
985 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE
:
986 queue_work(priv
->wq
, &priv
->update_carrier_work
);
995 if (event
== MLX5_DEV_EVENT_PORT_AFFINITY
)
996 return mlx5e_rep_tc_event_port_affinity(priv
);
1001 static void mlx5e_uplink_rep_enable(struct mlx5e_priv
*priv
)
1003 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
1004 struct net_device
*netdev
= priv
->netdev
;
1005 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1008 netdev
->min_mtu
= ETH_MIN_MTU
;
1009 mlx5_query_port_max_mtu(priv
->mdev
, &max_mtu
, 1);
1010 netdev
->max_mtu
= MLX5E_HW2SW_MTU(&priv
->channels
.params
, max_mtu
);
1011 mlx5e_set_dev_port_mtu(priv
);
1013 mlx5e_rep_tc_enable(priv
);
1015 if (MLX5_CAP_GEN(mdev
, uplink_follow
))
1016 mlx5_modify_vport_admin_state(mdev
, MLX5_VPORT_STATE_OP_MOD_UPLINK
,
1017 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO
);
1018 mlx5_lag_add_netdev(mdev
, netdev
);
1019 priv
->events_nb
.notifier_call
= uplink_rep_async_event
;
1020 mlx5_notifier_register(mdev
, &priv
->events_nb
);
1021 mlx5e_dcbnl_initialize(priv
);
1022 mlx5e_dcbnl_init_app(priv
);
1023 mlx5e_rep_neigh_init(rpriv
);
1024 mlx5e_rep_bridge_init(priv
);
1026 netdev
->wanted_features
|= NETIF_F_HW_TC
;
1029 if (netif_running(netdev
))
1031 udp_tunnel_nic_reset_ntf(priv
->netdev
);
1032 netif_device_attach(netdev
);
1036 static void mlx5e_uplink_rep_disable(struct mlx5e_priv
*priv
)
1038 struct mlx5e_rep_priv
*rpriv
= priv
->ppriv
;
1039 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1042 if (netif_running(priv
->netdev
))
1043 mlx5e_close(priv
->netdev
);
1044 netif_device_detach(priv
->netdev
);
1047 mlx5e_rep_bridge_cleanup(priv
);
1048 mlx5e_rep_neigh_cleanup(rpriv
);
1049 mlx5e_dcbnl_delete_app(priv
);
1050 mlx5_notifier_unregister(mdev
, &priv
->events_nb
);
1051 mlx5e_rep_tc_disable(priv
);
1052 mlx5_lag_remove_netdev(mdev
, priv
->netdev
);
1053 mlx5_vxlan_reset_to_default(mdev
->vxlan
);
1056 static MLX5E_DEFINE_STATS_GRP(sw_rep
, 0);
1057 static MLX5E_DEFINE_STATS_GRP(vport_rep
, MLX5E_NDO_UPDATE_STATS
);
1059 /* The stats groups order is opposite to the update_stats() order calls */
1060 static mlx5e_stats_grp_t mlx5e_rep_stats_grps
[] = {
1061 &MLX5E_STATS_GRP(sw_rep
),
1062 &MLX5E_STATS_GRP(vport_rep
),
1065 static unsigned int mlx5e_rep_stats_grps_num(struct mlx5e_priv
*priv
)
1067 return ARRAY_SIZE(mlx5e_rep_stats_grps
);
1070 /* The stats groups order is opposite to the update_stats() order calls */
1071 static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps
[] = {
1072 &MLX5E_STATS_GRP(sw
),
1073 &MLX5E_STATS_GRP(qcnt
),
1074 &MLX5E_STATS_GRP(vnic_env
),
1075 &MLX5E_STATS_GRP(vport
),
1076 &MLX5E_STATS_GRP(802_3
),
1077 &MLX5E_STATS_GRP(2863),
1078 &MLX5E_STATS_GRP(2819),
1079 &MLX5E_STATS_GRP(phy
),
1080 &MLX5E_STATS_GRP(eth_ext
),
1081 &MLX5E_STATS_GRP(pcie
),
1082 &MLX5E_STATS_GRP(per_prio
),
1083 &MLX5E_STATS_GRP(pme
),
1084 &MLX5E_STATS_GRP(channels
),
1085 &MLX5E_STATS_GRP(per_port_buff_congest
),
1086 #ifdef CONFIG_MLX5_EN_IPSEC
1087 &MLX5E_STATS_GRP(ipsec_sw
),
1088 &MLX5E_STATS_GRP(ipsec_hw
),
1092 static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv
*priv
)
1094 return ARRAY_SIZE(mlx5e_ul_rep_stats_grps
);
1097 static const struct mlx5e_profile mlx5e_rep_profile
= {
1098 .init
= mlx5e_init_rep
,
1099 .cleanup
= mlx5e_cleanup_rep
,
1100 .init_rx
= mlx5e_init_rep_rx
,
1101 .cleanup_rx
= mlx5e_cleanup_rep_rx
,
1102 .init_tx
= mlx5e_init_rep_tx
,
1103 .cleanup_tx
= mlx5e_cleanup_rep_tx
,
1104 .enable
= mlx5e_rep_enable
,
1105 .disable
= mlx5e_rep_disable
,
1106 .update_rx
= mlx5e_update_rep_rx
,
1107 .update_stats
= mlx5e_stats_update_ndo_stats
,
1108 .rx_handlers
= &mlx5e_rx_handlers_rep
,
1110 .rq_groups
= MLX5E_NUM_RQ_GROUPS(REGULAR
),
1111 .stats_grps
= mlx5e_rep_stats_grps
,
1112 .stats_grps_num
= mlx5e_rep_stats_grps_num
,
1113 .rx_ptp_support
= false,
1116 static const struct mlx5e_profile mlx5e_uplink_rep_profile
= {
1117 .init
= mlx5e_init_ul_rep
,
1118 .cleanup
= mlx5e_cleanup_rep
,
1119 .init_rx
= mlx5e_init_ul_rep_rx
,
1120 .cleanup_rx
= mlx5e_cleanup_ul_rep_rx
,
1121 .init_tx
= mlx5e_init_rep_tx
,
1122 .cleanup_tx
= mlx5e_cleanup_rep_tx
,
1123 .enable
= mlx5e_uplink_rep_enable
,
1124 .disable
= mlx5e_uplink_rep_disable
,
1125 .update_rx
= mlx5e_update_rep_rx
,
1126 .update_stats
= mlx5e_stats_update_ndo_stats
,
1127 .update_carrier
= mlx5e_update_carrier
,
1128 .rx_handlers
= &mlx5e_rx_handlers_rep
,
1129 .max_tc
= MLX5E_MAX_NUM_TC
,
1130 /* XSK is needed so we can replace profile with NIC netdev */
1131 .rq_groups
= MLX5E_NUM_RQ_GROUPS(XSK
),
1132 .stats_grps
= mlx5e_ul_rep_stats_grps
,
1133 .stats_grps_num
= mlx5e_ul_rep_stats_grps_num
,
1134 .rx_ptp_support
= false,
1137 /* e-Switch vport representors */
1139 mlx5e_vport_uplink_rep_load(struct mlx5_core_dev
*dev
, struct mlx5_eswitch_rep
*rep
)
1141 struct mlx5e_priv
*priv
= netdev_priv(mlx5_uplink_netdev_get(dev
));
1142 struct mlx5e_rep_priv
*rpriv
= mlx5e_rep_to_rep_priv(rep
);
1143 struct devlink_port
*dl_port
;
1146 rpriv
->netdev
= priv
->netdev
;
1148 err
= mlx5e_netdev_change_profile(priv
, &mlx5e_uplink_rep_profile
,
1153 dl_port
= mlx5_esw_offloads_devlink_port(dev
->priv
.eswitch
, rpriv
->rep
->vport
);
1155 devlink_port_type_eth_set(dl_port
, rpriv
->netdev
);
1161 mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv
*rpriv
)
1163 struct net_device
*netdev
= rpriv
->netdev
;
1164 struct devlink_port
*dl_port
;
1165 struct mlx5_core_dev
*dev
;
1166 struct mlx5e_priv
*priv
;
1168 priv
= netdev_priv(netdev
);
1171 dl_port
= mlx5_esw_offloads_devlink_port(dev
->priv
.eswitch
, rpriv
->rep
->vport
);
1173 devlink_port_type_clear(dl_port
);
1174 mlx5e_netdev_attach_nic_profile(priv
);
/* Create, initialize and register a netdev for a non-uplink vport
 * representor. Called from mlx5e_vport_rep_load().
 *
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far is released through the goto-unwind chain at the bottom.
 *
 * NOTE(review): the `if (!netdev)` / `if (err)` checks, the -EINVAL return
 * and the unwind labels were partially lost in extraction and are restored
 * from the visible goto targets — confirm against upstream.
 */
static int
mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	const struct mlx5e_profile *profile;
	struct devlink_port *dl_port;
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	unsigned int txqs, rxqs;
	int nch, err;

	profile = &mlx5e_rep_profile;
	/* Size the txq/rxq arrays for the maximum channel count so later
	 * channel reconfiguration never needs to grow them.
	 */
	nch = mlx5e_get_max_num_channels(dev);
	txqs = nch * profile->max_tc;
	rxqs = nch * profile->rq_groups;
	netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs);
	if (!netdev) {
		mlx5_core_warn(dev,
			       "Failed to create representor netdev for vport %d\n",
			       rep->vport);
		return -EINVAL;
	}

	mlx5e_build_rep_netdev(netdev, dev);
	rpriv->netdev = netdev;

	priv = netdev_priv(netdev);
	priv->profile = profile;
	priv->ppriv = rpriv;
	err = profile->init(dev, netdev);
	if (err) {
		netdev_warn(netdev, "rep profile init failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		netdev_warn(netdev,
			    "Failed to attach representor netdev for vport %d\n",
			    rep->vport);
		goto err_cleanup_profile;
	}

	err = register_netdev(netdev);
	if (err) {
		netdev_warn(netdev,
			    "Failed to register representor netdev for vport %d\n",
			    rep->vport);
		goto err_detach_netdev;
	}

	/* Bind the devlink port (when one exists for this vport) to the new
	 * netdev so it is reported as an Ethernet port.
	 */
	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_eth_set(dl_port, netdev);
	return 0;

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_cleanup_profile:
	priv->profile->cleanup(priv);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	return err;
}
/* eswitch rep_ops .load callback: allocate the per-representor private
 * state and dispatch to the uplink or VF/SF load path.
 *
 * Returns 0 on success or a negative errno; on failure the rpriv
 * allocation is freed before returning.
 *
 * NOTE(review): the error tail (goto err_free / kfree / return) was lost
 * in extraction and is restored from context — confirm against upstream.
 */
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;
	int err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	/* rpriv->rep to be looked up when profile->init() is called */
	rpriv->rep = rep;
	rep->rep_data[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	/* The uplink vport reuses the existing core netdev with a rep
	 * profile; every other vport gets a freshly created netdev.
	 */
	if (rep->vport == MLX5_VPORT_UPLINK)
		err = mlx5e_vport_uplink_rep_load(dev, rep);
	else
		err = mlx5e_vport_vf_rep_load(dev, rep);

	if (err)
		goto err_free;

	return 0;

err_free:
	kfree(rpriv);
	return err;
}
/* eswitch rep_ops .unload callback: tear down the representor created by
 * mlx5e_vport_rep_load(). The uplink vport only swaps the netdev back to
 * the NIC profile (its netdev outlives the rep); all other vports clear
 * the devlink port type, unregister, detach and destroy their netdev.
 * In both cases the rpriv allocated at load time is freed.
 *
 * NOTE(review): the "goto free_ppriv" after the uplink branch and the
 * label itself were lost in extraction and are restored from context —
 * confirm against upstream.
 */
static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	struct devlink_port *dl_port;
	/* priv->ppriv is freed below; keep a copy since priv itself is
	 * destroyed before the kfree on the non-uplink path.
	 */
	void *ppriv = priv->ppriv;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		mlx5e_vport_uplink_rep_unload(rpriv);
		goto free_ppriv;
	}

	dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
	if (dl_port)
		devlink_port_type_clear(dl_port);
	unregister_netdev(netdev);
	mlx5e_detach_netdev(priv);
	priv->profile->cleanup(priv);
	mlx5e_destroy_netdev(priv);
free_ppriv:
	kfree(ppriv); /* mlx5e_rep_priv */
}
1296 static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep
*rep
)
1298 struct mlx5e_rep_priv
*rpriv
;
1300 rpriv
= mlx5e_rep_to_rep_priv(rep
);
1302 return rpriv
->netdev
;
1305 static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep
*rep
)
1307 struct mlx5e_rep_priv
*rpriv
;
1308 struct mlx5e_rep_sq
*rep_sq
;
1310 rpriv
= mlx5e_rep_to_rep_priv(rep
);
1311 list_for_each_entry(rep_sq
, &rpriv
->vport_sqs_list
, list
) {
1312 if (!rep_sq
->send_to_vport_rule_peer
)
1314 mlx5_eswitch_del_send_to_vport_rule(rep_sq
->send_to_vport_rule_peer
);
1315 rep_sq
->send_to_vport_rule_peer
= NULL
;
/* Install a send-to-vport steering rule on the peer eswitch for every SQ
 * of this representor. SQs that already carry a peer rule are skipped,
 * so a retry after a partial pairing only fills in the missing rules.
 *
 * Returns 0 on success. On failure any rules installed so far are torn
 * down via mlx5e_vport_rep_event_unpair() and the error embedded in the
 * failed flow rule pointer is returned.
 *
 * NOTE(review): the `continue`, `return 0` and `err_out:` label were lost
 * in extraction and are restored from context — confirm against upstream.
 */
static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep,
				      struct mlx5_eswitch *peer_esw)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
		if (rep_sq->send_to_vport_rule_peer)
			continue;
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
								rep, rep_sq->sqn);
		if (IS_ERR(flow_rule))
			goto err_out;
		rep_sq->send_to_vport_rule_peer = flow_rule;
	}

	return 0;

err_out:
	mlx5e_vport_rep_event_unpair(rep);
	return PTR_ERR(flow_rule);
}
1343 static int mlx5e_vport_rep_event(struct mlx5_eswitch
*esw
,
1344 struct mlx5_eswitch_rep
*rep
,
1345 enum mlx5_switchdev_event event
,
1350 if (event
== MLX5_SWITCHDEV_EVENT_PAIR
)
1351 err
= mlx5e_vport_rep_event_pair(esw
, rep
, data
);
1352 else if (event
== MLX5_SWITCHDEV_EVENT_UNPAIR
)
1353 mlx5e_vport_rep_event_unpair(rep
);
/* Representor lifecycle callbacks registered with the eswitch core for
 * the REP_ETH representor type (see mlx5e_rep_probe()).
 */
static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev,
	.event = mlx5e_vport_rep_event,
};
1365 static int mlx5e_rep_probe(struct auxiliary_device
*adev
,
1366 const struct auxiliary_device_id
*id
)
1368 struct mlx5_adev
*edev
= container_of(adev
, struct mlx5_adev
, adev
);
1369 struct mlx5_core_dev
*mdev
= edev
->mdev
;
1370 struct mlx5_eswitch
*esw
;
1372 esw
= mdev
->priv
.eswitch
;
1373 mlx5_eswitch_register_vport_reps(esw
, &rep_ops
, REP_ETH
);
1377 static void mlx5e_rep_remove(struct auxiliary_device
*adev
)
1379 struct mlx5_adev
*vdev
= container_of(adev
, struct mlx5_adev
, adev
);
1380 struct mlx5_core_dev
*mdev
= vdev
->mdev
;
1381 struct mlx5_eswitch
*esw
;
1383 esw
= mdev
->priv
.eswitch
;
1384 mlx5_eswitch_unregister_vport_reps(esw
, REP_ETH
);
/* Auxiliary device ids this driver binds to; the empty entry terminates
 * the table.
 */
static const struct auxiliary_device_id mlx5e_rep_id_table[] = {
	{ .name = MLX5_ADEV_NAME ".eth-rep", },
	{},
};

/* Export the id table for module autoloading on auxiliary device match. */
MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table);
/* Auxiliary driver glue binding mlx5e_rep_probe/remove to the ".eth-rep"
 * auxiliary devices listed in mlx5e_rep_id_table.
 */
static struct auxiliary_driver mlx5e_rep_driver = {
	/* NOTE(review): the .name line is missing from this chunk; restored
	 * per upstream as "rep" — confirm.
	 */
	.name = "rep",
	.probe = mlx5e_rep_probe,
	.remove = mlx5e_rep_remove,
	.id_table = mlx5e_rep_id_table,
};
/* Register the representor auxiliary driver. Returns 0 on success or a
 * negative errno from the auxiliary bus core.
 */
int mlx5e_rep_init(void)
{
	return auxiliary_driver_register(&mlx5e_rep_driver);
}
/* Unregister the representor auxiliary driver (pairs with mlx5e_rep_init). */
void mlx5e_rep_cleanup(void)
{
	auxiliary_driver_unregister(&mlx5e_rep_driver);
}