1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <net/switchdev.h>
26 #include <net/pkt_cls.h>
27 #include <net/tc_act/tc_mirred.h>
28 #include <net/netevent.h>
29 #include <net/tc_act/tc_sample.h>
30 #include <net/addrconf.h>
40 #include "spectrum_cnt.h"
41 #include "spectrum_dpipe.h"
42 #include "spectrum_acl_flex_actions.h"
43 #include "spectrum_span.h"
44 #include "../mlxfw/mlxfw.h"
46 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
48 #define MLXSW_SP1_FWREV_MAJOR 13
49 #define MLXSW_SP1_FWREV_MINOR 2000
50 #define MLXSW_SP1_FWREV_SUBMINOR 1122
51 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
53 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev
= {
54 .major
= MLXSW_SP1_FWREV_MAJOR
,
55 .minor
= MLXSW_SP1_FWREV_MINOR
,
56 .subminor
= MLXSW_SP1_FWREV_SUBMINOR
,
57 .can_reset_minor
= MLXSW_SP1_FWREV_CAN_RESET_MINOR
,
60 #define MLXSW_SP1_FW_FILENAME \
61 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
62 "." __stringify(MLXSW_SP1_FWREV_MINOR) \
63 "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
/* Driver identification strings registered with mlxsw core. */
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";
69 static const unsigned char mlxsw_sp1_mac_mask
[ETH_ALEN
] = {
70 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
72 static const unsigned char mlxsw_sp2_mac_mask
[ETH_ALEN
] = {
73 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
80 MLXSW_ITEM32(tx
, hdr
, version
, 0x00, 28, 4);
83 * Packet control type.
84 * 0 - Ethernet control (e.g. EMADs, LACP)
87 MLXSW_ITEM32(tx
, hdr
, ctl
, 0x00, 26, 2);
90 * Packet protocol type. Must be set to 1 (Ethernet).
92 MLXSW_ITEM32(tx
, hdr
, proto
, 0x00, 21, 3);
94 /* tx_hdr_rx_is_router
95 * Packet is sent from the router. Valid for data packets only.
97 MLXSW_ITEM32(tx
, hdr
, rx_is_router
, 0x00, 19, 1);
100 * Indicates if the 'fid' field is valid and should be used for
101 * forwarding lookup. Valid for data packets only.
103 MLXSW_ITEM32(tx
, hdr
, fid_valid
, 0x00, 16, 1);
106 * Switch partition ID. Must be set to 0.
108 MLXSW_ITEM32(tx
, hdr
, swid
, 0x00, 12, 3);
110 /* tx_hdr_control_tclass
111 * Indicates if the packet should use the control TClass and not one
112 * of the data TClasses.
114 MLXSW_ITEM32(tx
, hdr
, control_tclass
, 0x00, 6, 1);
117 * Egress TClass to be used on the egress device on the egress port.
119 MLXSW_ITEM32(tx
, hdr
, etclass
, 0x00, 0, 4);
122 * Destination local port for unicast packets.
123 * Destination multicast ID for multicast packets.
125 * Control packets are directed to a specific egress port, while data
126 * packets are transmitted through the CPU port (0) into the switch partition,
127 * where forwarding rules are applied.
129 MLXSW_ITEM32(tx
, hdr
, port_mid
, 0x04, 16, 16);
132 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
133 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
134 * Valid for data packets only.
136 MLXSW_ITEM32(tx
, hdr
, fid
, 0x08, 0, 16);
140 * 6 - Control packets
142 MLXSW_ITEM32(tx
, hdr
, type
, 0x0C, 0, 4);
144 struct mlxsw_sp_mlxfw_dev
{
145 struct mlxfw_dev mlxfw_dev
;
146 struct mlxsw_sp
*mlxsw_sp
;
149 static int mlxsw_sp_component_query(struct mlxfw_dev
*mlxfw_dev
,
150 u16 component_index
, u32
*p_max_size
,
151 u8
*p_align_bits
, u16
*p_max_write_size
)
153 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
154 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
155 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
156 char mcqi_pl
[MLXSW_REG_MCQI_LEN
];
159 mlxsw_reg_mcqi_pack(mcqi_pl
, component_index
);
160 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(mcqi
), mcqi_pl
);
163 mlxsw_reg_mcqi_unpack(mcqi_pl
, p_max_size
, p_align_bits
,
166 *p_align_bits
= max_t(u8
, *p_align_bits
, 2);
167 *p_max_write_size
= min_t(u16
, *p_max_write_size
,
168 MLXSW_REG_MCDA_MAX_DATA_LEN
);
172 static int mlxsw_sp_fsm_lock(struct mlxfw_dev
*mlxfw_dev
, u32
*fwhandle
)
174 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
175 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
176 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
177 char mcc_pl
[MLXSW_REG_MCC_LEN
];
181 mlxsw_reg_mcc_pack(mcc_pl
, 0, 0, 0, 0);
182 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
186 mlxsw_reg_mcc_unpack(mcc_pl
, fwhandle
, NULL
, &control_state
);
187 if (control_state
!= MLXFW_FSM_STATE_IDLE
)
190 mlxsw_reg_mcc_pack(mcc_pl
,
191 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE
,
193 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
196 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev
*mlxfw_dev
,
197 u32 fwhandle
, u16 component_index
,
200 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
201 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
202 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
203 char mcc_pl
[MLXSW_REG_MCC_LEN
];
205 mlxsw_reg_mcc_pack(mcc_pl
, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT
,
206 component_index
, fwhandle
, component_size
);
207 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
210 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev
*mlxfw_dev
,
211 u32 fwhandle
, u8
*data
, u16 size
,
214 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
215 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
216 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
217 char mcda_pl
[MLXSW_REG_MCDA_LEN
];
219 mlxsw_reg_mcda_pack(mcda_pl
, fwhandle
, offset
, size
, data
);
220 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcda
), mcda_pl
);
223 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev
*mlxfw_dev
,
224 u32 fwhandle
, u16 component_index
)
226 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
227 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
228 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
229 char mcc_pl
[MLXSW_REG_MCC_LEN
];
231 mlxsw_reg_mcc_pack(mcc_pl
, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT
,
232 component_index
, fwhandle
, 0);
233 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
236 static int mlxsw_sp_fsm_activate(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
)
238 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
239 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
240 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
241 char mcc_pl
[MLXSW_REG_MCC_LEN
];
243 mlxsw_reg_mcc_pack(mcc_pl
, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE
, 0,
245 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
248 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
,
249 enum mlxfw_fsm_state
*fsm_state
,
250 enum mlxfw_fsm_state_err
*fsm_state_err
)
252 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
253 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
254 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
255 char mcc_pl
[MLXSW_REG_MCC_LEN
];
260 mlxsw_reg_mcc_pack(mcc_pl
, 0, 0, fwhandle
, 0);
261 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
265 mlxsw_reg_mcc_unpack(mcc_pl
, NULL
, &error_code
, &control_state
);
266 *fsm_state
= control_state
;
267 *fsm_state_err
= min_t(enum mlxfw_fsm_state_err
, error_code
,
268 MLXFW_FSM_STATE_ERR_MAX
);
272 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
)
274 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
275 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
276 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
277 char mcc_pl
[MLXSW_REG_MCC_LEN
];
279 mlxsw_reg_mcc_pack(mcc_pl
, MLXSW_REG_MCC_INSTRUCTION_CANCEL
, 0,
281 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
284 static void mlxsw_sp_fsm_release(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
)
286 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
287 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
288 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
289 char mcc_pl
[MLXSW_REG_MCC_LEN
];
291 mlxsw_reg_mcc_pack(mcc_pl
,
292 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE
, 0,
294 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
297 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops
= {
298 .component_query
= mlxsw_sp_component_query
,
299 .fsm_lock
= mlxsw_sp_fsm_lock
,
300 .fsm_component_update
= mlxsw_sp_fsm_component_update
,
301 .fsm_block_download
= mlxsw_sp_fsm_block_download
,
302 .fsm_component_verify
= mlxsw_sp_fsm_component_verify
,
303 .fsm_activate
= mlxsw_sp_fsm_activate
,
304 .fsm_query_state
= mlxsw_sp_fsm_query_state
,
305 .fsm_cancel
= mlxsw_sp_fsm_cancel
,
306 .fsm_release
= mlxsw_sp_fsm_release
309 static int mlxsw_sp_firmware_flash(struct mlxsw_sp
*mlxsw_sp
,
310 const struct firmware
*firmware
)
312 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev
= {
314 .ops
= &mlxsw_sp_mlxfw_dev_ops
,
315 .psid
= mlxsw_sp
->bus_info
->psid
,
316 .psid_size
= strlen(mlxsw_sp
->bus_info
->psid
),
322 mlxsw_core_fw_flash_start(mlxsw_sp
->core
);
323 err
= mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev
.mlxfw_dev
, firmware
);
324 mlxsw_core_fw_flash_end(mlxsw_sp
->core
);
329 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp
*mlxsw_sp
)
331 const struct mlxsw_fw_rev
*rev
= &mlxsw_sp
->bus_info
->fw_rev
;
332 const struct mlxsw_fw_rev
*req_rev
= mlxsw_sp
->req_rev
;
333 const char *fw_filename
= mlxsw_sp
->fw_filename
;
334 union devlink_param_value value
;
335 const struct firmware
*firmware
;
338 /* Don't check if driver does not require it */
339 if (!req_rev
|| !fw_filename
)
342 /* Don't check if devlink 'fw_load_policy' param is 'flash' */
343 err
= devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp
->core
),
344 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY
,
348 if (value
.vu8
== DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH
)
351 /* Validate driver & FW are compatible */
352 if (rev
->major
!= req_rev
->major
) {
353 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
354 rev
->major
, req_rev
->major
);
357 if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev
->minor
) ==
358 MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev
->minor
) &&
359 (rev
->minor
> req_rev
->minor
||
360 (rev
->minor
== req_rev
->minor
&&
361 rev
->subminor
>= req_rev
->subminor
)))
364 dev_info(mlxsw_sp
->bus_info
->dev
, "The firmware version %d.%d.%d is incompatible with the driver\n",
365 rev
->major
, rev
->minor
, rev
->subminor
);
366 dev_info(mlxsw_sp
->bus_info
->dev
, "Flashing firmware using file %s\n",
369 err
= request_firmware_direct(&firmware
, fw_filename
,
370 mlxsw_sp
->bus_info
->dev
);
372 dev_err(mlxsw_sp
->bus_info
->dev
, "Could not request firmware file %s\n",
377 err
= mlxsw_sp_firmware_flash(mlxsw_sp
, firmware
);
378 release_firmware(firmware
);
380 dev_err(mlxsw_sp
->bus_info
->dev
, "Could not upgrade firmware\n");
382 /* On FW flash success, tell the caller FW reset is needed
383 * if current FW supports it.
385 if (rev
->minor
>= req_rev
->can_reset_minor
)
386 return err
? err
: -EAGAIN
;
391 int mlxsw_sp_flow_counter_get(struct mlxsw_sp
*mlxsw_sp
,
392 unsigned int counter_index
, u64
*packets
,
395 char mgpc_pl
[MLXSW_REG_MGPC_LEN
];
398 mlxsw_reg_mgpc_pack(mgpc_pl
, counter_index
, MLXSW_REG_MGPC_OPCODE_NOP
,
399 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES
);
400 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(mgpc
), mgpc_pl
);
404 *packets
= mlxsw_reg_mgpc_packet_counter_get(mgpc_pl
);
406 *bytes
= mlxsw_reg_mgpc_byte_counter_get(mgpc_pl
);
410 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp
*mlxsw_sp
,
411 unsigned int counter_index
)
413 char mgpc_pl
[MLXSW_REG_MGPC_LEN
];
415 mlxsw_reg_mgpc_pack(mgpc_pl
, counter_index
, MLXSW_REG_MGPC_OPCODE_CLEAR
,
416 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES
);
417 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mgpc
), mgpc_pl
);
420 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp
*mlxsw_sp
,
421 unsigned int *p_counter_index
)
425 err
= mlxsw_sp_counter_alloc(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
429 err
= mlxsw_sp_flow_counter_clear(mlxsw_sp
, *p_counter_index
);
431 goto err_counter_clear
;
435 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
440 void mlxsw_sp_flow_counter_free(struct mlxsw_sp
*mlxsw_sp
,
441 unsigned int counter_index
)
443 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
447 static void mlxsw_sp_txhdr_construct(struct sk_buff
*skb
,
448 const struct mlxsw_tx_info
*tx_info
)
450 char *txhdr
= skb_push(skb
, MLXSW_TXHDR_LEN
);
452 memset(txhdr
, 0, MLXSW_TXHDR_LEN
);
454 mlxsw_tx_hdr_version_set(txhdr
, MLXSW_TXHDR_VERSION_1
);
455 mlxsw_tx_hdr_ctl_set(txhdr
, MLXSW_TXHDR_ETH_CTL
);
456 mlxsw_tx_hdr_proto_set(txhdr
, MLXSW_TXHDR_PROTO_ETH
);
457 mlxsw_tx_hdr_swid_set(txhdr
, 0);
458 mlxsw_tx_hdr_control_tclass_set(txhdr
, 1);
459 mlxsw_tx_hdr_port_mid_set(txhdr
, tx_info
->local_port
);
460 mlxsw_tx_hdr_type_set(txhdr
, MLXSW_TXHDR_TYPE_CONTROL
);
463 enum mlxsw_reg_spms_state
mlxsw_sp_stp_spms_state(u8 state
)
466 case BR_STATE_FORWARDING
:
467 return MLXSW_REG_SPMS_STATE_FORWARDING
;
468 case BR_STATE_LEARNING
:
469 return MLXSW_REG_SPMS_STATE_LEARNING
;
470 case BR_STATE_LISTENING
: /* fall-through */
471 case BR_STATE_DISABLED
: /* fall-through */
472 case BR_STATE_BLOCKING
:
473 return MLXSW_REG_SPMS_STATE_DISCARDING
;
479 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
,
482 enum mlxsw_reg_spms_state spms_state
= mlxsw_sp_stp_spms_state(state
);
483 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
487 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
490 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sp_port
->local_port
);
491 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, spms_state
);
493 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spms
), spms_pl
);
498 static int mlxsw_sp_base_mac_get(struct mlxsw_sp
*mlxsw_sp
)
500 char spad_pl
[MLXSW_REG_SPAD_LEN
] = {0};
503 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(spad
), spad_pl
);
506 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl
, mlxsw_sp
->base_mac
);
510 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
511 bool enable
, u32 rate
)
513 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
514 char mpsc_pl
[MLXSW_REG_MPSC_LEN
];
516 mlxsw_reg_mpsc_pack(mpsc_pl
, mlxsw_sp_port
->local_port
, enable
, rate
);
517 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpsc
), mpsc_pl
);
520 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
523 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
524 char paos_pl
[MLXSW_REG_PAOS_LEN
];
526 mlxsw_reg_paos_pack(paos_pl
, mlxsw_sp_port
->local_port
,
527 is_up
? MLXSW_PORT_ADMIN_STATUS_UP
:
528 MLXSW_PORT_ADMIN_STATUS_DOWN
);
529 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(paos
), paos_pl
);
532 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
535 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
536 char ppad_pl
[MLXSW_REG_PPAD_LEN
];
538 mlxsw_reg_ppad_pack(ppad_pl
, true, mlxsw_sp_port
->local_port
);
539 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl
, addr
);
540 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ppad
), ppad_pl
);
543 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
545 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
546 unsigned char *addr
= mlxsw_sp_port
->dev
->dev_addr
;
548 ether_addr_copy(addr
, mlxsw_sp
->base_mac
);
549 addr
[ETH_ALEN
- 1] += mlxsw_sp_port
->local_port
;
550 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
);
553 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 mtu
)
555 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
556 char pmtu_pl
[MLXSW_REG_PMTU_LEN
];
560 mtu
+= MLXSW_TXHDR_LEN
+ ETH_HLEN
;
561 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, 0);
562 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
565 max_mtu
= mlxsw_reg_pmtu_max_mtu_get(pmtu_pl
);
570 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, mtu
);
571 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
574 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u8 swid
)
576 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
577 char pspa_pl
[MLXSW_REG_PSPA_LEN
];
579 mlxsw_reg_pspa_pack(pspa_pl
, swid
, mlxsw_sp_port
->local_port
);
580 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pspa
), pspa_pl
);
583 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port
*mlxsw_sp_port
, bool enable
)
585 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
586 char svpe_pl
[MLXSW_REG_SVPE_LEN
];
588 mlxsw_reg_svpe_pack(svpe_pl
, mlxsw_sp_port
->local_port
, enable
);
589 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(svpe
), svpe_pl
);
592 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
,
595 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
599 spvmlr_pl
= kmalloc(MLXSW_REG_SPVMLR_LEN
, GFP_KERNEL
);
602 mlxsw_reg_spvmlr_pack(spvmlr_pl
, mlxsw_sp_port
->local_port
, vid
, vid
,
604 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvmlr
), spvmlr_pl
);
609 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
612 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
613 char spvid_pl
[MLXSW_REG_SPVID_LEN
];
615 mlxsw_reg_spvid_pack(spvid_pl
, mlxsw_sp_port
->local_port
, vid
);
616 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvid
), spvid_pl
);
619 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
622 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
623 char spaft_pl
[MLXSW_REG_SPAFT_LEN
];
625 mlxsw_reg_spaft_pack(spaft_pl
, mlxsw_sp_port
->local_port
, allow
);
626 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spaft
), spaft_pl
);
629 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
634 err
= mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port
, false);
638 err
= __mlxsw_sp_port_pvid_set(mlxsw_sp_port
, vid
);
641 err
= mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port
, true);
643 goto err_port_allow_untagged_set
;
646 mlxsw_sp_port
->pvid
= vid
;
649 err_port_allow_untagged_set
:
650 __mlxsw_sp_port_pvid_set(mlxsw_sp_port
, mlxsw_sp_port
->pvid
);
655 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port
*mlxsw_sp_port
)
657 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
658 char sspr_pl
[MLXSW_REG_SSPR_LEN
];
660 mlxsw_reg_sspr_pack(sspr_pl
, mlxsw_sp_port
->local_port
);
661 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sspr
), sspr_pl
);
664 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp
*mlxsw_sp
,
665 u8 local_port
, u8
*p_module
,
666 u8
*p_width
, u8
*p_lane
)
668 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
671 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
672 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
675 *p_module
= mlxsw_reg_pmlp_module_get(pmlp_pl
, 0);
676 *p_width
= mlxsw_reg_pmlp_width_get(pmlp_pl
);
677 *p_lane
= mlxsw_reg_pmlp_tx_lane_get(pmlp_pl
, 0);
681 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port
*mlxsw_sp_port
,
682 u8 module
, u8 width
, u8 lane
)
684 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
685 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
688 mlxsw_reg_pmlp_pack(pmlp_pl
, mlxsw_sp_port
->local_port
);
689 mlxsw_reg_pmlp_width_set(pmlp_pl
, width
);
690 for (i
= 0; i
< width
; i
++) {
691 mlxsw_reg_pmlp_module_set(pmlp_pl
, i
, module
);
692 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl
, i
, lane
+ i
); /* Rx & Tx */
695 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
698 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port
*mlxsw_sp_port
)
700 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
701 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
703 mlxsw_reg_pmlp_pack(pmlp_pl
, mlxsw_sp_port
->local_port
);
704 mlxsw_reg_pmlp_width_set(pmlp_pl
, 0);
705 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
708 static int mlxsw_sp_port_open(struct net_device
*dev
)
710 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
713 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
716 netif_start_queue(dev
);
720 static int mlxsw_sp_port_stop(struct net_device
*dev
)
722 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
724 netif_stop_queue(dev
);
725 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
728 static netdev_tx_t
mlxsw_sp_port_xmit(struct sk_buff
*skb
,
729 struct net_device
*dev
)
731 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
732 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
733 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
734 const struct mlxsw_tx_info tx_info
= {
735 .local_port
= mlxsw_sp_port
->local_port
,
741 if (mlxsw_core_skb_transmit_busy(mlxsw_sp
->core
, &tx_info
))
742 return NETDEV_TX_BUSY
;
744 if (unlikely(skb_headroom(skb
) < MLXSW_TXHDR_LEN
)) {
745 struct sk_buff
*skb_orig
= skb
;
747 skb
= skb_realloc_headroom(skb
, MLXSW_TXHDR_LEN
);
749 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
750 dev_kfree_skb_any(skb_orig
);
753 dev_consume_skb_any(skb_orig
);
756 if (eth_skb_pad(skb
)) {
757 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
761 mlxsw_sp_txhdr_construct(skb
, &tx_info
);
762 /* TX header is consumed by HW on the way so we shouldn't count its
763 * bytes as being sent.
765 len
= skb
->len
- MLXSW_TXHDR_LEN
;
767 /* Due to a race we might fail here because of a full queue. In that
768 * unlikely case we simply drop the packet.
770 err
= mlxsw_core_skb_transmit(mlxsw_sp
->core
, skb
, &tx_info
);
773 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
774 u64_stats_update_begin(&pcpu_stats
->syncp
);
775 pcpu_stats
->tx_packets
++;
776 pcpu_stats
->tx_bytes
+= len
;
777 u64_stats_update_end(&pcpu_stats
->syncp
);
779 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
780 dev_kfree_skb_any(skb
);
/* ndo_set_rx_mode: intentionally empty — the switch ASIC handles Rx
 * filtering, so there is nothing to program here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
789 static int mlxsw_sp_port_set_mac_address(struct net_device
*dev
, void *p
)
791 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
792 struct sockaddr
*addr
= p
;
795 if (!is_valid_ether_addr(addr
->sa_data
))
796 return -EADDRNOTAVAIL
;
798 err
= mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
->sa_data
);
801 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
805 static u16
mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp
*mlxsw_sp
,
808 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp
, mtu
);
811 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
813 static u16
mlxsw_sp_pfc_delay_get(const struct mlxsw_sp
*mlxsw_sp
, int mtu
,
816 delay
= mlxsw_sp_bytes_cells(mlxsw_sp
, DIV_ROUND_UP(delay
,
818 return MLXSW_SP_CELL_FACTOR
* delay
+ mlxsw_sp_bytes_cells(mlxsw_sp
,
822 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
823 * Assumes 100m cable and maximum MTU.
825 #define MLXSW_SP_PAUSE_DELAY 58752
827 static u16
mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp
*mlxsw_sp
, int mtu
,
828 u16 delay
, bool pfc
, bool pause
)
831 return mlxsw_sp_pfc_delay_get(mlxsw_sp
, mtu
, delay
);
833 return mlxsw_sp_bytes_cells(mlxsw_sp
, MLXSW_SP_PAUSE_DELAY
);
838 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl
, int index
, u16 size
, u16 thres
,
842 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl
, index
, size
);
844 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl
, index
, size
,
848 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port
*mlxsw_sp_port
, int mtu
,
849 u8
*prio_tc
, bool pause_en
,
850 struct ieee_pfc
*my_pfc
)
852 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
853 u8 pfc_en
= !!my_pfc
? my_pfc
->pfc_en
: 0;
854 u16 delay
= !!my_pfc
? my_pfc
->delay
: 0;
855 char pbmc_pl
[MLXSW_REG_PBMC_LEN
];
856 u32 taken_headroom_cells
= 0;
857 u32 max_headroom_cells
;
860 max_headroom_cells
= mlxsw_sp_sb_max_headroom_cells(mlxsw_sp
);
862 mlxsw_reg_pbmc_pack(pbmc_pl
, mlxsw_sp_port
->local_port
, 0, 0);
863 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pbmc
), pbmc_pl
);
867 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
868 bool configure
= false;
875 for (j
= 0; j
< IEEE_8021QAZ_MAX_TCS
; j
++) {
876 if (prio_tc
[j
] == i
) {
877 pfc
= pfc_en
& BIT(j
);
886 lossy
= !(pfc
|| pause_en
);
887 thres_cells
= mlxsw_sp_pg_buf_threshold_get(mlxsw_sp
, mtu
);
888 delay_cells
= mlxsw_sp_pg_buf_delay_get(mlxsw_sp
, mtu
, delay
,
890 total_cells
= thres_cells
+ delay_cells
;
892 taken_headroom_cells
+= total_cells
;
893 if (taken_headroom_cells
> max_headroom_cells
)
896 mlxsw_sp_pg_buf_pack(pbmc_pl
, i
, total_cells
,
900 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pbmc
), pbmc_pl
);
903 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
904 int mtu
, bool pause_en
)
906 u8 def_prio_tc
[IEEE_8021QAZ_MAX_TCS
] = {0};
907 bool dcb_en
= !!mlxsw_sp_port
->dcb
.ets
;
908 struct ieee_pfc
*my_pfc
;
911 prio_tc
= dcb_en
? mlxsw_sp_port
->dcb
.ets
->prio_tc
: def_prio_tc
;
912 my_pfc
= dcb_en
? mlxsw_sp_port
->dcb
.pfc
: NULL
;
914 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port
, mtu
, prio_tc
,
918 static int mlxsw_sp_port_change_mtu(struct net_device
*dev
, int mtu
)
920 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
921 bool pause_en
= mlxsw_sp_port_is_pause_en(mlxsw_sp_port
);
924 err
= mlxsw_sp_port_headroom_set(mlxsw_sp_port
, mtu
, pause_en
);
927 err
= mlxsw_sp_span_port_mtu_update(mlxsw_sp_port
, mtu
);
929 goto err_span_port_mtu_update
;
930 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, mtu
);
932 goto err_port_mtu_set
;
937 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port
, dev
->mtu
);
938 err_span_port_mtu_update
:
939 mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
944 mlxsw_sp_port_get_sw_stats64(const struct net_device
*dev
,
945 struct rtnl_link_stats64
*stats
)
947 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
948 struct mlxsw_sp_port_pcpu_stats
*p
;
949 u64 rx_packets
, rx_bytes
, tx_packets
, tx_bytes
;
954 for_each_possible_cpu(i
) {
955 p
= per_cpu_ptr(mlxsw_sp_port
->pcpu_stats
, i
);
957 start
= u64_stats_fetch_begin_irq(&p
->syncp
);
958 rx_packets
= p
->rx_packets
;
959 rx_bytes
= p
->rx_bytes
;
960 tx_packets
= p
->tx_packets
;
961 tx_bytes
= p
->tx_bytes
;
962 } while (u64_stats_fetch_retry_irq(&p
->syncp
, start
));
964 stats
->rx_packets
+= rx_packets
;
965 stats
->rx_bytes
+= rx_bytes
;
966 stats
->tx_packets
+= tx_packets
;
967 stats
->tx_bytes
+= tx_bytes
;
968 /* tx_dropped is u32, updated without syncp protection. */
969 tx_dropped
+= p
->tx_dropped
;
971 stats
->tx_dropped
= tx_dropped
;
975 static bool mlxsw_sp_port_has_offload_stats(const struct net_device
*dev
, int attr_id
)
978 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
985 static int mlxsw_sp_port_get_offload_stats(int attr_id
, const struct net_device
*dev
,
989 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
990 return mlxsw_sp_port_get_sw_stats64(dev
, sp
);
996 static int mlxsw_sp_port_get_stats_raw(struct net_device
*dev
, int grp
,
997 int prio
, char *ppcnt_pl
)
999 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1000 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1002 mlxsw_reg_ppcnt_pack(ppcnt_pl
, mlxsw_sp_port
->local_port
, grp
, prio
);
1003 return mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ppcnt
), ppcnt_pl
);
1006 static int mlxsw_sp_port_get_hw_stats(struct net_device
*dev
,
1007 struct rtnl_link_stats64
*stats
)
1009 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
1012 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_IEEE_8023_CNT
,
1018 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl
);
1020 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl
);
1022 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl
);
1024 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl
);
1026 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl
);
1028 stats
->rx_crc_errors
=
1029 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl
);
1030 stats
->rx_frame_errors
=
1031 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl
);
1033 stats
->rx_length_errors
= (
1034 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl
) +
1035 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl
) +
1036 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl
));
1038 stats
->rx_errors
= (stats
->rx_crc_errors
+
1039 stats
->rx_frame_errors
+ stats
->rx_length_errors
);
1046 mlxsw_sp_port_get_hw_xstats(struct net_device
*dev
,
1047 struct mlxsw_sp_port_xstats
*xstats
)
1049 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
1052 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_EXT_CNT
, 0,
1055 xstats
->ecn
= mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl
);
1057 for (i
= 0; i
< TC_MAX_QUEUE
; i
++) {
1058 err
= mlxsw_sp_port_get_stats_raw(dev
,
1059 MLXSW_REG_PPCNT_TC_CONG_TC
,
1062 xstats
->wred_drop
[i
] =
1063 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl
);
1065 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_TC_CNT
,
1070 xstats
->backlog
[i
] =
1071 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl
);
1072 xstats
->tail_drop
[i
] =
1073 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl
);
1076 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1077 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_PRIO_CNT
,
1082 xstats
->tx_packets
[i
] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl
);
1083 xstats
->tx_bytes
[i
] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl
);
1087 static void update_stats_cache(struct work_struct
*work
)
1089 struct mlxsw_sp_port
*mlxsw_sp_port
=
1090 container_of(work
, struct mlxsw_sp_port
,
1091 periodic_hw_stats
.update_dw
.work
);
1093 if (!netif_carrier_ok(mlxsw_sp_port
->dev
))
1096 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port
->dev
,
1097 &mlxsw_sp_port
->periodic_hw_stats
.stats
);
1098 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port
->dev
,
1099 &mlxsw_sp_port
->periodic_hw_stats
.xstats
);
1102 mlxsw_core_schedule_dw(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
,
1103 MLXSW_HW_STATS_UPDATE_TIME
);
1106 /* Return the stats from a cache that is updated periodically,
1107 * as this function might get called in an atomic context.
1110 mlxsw_sp_port_get_stats64(struct net_device
*dev
,
1111 struct rtnl_link_stats64
*stats
)
1113 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1115 memcpy(stats
, &mlxsw_sp_port
->periodic_hw_stats
.stats
, sizeof(*stats
));
1118 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1119 u16 vid_begin
, u16 vid_end
,
1120 bool is_member
, bool untagged
)
1122 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1126 spvm_pl
= kmalloc(MLXSW_REG_SPVM_LEN
, GFP_KERNEL
);
1130 mlxsw_reg_spvm_pack(spvm_pl
, mlxsw_sp_port
->local_port
, vid_begin
,
1131 vid_end
, is_member
, untagged
);
1132 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvm
), spvm_pl
);
1137 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid_begin
,
1138 u16 vid_end
, bool is_member
, bool untagged
)
1143 for (vid
= vid_begin
; vid
<= vid_end
;
1144 vid
+= MLXSW_REG_SPVM_REC_MAX_COUNT
) {
1145 vid_e
= min((u16
) (vid
+ MLXSW_REG_SPVM_REC_MAX_COUNT
- 1),
1148 err
= __mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid_e
,
1149 is_member
, untagged
);
1157 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port
*mlxsw_sp_port
,
1160 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
, *tmp
;
1162 list_for_each_entry_safe(mlxsw_sp_port_vlan
, tmp
,
1163 &mlxsw_sp_port
->vlans_list
, list
) {
1164 if (!flush_default
&&
1165 mlxsw_sp_port_vlan
->vid
== MLXSW_SP_DEFAULT_VID
)
1167 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan
);
1172 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
)
1174 if (mlxsw_sp_port_vlan
->bridge_port
)
1175 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan
);
1176 else if (mlxsw_sp_port_vlan
->fid
)
1177 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan
);
1180 struct mlxsw_sp_port_vlan
*
1181 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
1183 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1184 bool untagged
= vid
== MLXSW_SP_DEFAULT_VID
;
1187 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, vid
);
1188 if (mlxsw_sp_port_vlan
)
1189 return ERR_PTR(-EEXIST
);
1191 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, true, untagged
);
1193 return ERR_PTR(err
);
1195 mlxsw_sp_port_vlan
= kzalloc(sizeof(*mlxsw_sp_port_vlan
), GFP_KERNEL
);
1196 if (!mlxsw_sp_port_vlan
) {
1198 goto err_port_vlan_alloc
;
1201 mlxsw_sp_port_vlan
->mlxsw_sp_port
= mlxsw_sp_port
;
1202 mlxsw_sp_port_vlan
->vid
= vid
;
1203 list_add(&mlxsw_sp_port_vlan
->list
, &mlxsw_sp_port
->vlans_list
);
1205 return mlxsw_sp_port_vlan
;
1207 err_port_vlan_alloc
:
1208 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
1209 return ERR_PTR(err
);
1212 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
)
1214 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp_port_vlan
->mlxsw_sp_port
;
1215 u16 vid
= mlxsw_sp_port_vlan
->vid
;
1217 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan
);
1218 list_del(&mlxsw_sp_port_vlan
->list
);
1219 kfree(mlxsw_sp_port_vlan
);
1220 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
1223 static int mlxsw_sp_port_add_vid(struct net_device
*dev
,
1224 __be16 __always_unused proto
, u16 vid
)
1226 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1228 /* VLAN 0 is added to HW filter when device goes up, but it is
1229 * reserved in our case, so simply return.
1234 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port
, vid
));
1237 static int mlxsw_sp_port_kill_vid(struct net_device
*dev
,
1238 __be16 __always_unused proto
, u16 vid
)
1240 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1241 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1243 /* VLAN 0 is removed from HW filter when device goes down, but
1244 * it is reserved in our case, so simply return.
1249 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, vid
);
1250 if (!mlxsw_sp_port_vlan
)
1252 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan
);
1257 static struct mlxsw_sp_port_mall_tc_entry
*
1258 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port
*port
,
1259 unsigned long cookie
) {
1260 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1262 list_for_each_entry(mall_tc_entry
, &port
->mall_tc_list
, list
)
1263 if (mall_tc_entry
->cookie
== cookie
)
1264 return mall_tc_entry
;
1270 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port
*mlxsw_sp_port
,
1271 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
,
1272 const struct flow_action_entry
*act
,
1275 enum mlxsw_sp_span_type span_type
;
1278 netdev_err(mlxsw_sp_port
->dev
, "Could not find requested device\n");
1282 mirror
->ingress
= ingress
;
1283 span_type
= ingress
? MLXSW_SP_SPAN_INGRESS
: MLXSW_SP_SPAN_EGRESS
;
1284 return mlxsw_sp_span_mirror_add(mlxsw_sp_port
, act
->dev
, span_type
,
1285 true, &mirror
->span_id
);
1289 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port
*mlxsw_sp_port
,
1290 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
)
1292 enum mlxsw_sp_span_type span_type
;
1294 span_type
= mirror
->ingress
?
1295 MLXSW_SP_SPAN_INGRESS
: MLXSW_SP_SPAN_EGRESS
;
1296 mlxsw_sp_span_mirror_del(mlxsw_sp_port
, mirror
->span_id
,
1301 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port
*mlxsw_sp_port
,
1302 struct tc_cls_matchall_offload
*cls
,
1303 const struct flow_action_entry
*act
,
1308 if (!mlxsw_sp_port
->sample
)
1310 if (rtnl_dereference(mlxsw_sp_port
->sample
->psample_group
)) {
1311 netdev_err(mlxsw_sp_port
->dev
, "sample already active\n");
1314 if (act
->sample
.rate
> MLXSW_REG_MPSC_RATE_MAX
) {
1315 netdev_err(mlxsw_sp_port
->dev
, "sample rate not supported\n");
1319 rcu_assign_pointer(mlxsw_sp_port
->sample
->psample_group
,
1320 act
->sample
.psample_group
);
1321 mlxsw_sp_port
->sample
->truncate
= act
->sample
.truncate
;
1322 mlxsw_sp_port
->sample
->trunc_size
= act
->sample
.trunc_size
;
1323 mlxsw_sp_port
->sample
->rate
= act
->sample
.rate
;
1325 err
= mlxsw_sp_port_sample_set(mlxsw_sp_port
, true, act
->sample
.rate
);
1327 goto err_port_sample_set
;
1330 err_port_sample_set
:
1331 RCU_INIT_POINTER(mlxsw_sp_port
->sample
->psample_group
, NULL
);
1336 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port
*mlxsw_sp_port
)
1338 if (!mlxsw_sp_port
->sample
)
1341 mlxsw_sp_port_sample_set(mlxsw_sp_port
, false, 1);
1342 RCU_INIT_POINTER(mlxsw_sp_port
->sample
->psample_group
, NULL
);
1345 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port
*mlxsw_sp_port
,
1346 struct tc_cls_matchall_offload
*f
,
1349 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1350 __be16 protocol
= f
->common
.protocol
;
1351 struct flow_action_entry
*act
;
1354 if (!flow_offload_has_one_action(&f
->rule
->action
)) {
1355 netdev_err(mlxsw_sp_port
->dev
, "only singular actions are supported\n");
1359 mall_tc_entry
= kzalloc(sizeof(*mall_tc_entry
), GFP_KERNEL
);
1362 mall_tc_entry
->cookie
= f
->cookie
;
1364 act
= &f
->rule
->action
.entries
[0];
1366 if (act
->id
== FLOW_ACTION_MIRRED
&& protocol
== htons(ETH_P_ALL
)) {
1367 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
;
1369 mall_tc_entry
->type
= MLXSW_SP_PORT_MALL_MIRROR
;
1370 mirror
= &mall_tc_entry
->mirror
;
1371 err
= mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port
,
1374 } else if (act
->id
== FLOW_ACTION_SAMPLE
&&
1375 protocol
== htons(ETH_P_ALL
)) {
1376 mall_tc_entry
->type
= MLXSW_SP_PORT_MALL_SAMPLE
;
1377 err
= mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port
, f
,
1384 goto err_add_action
;
1386 list_add_tail(&mall_tc_entry
->list
, &mlxsw_sp_port
->mall_tc_list
);
1390 kfree(mall_tc_entry
);
1394 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port
*mlxsw_sp_port
,
1395 struct tc_cls_matchall_offload
*f
)
1397 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1399 mall_tc_entry
= mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port
,
1401 if (!mall_tc_entry
) {
1402 netdev_dbg(mlxsw_sp_port
->dev
, "tc entry not found on port\n");
1405 list_del(&mall_tc_entry
->list
);
1407 switch (mall_tc_entry
->type
) {
1408 case MLXSW_SP_PORT_MALL_MIRROR
:
1409 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port
,
1410 &mall_tc_entry
->mirror
);
1412 case MLXSW_SP_PORT_MALL_SAMPLE
:
1413 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port
);
1419 kfree(mall_tc_entry
);
1422 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port
*mlxsw_sp_port
,
1423 struct tc_cls_matchall_offload
*f
,
1426 switch (f
->command
) {
1427 case TC_CLSMATCHALL_REPLACE
:
1428 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port
, f
,
1430 case TC_CLSMATCHALL_DESTROY
:
1431 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port
, f
);
1439 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_acl_block
*acl_block
,
1440 struct tc_cls_flower_offload
*f
)
1442 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_acl_block_mlxsw_sp(acl_block
);
1444 switch (f
->command
) {
1445 case TC_CLSFLOWER_REPLACE
:
1446 return mlxsw_sp_flower_replace(mlxsw_sp
, acl_block
, f
);
1447 case TC_CLSFLOWER_DESTROY
:
1448 mlxsw_sp_flower_destroy(mlxsw_sp
, acl_block
, f
);
1450 case TC_CLSFLOWER_STATS
:
1451 return mlxsw_sp_flower_stats(mlxsw_sp
, acl_block
, f
);
1452 case TC_CLSFLOWER_TMPLT_CREATE
:
1453 return mlxsw_sp_flower_tmplt_create(mlxsw_sp
, acl_block
, f
);
1454 case TC_CLSFLOWER_TMPLT_DESTROY
:
1455 mlxsw_sp_flower_tmplt_destroy(mlxsw_sp
, acl_block
, f
);
1462 static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type
,
1464 void *cb_priv
, bool ingress
)
1466 struct mlxsw_sp_port
*mlxsw_sp_port
= cb_priv
;
1469 case TC_SETUP_CLSMATCHALL
:
1470 if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port
->dev
,
1474 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port
, type_data
,
1476 case TC_SETUP_CLSFLOWER
:
1483 static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type
,
1487 return mlxsw_sp_setup_tc_block_cb_matchall(type
, type_data
,
1491 static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type
,
1495 return mlxsw_sp_setup_tc_block_cb_matchall(type
, type_data
,
1499 static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type
,
1500 void *type_data
, void *cb_priv
)
1502 struct mlxsw_sp_acl_block
*acl_block
= cb_priv
;
1505 case TC_SETUP_CLSMATCHALL
:
1507 case TC_SETUP_CLSFLOWER
:
1508 if (mlxsw_sp_acl_block_disabled(acl_block
))
1511 return mlxsw_sp_setup_tc_cls_flower(acl_block
, type_data
);
1518 mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port
*mlxsw_sp_port
,
1519 struct tcf_block
*block
, bool ingress
,
1520 struct netlink_ext_ack
*extack
)
1522 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1523 struct mlxsw_sp_acl_block
*acl_block
;
1524 struct tcf_block_cb
*block_cb
;
1527 block_cb
= tcf_block_cb_lookup(block
, mlxsw_sp_setup_tc_block_cb_flower
,
1530 acl_block
= mlxsw_sp_acl_block_create(mlxsw_sp
, block
->net
);
1533 block_cb
= __tcf_block_cb_register(block
,
1534 mlxsw_sp_setup_tc_block_cb_flower
,
1535 mlxsw_sp
, acl_block
, extack
);
1536 if (IS_ERR(block_cb
)) {
1537 err
= PTR_ERR(block_cb
);
1538 goto err_cb_register
;
1541 acl_block
= tcf_block_cb_priv(block_cb
);
1543 tcf_block_cb_incref(block_cb
);
1544 err
= mlxsw_sp_acl_block_bind(mlxsw_sp
, acl_block
,
1545 mlxsw_sp_port
, ingress
);
1547 goto err_block_bind
;
1550 mlxsw_sp_port
->ing_acl_block
= acl_block
;
1552 mlxsw_sp_port
->eg_acl_block
= acl_block
;
1557 if (!tcf_block_cb_decref(block_cb
)) {
1558 __tcf_block_cb_unregister(block
, block_cb
);
1560 mlxsw_sp_acl_block_destroy(acl_block
);
1566 mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port
*mlxsw_sp_port
,
1567 struct tcf_block
*block
, bool ingress
)
1569 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1570 struct mlxsw_sp_acl_block
*acl_block
;
1571 struct tcf_block_cb
*block_cb
;
1574 block_cb
= tcf_block_cb_lookup(block
, mlxsw_sp_setup_tc_block_cb_flower
,
1580 mlxsw_sp_port
->ing_acl_block
= NULL
;
1582 mlxsw_sp_port
->eg_acl_block
= NULL
;
1584 acl_block
= tcf_block_cb_priv(block_cb
);
1585 err
= mlxsw_sp_acl_block_unbind(mlxsw_sp
, acl_block
,
1586 mlxsw_sp_port
, ingress
);
1587 if (!err
&& !tcf_block_cb_decref(block_cb
)) {
1588 __tcf_block_cb_unregister(block
, block_cb
);
1589 mlxsw_sp_acl_block_destroy(acl_block
);
1593 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port
*mlxsw_sp_port
,
1594 struct tc_block_offload
*f
)
1600 if (f
->binder_type
== TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
) {
1601 cb
= mlxsw_sp_setup_tc_block_cb_matchall_ig
;
1603 } else if (f
->binder_type
== TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS
) {
1604 cb
= mlxsw_sp_setup_tc_block_cb_matchall_eg
;
1610 switch (f
->command
) {
1612 err
= tcf_block_cb_register(f
->block
, cb
, mlxsw_sp_port
,
1613 mlxsw_sp_port
, f
->extack
);
1616 err
= mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port
,
1620 tcf_block_cb_unregister(f
->block
, cb
, mlxsw_sp_port
);
1624 case TC_BLOCK_UNBIND
:
1625 mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port
,
1627 tcf_block_cb_unregister(f
->block
, cb
, mlxsw_sp_port
);
1634 static int mlxsw_sp_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
1637 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1640 case TC_SETUP_BLOCK
:
1641 return mlxsw_sp_setup_tc_block(mlxsw_sp_port
, type_data
);
1642 case TC_SETUP_QDISC_RED
:
1643 return mlxsw_sp_setup_tc_red(mlxsw_sp_port
, type_data
);
1644 case TC_SETUP_QDISC_PRIO
:
1645 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port
, type_data
);
1652 static int mlxsw_sp_feature_hw_tc(struct net_device
*dev
, bool enable
)
1654 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1657 if (mlxsw_sp_acl_block_rule_count(mlxsw_sp_port
->ing_acl_block
) ||
1658 mlxsw_sp_acl_block_rule_count(mlxsw_sp_port
->eg_acl_block
) ||
1659 !list_empty(&mlxsw_sp_port
->mall_tc_list
)) {
1660 netdev_err(dev
, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1663 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port
->ing_acl_block
);
1664 mlxsw_sp_acl_block_disable_inc(mlxsw_sp_port
->eg_acl_block
);
1666 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port
->ing_acl_block
);
1667 mlxsw_sp_acl_block_disable_dec(mlxsw_sp_port
->eg_acl_block
);
1672 static int mlxsw_sp_feature_loopback(struct net_device
*dev
, bool enable
)
1674 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1675 char pplr_pl
[MLXSW_REG_PPLR_LEN
];
1678 if (netif_running(dev
))
1679 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
1681 mlxsw_reg_pplr_pack(pplr_pl
, mlxsw_sp_port
->local_port
, enable
);
1682 err
= mlxsw_reg_write(mlxsw_sp_port
->mlxsw_sp
->core
, MLXSW_REG(pplr
),
1685 if (netif_running(dev
))
1686 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
1691 typedef int (*mlxsw_sp_feature_handler
)(struct net_device
*dev
, bool enable
);
1693 static int mlxsw_sp_handle_feature(struct net_device
*dev
,
1694 netdev_features_t wanted_features
,
1695 netdev_features_t feature
,
1696 mlxsw_sp_feature_handler feature_handler
)
1698 netdev_features_t changes
= wanted_features
^ dev
->features
;
1699 bool enable
= !!(wanted_features
& feature
);
1702 if (!(changes
& feature
))
1705 err
= feature_handler(dev
, enable
);
1707 netdev_err(dev
, "%s feature %pNF failed, err %d\n",
1708 enable
? "Enable" : "Disable", &feature
, err
);
1713 dev
->features
|= feature
;
1715 dev
->features
&= ~feature
;
1719 static int mlxsw_sp_set_features(struct net_device
*dev
,
1720 netdev_features_t features
)
1722 netdev_features_t oper_features
= dev
->features
;
1725 err
|= mlxsw_sp_handle_feature(dev
, features
, NETIF_F_HW_TC
,
1726 mlxsw_sp_feature_hw_tc
);
1727 err
|= mlxsw_sp_handle_feature(dev
, features
, NETIF_F_LOOPBACK
,
1728 mlxsw_sp_feature_loopback
);
1731 dev
->features
= oper_features
;
1738 static struct devlink_port
*
1739 mlxsw_sp_port_get_devlink_port(struct net_device
*dev
)
1741 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1742 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1744 return mlxsw_core_port_devlink_port_get(mlxsw_sp
->core
,
1745 mlxsw_sp_port
->local_port
);
1748 static const struct net_device_ops mlxsw_sp_port_netdev_ops
= {
1749 .ndo_open
= mlxsw_sp_port_open
,
1750 .ndo_stop
= mlxsw_sp_port_stop
,
1751 .ndo_start_xmit
= mlxsw_sp_port_xmit
,
1752 .ndo_setup_tc
= mlxsw_sp_setup_tc
,
1753 .ndo_set_rx_mode
= mlxsw_sp_set_rx_mode
,
1754 .ndo_set_mac_address
= mlxsw_sp_port_set_mac_address
,
1755 .ndo_change_mtu
= mlxsw_sp_port_change_mtu
,
1756 .ndo_get_stats64
= mlxsw_sp_port_get_stats64
,
1757 .ndo_has_offload_stats
= mlxsw_sp_port_has_offload_stats
,
1758 .ndo_get_offload_stats
= mlxsw_sp_port_get_offload_stats
,
1759 .ndo_vlan_rx_add_vid
= mlxsw_sp_port_add_vid
,
1760 .ndo_vlan_rx_kill_vid
= mlxsw_sp_port_kill_vid
,
1761 .ndo_set_features
= mlxsw_sp_set_features
,
1762 .ndo_get_devlink_port
= mlxsw_sp_port_get_devlink_port
,
1765 static void mlxsw_sp_port_get_drvinfo(struct net_device
*dev
,
1766 struct ethtool_drvinfo
*drvinfo
)
1768 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1769 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1771 strlcpy(drvinfo
->driver
, mlxsw_sp
->bus_info
->device_kind
,
1772 sizeof(drvinfo
->driver
));
1773 strlcpy(drvinfo
->version
, mlxsw_sp_driver_version
,
1774 sizeof(drvinfo
->version
));
1775 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
1777 mlxsw_sp
->bus_info
->fw_rev
.major
,
1778 mlxsw_sp
->bus_info
->fw_rev
.minor
,
1779 mlxsw_sp
->bus_info
->fw_rev
.subminor
);
1780 strlcpy(drvinfo
->bus_info
, mlxsw_sp
->bus_info
->device_name
,
1781 sizeof(drvinfo
->bus_info
));
1784 static void mlxsw_sp_port_get_pauseparam(struct net_device
*dev
,
1785 struct ethtool_pauseparam
*pause
)
1787 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1789 pause
->rx_pause
= mlxsw_sp_port
->link
.rx_pause
;
1790 pause
->tx_pause
= mlxsw_sp_port
->link
.tx_pause
;
1793 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1794 struct ethtool_pauseparam
*pause
)
1796 char pfcc_pl
[MLXSW_REG_PFCC_LEN
];
1798 mlxsw_reg_pfcc_pack(pfcc_pl
, mlxsw_sp_port
->local_port
);
1799 mlxsw_reg_pfcc_pprx_set(pfcc_pl
, pause
->rx_pause
);
1800 mlxsw_reg_pfcc_pptx_set(pfcc_pl
, pause
->tx_pause
);
1802 return mlxsw_reg_write(mlxsw_sp_port
->mlxsw_sp
->core
, MLXSW_REG(pfcc
),
1806 static int mlxsw_sp_port_set_pauseparam(struct net_device
*dev
,
1807 struct ethtool_pauseparam
*pause
)
1809 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1810 bool pause_en
= pause
->tx_pause
|| pause
->rx_pause
;
1813 if (mlxsw_sp_port
->dcb
.pfc
&& mlxsw_sp_port
->dcb
.pfc
->pfc_en
) {
1814 netdev_err(dev
, "PFC already enabled on port\n");
1818 if (pause
->autoneg
) {
1819 netdev_err(dev
, "PAUSE frames autonegotiation isn't supported\n");
1823 err
= mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
1825 netdev_err(dev
, "Failed to configure port's headroom\n");
1829 err
= mlxsw_sp_port_pause_set(mlxsw_sp_port
, pause
);
1831 netdev_err(dev
, "Failed to set PAUSE parameters\n");
1832 goto err_port_pause_configure
;
1835 mlxsw_sp_port
->link
.rx_pause
= pause
->rx_pause
;
1836 mlxsw_sp_port
->link
.tx_pause
= pause
->tx_pause
;
1840 err_port_pause_configure
:
1841 pause_en
= mlxsw_sp_port_is_pause_en(mlxsw_sp_port
);
1842 mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
1846 struct mlxsw_sp_port_hw_stats
{
1847 char str
[ETH_GSTRING_LEN
];
1848 u64 (*getter
)(const char *payload
);
1852 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats
[] = {
1854 .str
= "a_frames_transmitted_ok",
1855 .getter
= mlxsw_reg_ppcnt_a_frames_transmitted_ok_get
,
1858 .str
= "a_frames_received_ok",
1859 .getter
= mlxsw_reg_ppcnt_a_frames_received_ok_get
,
1862 .str
= "a_frame_check_sequence_errors",
1863 .getter
= mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get
,
1866 .str
= "a_alignment_errors",
1867 .getter
= mlxsw_reg_ppcnt_a_alignment_errors_get
,
1870 .str
= "a_octets_transmitted_ok",
1871 .getter
= mlxsw_reg_ppcnt_a_octets_transmitted_ok_get
,
1874 .str
= "a_octets_received_ok",
1875 .getter
= mlxsw_reg_ppcnt_a_octets_received_ok_get
,
1878 .str
= "a_multicast_frames_xmitted_ok",
1879 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get
,
1882 .str
= "a_broadcast_frames_xmitted_ok",
1883 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get
,
1886 .str
= "a_multicast_frames_received_ok",
1887 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get
,
1890 .str
= "a_broadcast_frames_received_ok",
1891 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get
,
1894 .str
= "a_in_range_length_errors",
1895 .getter
= mlxsw_reg_ppcnt_a_in_range_length_errors_get
,
1898 .str
= "a_out_of_range_length_field",
1899 .getter
= mlxsw_reg_ppcnt_a_out_of_range_length_field_get
,
1902 .str
= "a_frame_too_long_errors",
1903 .getter
= mlxsw_reg_ppcnt_a_frame_too_long_errors_get
,
1906 .str
= "a_symbol_error_during_carrier",
1907 .getter
= mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get
,
1910 .str
= "a_mac_control_frames_transmitted",
1911 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get
,
1914 .str
= "a_mac_control_frames_received",
1915 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_received_get
,
1918 .str
= "a_unsupported_opcodes_received",
1919 .getter
= mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get
,
1922 .str
= "a_pause_mac_ctrl_frames_received",
1923 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get
,
1926 .str
= "a_pause_mac_ctrl_frames_xmitted",
1927 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get
,
1931 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1933 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats
[] = {
1935 .str
= "if_in_discards",
1936 .getter
= mlxsw_reg_ppcnt_if_in_discards_get
,
1939 .str
= "if_out_discards",
1940 .getter
= mlxsw_reg_ppcnt_if_out_discards_get
,
1943 .str
= "if_out_errors",
1944 .getter
= mlxsw_reg_ppcnt_if_out_errors_get
,
1948 #define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
1949 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
1951 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats
[] = {
1953 .str
= "ether_stats_undersize_pkts",
1954 .getter
= mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get
,
1957 .str
= "ether_stats_oversize_pkts",
1958 .getter
= mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get
,
1961 .str
= "ether_stats_fragments",
1962 .getter
= mlxsw_reg_ppcnt_ether_stats_fragments_get
,
1965 .str
= "ether_pkts64octets",
1966 .getter
= mlxsw_reg_ppcnt_ether_stats_pkts64octets_get
,
1969 .str
= "ether_pkts65to127octets",
1970 .getter
= mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get
,
1973 .str
= "ether_pkts128to255octets",
1974 .getter
= mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get
,
1977 .str
= "ether_pkts256to511octets",
1978 .getter
= mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get
,
1981 .str
= "ether_pkts512to1023octets",
1982 .getter
= mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get
,
1985 .str
= "ether_pkts1024to1518octets",
1986 .getter
= mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get
,
1989 .str
= "ether_pkts1519to2047octets",
1990 .getter
= mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get
,
1993 .str
= "ether_pkts2048to4095octets",
1994 .getter
= mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get
,
1997 .str
= "ether_pkts4096to8191octets",
1998 .getter
= mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get
,
2001 .str
= "ether_pkts8192to10239octets",
2002 .getter
= mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get
,
2006 #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
2007 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
2009 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats
[] = {
2011 .str
= "dot3stats_fcs_errors",
2012 .getter
= mlxsw_reg_ppcnt_dot3stats_fcs_errors_get
,
2015 .str
= "dot3stats_symbol_errors",
2016 .getter
= mlxsw_reg_ppcnt_dot3stats_symbol_errors_get
,
2019 .str
= "dot3control_in_unknown_opcodes",
2020 .getter
= mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get
,
2023 .str
= "dot3in_pause_frames",
2024 .getter
= mlxsw_reg_ppcnt_dot3in_pause_frames_get
,
2028 #define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
2029 ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
2031 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats
[] = {
2033 .str
= "discard_ingress_general",
2034 .getter
= mlxsw_reg_ppcnt_ingress_general_get
,
2037 .str
= "discard_ingress_policy_engine",
2038 .getter
= mlxsw_reg_ppcnt_ingress_policy_engine_get
,
2041 .str
= "discard_ingress_vlan_membership",
2042 .getter
= mlxsw_reg_ppcnt_ingress_vlan_membership_get
,
2045 .str
= "discard_ingress_tag_frame_type",
2046 .getter
= mlxsw_reg_ppcnt_ingress_tag_frame_type_get
,
2049 .str
= "discard_egress_vlan_membership",
2050 .getter
= mlxsw_reg_ppcnt_egress_vlan_membership_get
,
2053 .str
= "discard_loopback_filter",
2054 .getter
= mlxsw_reg_ppcnt_loopback_filter_get
,
2057 .str
= "discard_egress_general",
2058 .getter
= mlxsw_reg_ppcnt_egress_general_get
,
2061 .str
= "discard_egress_hoq",
2062 .getter
= mlxsw_reg_ppcnt_egress_hoq_get
,
2065 .str
= "discard_egress_policy_engine",
2066 .getter
= mlxsw_reg_ppcnt_egress_policy_engine_get
,
2069 .str
= "discard_ingress_tx_link_down",
2070 .getter
= mlxsw_reg_ppcnt_ingress_tx_link_down_get
,
2073 .str
= "discard_egress_stp_filter",
2074 .getter
= mlxsw_reg_ppcnt_egress_stp_filter_get
,
2077 .str
= "discard_egress_sll",
2078 .getter
= mlxsw_reg_ppcnt_egress_sll_get
,
2082 #define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
2083 ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
2085 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats
[] = {
2087 .str
= "rx_octets_prio",
2088 .getter
= mlxsw_reg_ppcnt_rx_octets_get
,
2091 .str
= "rx_frames_prio",
2092 .getter
= mlxsw_reg_ppcnt_rx_frames_get
,
2095 .str
= "tx_octets_prio",
2096 .getter
= mlxsw_reg_ppcnt_tx_octets_get
,
2099 .str
= "tx_frames_prio",
2100 .getter
= mlxsw_reg_ppcnt_tx_frames_get
,
2103 .str
= "rx_pause_prio",
2104 .getter
= mlxsw_reg_ppcnt_rx_pause_get
,
2107 .str
= "rx_pause_duration_prio",
2108 .getter
= mlxsw_reg_ppcnt_rx_pause_duration_get
,
2111 .str
= "tx_pause_prio",
2112 .getter
= mlxsw_reg_ppcnt_tx_pause_get
,
2115 .str
= "tx_pause_duration_prio",
2116 .getter
= mlxsw_reg_ppcnt_tx_pause_duration_get
,
2120 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
2122 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats
[] = {
2124 .str
= "tc_transmit_queue_tc",
2125 .getter
= mlxsw_reg_ppcnt_tc_transmit_queue_get
,
2126 .cells_bytes
= true,
2129 .str
= "tc_no_buffer_discard_uc_tc",
2130 .getter
= mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get
,
2134 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
2136 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
2137 MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
2138 MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
2139 MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
2140 MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
2141 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
2142 IEEE_8021QAZ_MAX_TCS) + \
2143 (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
2146 static void mlxsw_sp_port_get_prio_strings(u8
**p
, int prio
)
2150 for (i
= 0; i
< MLXSW_SP_PORT_HW_PRIO_STATS_LEN
; i
++) {
2151 snprintf(*p
, ETH_GSTRING_LEN
, "%.29s_%.1d",
2152 mlxsw_sp_port_hw_prio_stats
[i
].str
, prio
);
2153 *p
+= ETH_GSTRING_LEN
;
2157 static void mlxsw_sp_port_get_tc_strings(u8
**p
, int tc
)
2161 for (i
= 0; i
< MLXSW_SP_PORT_HW_TC_STATS_LEN
; i
++) {
2162 snprintf(*p
, ETH_GSTRING_LEN
, "%.29s_%.1d",
2163 mlxsw_sp_port_hw_tc_stats
[i
].str
, tc
);
2164 *p
+= ETH_GSTRING_LEN
;
2168 static void mlxsw_sp_port_get_strings(struct net_device
*dev
,
2169 u32 stringset
, u8
*data
)
2174 switch (stringset
) {
2176 for (i
= 0; i
< MLXSW_SP_PORT_HW_STATS_LEN
; i
++) {
2177 memcpy(p
, mlxsw_sp_port_hw_stats
[i
].str
,
2179 p
+= ETH_GSTRING_LEN
;
2182 for (i
= 0; i
< MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN
; i
++) {
2183 memcpy(p
, mlxsw_sp_port_hw_rfc_2863_stats
[i
].str
,
2185 p
+= ETH_GSTRING_LEN
;
2188 for (i
= 0; i
< MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN
; i
++) {
2189 memcpy(p
, mlxsw_sp_port_hw_rfc_2819_stats
[i
].str
,
2191 p
+= ETH_GSTRING_LEN
;
2194 for (i
= 0; i
< MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN
; i
++) {
2195 memcpy(p
, mlxsw_sp_port_hw_rfc_3635_stats
[i
].str
,
2197 p
+= ETH_GSTRING_LEN
;
2200 for (i
= 0; i
< MLXSW_SP_PORT_HW_DISCARD_STATS_LEN
; i
++) {
2201 memcpy(p
, mlxsw_sp_port_hw_discard_stats
[i
].str
,
2203 p
+= ETH_GSTRING_LEN
;
2206 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
2207 mlxsw_sp_port_get_prio_strings(&p
, i
);
2209 for (i
= 0; i
< TC_MAX_QUEUE
; i
++)
2210 mlxsw_sp_port_get_tc_strings(&p
, i
);
2216 static int mlxsw_sp_port_set_phys_id(struct net_device
*dev
,
2217 enum ethtool_phys_id_state state
)
2219 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
2220 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2221 char mlcr_pl
[MLXSW_REG_MLCR_LEN
];
2225 case ETHTOOL_ID_ACTIVE
:
2228 case ETHTOOL_ID_INACTIVE
:
2235 mlxsw_reg_mlcr_pack(mlcr_pl
, mlxsw_sp_port
->local_port
, active
);
2236 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mlcr
), mlcr_pl
);
2240 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats
**p_hw_stats
,
2241 int *p_len
, enum mlxsw_reg_ppcnt_grp grp
)
2244 case MLXSW_REG_PPCNT_IEEE_8023_CNT
:
2245 *p_hw_stats
= mlxsw_sp_port_hw_stats
;
2246 *p_len
= MLXSW_SP_PORT_HW_STATS_LEN
;
2248 case MLXSW_REG_PPCNT_RFC_2863_CNT
:
2249 *p_hw_stats
= mlxsw_sp_port_hw_rfc_2863_stats
;
2250 *p_len
= MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN
;
2252 case MLXSW_REG_PPCNT_RFC_2819_CNT
:
2253 *p_hw_stats
= mlxsw_sp_port_hw_rfc_2819_stats
;
2254 *p_len
= MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN
;
2256 case MLXSW_REG_PPCNT_RFC_3635_CNT
:
2257 *p_hw_stats
= mlxsw_sp_port_hw_rfc_3635_stats
;
2258 *p_len
= MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN
;
2260 case MLXSW_REG_PPCNT_DISCARD_CNT
:
2261 *p_hw_stats
= mlxsw_sp_port_hw_discard_stats
;
2262 *p_len
= MLXSW_SP_PORT_HW_DISCARD_STATS_LEN
;
2264 case MLXSW_REG_PPCNT_PRIO_CNT
:
2265 *p_hw_stats
= mlxsw_sp_port_hw_prio_stats
;
2266 *p_len
= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
2268 case MLXSW_REG_PPCNT_TC_CNT
:
2269 *p_hw_stats
= mlxsw_sp_port_hw_tc_stats
;
2270 *p_len
= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
2279 static void __mlxsw_sp_port_get_stats(struct net_device
*dev
,
2280 enum mlxsw_reg_ppcnt_grp grp
, int prio
,
2281 u64
*data
, int data_index
)
2283 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
2284 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2285 struct mlxsw_sp_port_hw_stats
*hw_stats
;
2286 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
2290 err
= mlxsw_sp_get_hw_stats_by_group(&hw_stats
, &len
, grp
);
2293 mlxsw_sp_port_get_stats_raw(dev
, grp
, prio
, ppcnt_pl
);
2294 for (i
= 0; i
< len
; i
++) {
2295 data
[data_index
+ i
] = hw_stats
[i
].getter(ppcnt_pl
);
2296 if (!hw_stats
[i
].cells_bytes
)
2298 data
[data_index
+ i
] = mlxsw_sp_cells_bytes(mlxsw_sp
,
2299 data
[data_index
+ i
]);
2303 static void mlxsw_sp_port_get_stats(struct net_device
*dev
,
2304 struct ethtool_stats
*stats
, u64
*data
)
2306 int i
, data_index
= 0;
2308 /* IEEE 802.3 Counters */
2309 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_IEEE_8023_CNT
, 0,
2311 data_index
= MLXSW_SP_PORT_HW_STATS_LEN
;
2313 /* RFC 2863 Counters */
2314 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_RFC_2863_CNT
, 0,
2316 data_index
+= MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN
;
2318 /* RFC 2819 Counters */
2319 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_RFC_2819_CNT
, 0,
2321 data_index
+= MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN
;
2323 /* RFC 3635 Counters */
2324 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_RFC_3635_CNT
, 0,
2326 data_index
+= MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN
;
2328 /* Discard Counters */
2329 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_DISCARD_CNT
, 0,
2331 data_index
+= MLXSW_SP_PORT_HW_DISCARD_STATS_LEN
;
2333 /* Per-Priority Counters */
2334 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2335 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_PRIO_CNT
, i
,
2337 data_index
+= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
2340 /* Per-TC Counters */
2341 for (i
= 0; i
< TC_MAX_QUEUE
; i
++) {
2342 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_TC_CNT
, i
,
2344 data_index
+= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
2348 static int mlxsw_sp_port_get_sset_count(struct net_device
*dev
, int sset
)
2352 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN
;
2358 struct mlxsw_sp1_port_link_mode
{
2359 enum ethtool_link_mode_bit_indices mask_ethtool
;
2364 static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode
[] = {
2366 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T
,
2367 .mask_ethtool
= ETHTOOL_LINK_MODE_100baseT_Full_BIT
,
2371 .mask
= MLXSW_REG_PTYS_ETH_SPEED_SGMII
|
2372 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
,
2373 .mask_ethtool
= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
,
2374 .speed
= SPEED_1000
,
2377 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T
,
2378 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseT_Full_BIT
,
2379 .speed
= SPEED_10000
,
2382 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4
|
2383 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
,
2384 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT
,
2385 .speed
= SPEED_10000
,
2388 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
2389 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
2390 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
2391 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR
,
2392 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
,
2393 .speed
= SPEED_10000
,
2396 .mask
= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2
,
2397 .mask_ethtool
= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT
,
2398 .speed
= SPEED_20000
,
2401 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
,
2402 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT
,
2403 .speed
= SPEED_40000
,
2406 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
,
2407 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT
,
2408 .speed
= SPEED_40000
,
2411 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
,
2412 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT
,
2413 .speed
= SPEED_40000
,
2416 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4
,
2417 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT
,
2418 .speed
= SPEED_40000
,
2421 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR
,
2422 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT
,
2423 .speed
= SPEED_25000
,
2426 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR
,
2427 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT
,
2428 .speed
= SPEED_25000
,
2431 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
2432 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
,
2433 .speed
= SPEED_25000
,
2436 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2
,
2437 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT
,
2438 .speed
= SPEED_50000
,
2441 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2
,
2442 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT
,
2443 .speed
= SPEED_50000
,
2446 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2
,
2447 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT
,
2448 .speed
= SPEED_50000
,
2451 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2452 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT
,
2453 .speed
= SPEED_56000
,
2456 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2457 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT
,
2458 .speed
= SPEED_56000
,
2461 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2462 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT
,
2463 .speed
= SPEED_56000
,
2466 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2467 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT
,
2468 .speed
= SPEED_56000
,
2471 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
,
2472 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT
,
2473 .speed
= SPEED_100000
,
2476 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
,
2477 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT
,
2478 .speed
= SPEED_100000
,
2481 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
,
2482 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
,
2483 .speed
= SPEED_100000
,
2486 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4
,
2487 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT
,
2488 .speed
= SPEED_100000
,
2492 #define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)
2495 mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp
*mlxsw_sp
,
2497 struct ethtool_link_ksettings
*cmd
)
2499 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
2500 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
2501 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
2502 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
2503 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
2504 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
2505 ethtool_link_ksettings_add_link_mode(cmd
, supported
, FIBRE
);
2507 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
2508 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
2509 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
2510 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
|
2511 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
))
2512 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Backplane
);
2516 mlxsw_sp1_from_ptys_link(struct mlxsw_sp
*mlxsw_sp
, u32 ptys_eth_proto
,
2517 unsigned long *mode
)
2521 for (i
= 0; i
< MLXSW_SP1_PORT_LINK_MODE_LEN
; i
++) {
2522 if (ptys_eth_proto
& mlxsw_sp1_port_link_mode
[i
].mask
)
2523 __set_bit(mlxsw_sp1_port_link_mode
[i
].mask_ethtool
,
2529 mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp
*mlxsw_sp
, bool carrier_ok
,
2531 struct ethtool_link_ksettings
*cmd
)
2533 u32 speed
= SPEED_UNKNOWN
;
2534 u8 duplex
= DUPLEX_UNKNOWN
;
2540 for (i
= 0; i
< MLXSW_SP1_PORT_LINK_MODE_LEN
; i
++) {
2541 if (ptys_eth_proto
& mlxsw_sp1_port_link_mode
[i
].mask
) {
2542 speed
= mlxsw_sp1_port_link_mode
[i
].speed
;
2543 duplex
= DUPLEX_FULL
;
2548 cmd
->base
.speed
= speed
;
2549 cmd
->base
.duplex
= duplex
;
2553 mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp
*mlxsw_sp
,
2554 const struct ethtool_link_ksettings
*cmd
)
2559 for (i
= 0; i
< MLXSW_SP1_PORT_LINK_MODE_LEN
; i
++) {
2560 if (test_bit(mlxsw_sp1_port_link_mode
[i
].mask_ethtool
,
2561 cmd
->link_modes
.advertising
))
2562 ptys_proto
|= mlxsw_sp1_port_link_mode
[i
].mask
;
2567 static u32
mlxsw_sp1_to_ptys_speed(struct mlxsw_sp
*mlxsw_sp
, u32 speed
)
2572 for (i
= 0; i
< MLXSW_SP1_PORT_LINK_MODE_LEN
; i
++) {
2573 if (speed
== mlxsw_sp1_port_link_mode
[i
].speed
)
2574 ptys_proto
|= mlxsw_sp1_port_link_mode
[i
].mask
;
2580 mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp
*mlxsw_sp
, u32 upper_speed
)
2585 for (i
= 0; i
< MLXSW_SP1_PORT_LINK_MODE_LEN
; i
++) {
2586 if (mlxsw_sp1_port_link_mode
[i
].speed
<= upper_speed
)
2587 ptys_proto
|= mlxsw_sp1_port_link_mode
[i
].mask
;
2593 mlxsw_sp1_port_speed_base(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
2596 *base_speed
= MLXSW_SP_PORT_BASE_SPEED_25G
;
2601 mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp
*mlxsw_sp
, char *payload
,
2602 u8 local_port
, u32 proto_admin
, bool autoneg
)
2604 mlxsw_reg_ptys_eth_pack(payload
, local_port
, proto_admin
, autoneg
);
2608 mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp
*mlxsw_sp
, char *payload
,
2609 u32
*p_eth_proto_cap
, u32
*p_eth_proto_admin
,
2610 u32
*p_eth_proto_oper
)
2612 mlxsw_reg_ptys_eth_unpack(payload
, p_eth_proto_cap
, p_eth_proto_admin
,
2616 static const struct mlxsw_sp_port_type_speed_ops
2617 mlxsw_sp1_port_type_speed_ops
= {
2618 .from_ptys_supported_port
= mlxsw_sp1_from_ptys_supported_port
,
2619 .from_ptys_link
= mlxsw_sp1_from_ptys_link
,
2620 .from_ptys_speed_duplex
= mlxsw_sp1_from_ptys_speed_duplex
,
2621 .to_ptys_advert_link
= mlxsw_sp1_to_ptys_advert_link
,
2622 .to_ptys_speed
= mlxsw_sp1_to_ptys_speed
,
2623 .to_ptys_upper_speed
= mlxsw_sp1_to_ptys_upper_speed
,
2624 .port_speed_base
= mlxsw_sp1_port_speed_base
,
2625 .reg_ptys_eth_pack
= mlxsw_sp1_reg_ptys_eth_pack
,
2626 .reg_ptys_eth_unpack
= mlxsw_sp1_reg_ptys_eth_unpack
,
2629 static const enum ethtool_link_mode_bit_indices
2630 mlxsw_sp2_mask_ethtool_sgmii_100m
[] = {
2631 ETHTOOL_LINK_MODE_100baseT_Full_BIT
,
2634 #define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
2635 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)
2637 static const enum ethtool_link_mode_bit_indices
2638 mlxsw_sp2_mask_ethtool_1000base_x_sgmii
[] = {
2639 ETHTOOL_LINK_MODE_1000baseT_Full_BIT
,
2640 ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
,
2643 #define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
2644 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)
2646 static const enum ethtool_link_mode_bit_indices
2647 mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii
[] = {
2648 ETHTOOL_LINK_MODE_2500baseX_Full_BIT
,
2651 #define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
2652 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)
2654 static const enum ethtool_link_mode_bit_indices
2655 mlxsw_sp2_mask_ethtool_5gbase_r
[] = {
2656 ETHTOOL_LINK_MODE_5000baseT_Full_BIT
,
2659 #define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
2660 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)
2662 static const enum ethtool_link_mode_bit_indices
2663 mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g
[] = {
2664 ETHTOOL_LINK_MODE_10000baseT_Full_BIT
,
2665 ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
,
2666 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT
,
2667 ETHTOOL_LINK_MODE_10000baseCR_Full_BIT
,
2668 ETHTOOL_LINK_MODE_10000baseSR_Full_BIT
,
2669 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT
,
2670 ETHTOOL_LINK_MODE_10000baseER_Full_BIT
,
2673 #define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
2674 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)
2676 static const enum ethtool_link_mode_bit_indices
2677 mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g
[] = {
2678 ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT
,
2679 ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT
,
2680 ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT
,
2681 ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT
,
2684 #define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
2685 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)
2687 static const enum ethtool_link_mode_bit_indices
2688 mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr
[] = {
2689 ETHTOOL_LINK_MODE_25000baseCR_Full_BIT
,
2690 ETHTOOL_LINK_MODE_25000baseKR_Full_BIT
,
2691 ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
,
2694 #define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
2695 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)
2697 static const enum ethtool_link_mode_bit_indices
2698 mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2
[] = {
2699 ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT
,
2700 ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT
,
2701 ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT
,
2704 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
2705 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)
2707 static const enum ethtool_link_mode_bit_indices
2708 mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr
[] = {
2709 ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
,
2710 ETHTOOL_LINK_MODE_50000baseSR_Full_BIT
,
2711 ETHTOOL_LINK_MODE_50000baseCR_Full_BIT
,
2712 ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT
,
2713 ETHTOOL_LINK_MODE_50000baseDR_Full_BIT
,
2716 #define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
2717 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)
2719 static const enum ethtool_link_mode_bit_indices
2720 mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4
[] = {
2721 ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
,
2722 ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT
,
2723 ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT
,
2724 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT
,
2727 #define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
2728 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)
2730 static const enum ethtool_link_mode_bit_indices
2731 mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2
[] = {
2732 ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT
,
2733 ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT
,
2734 ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT
,
2735 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT
,
2736 ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT
,
2739 #define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
2740 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)
2742 static const enum ethtool_link_mode_bit_indices
2743 mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4
[] = {
2744 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT
,
2745 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT
,
2746 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT
,
2747 ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT
,
2748 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT
,
2751 #define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
2752 ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
2754 struct mlxsw_sp2_port_link_mode
{
2755 const enum ethtool_link_mode_bit_indices
*mask_ethtool
;
2761 static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode
[] = {
2763 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M
,
2764 .mask_ethtool
= mlxsw_sp2_mask_ethtool_sgmii_100m
,
2765 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN
,
2769 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII
,
2770 .mask_ethtool
= mlxsw_sp2_mask_ethtool_1000base_x_sgmii
,
2771 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN
,
2772 .speed
= SPEED_1000
,
2775 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII
,
2776 .mask_ethtool
= mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii
,
2777 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN
,
2778 .speed
= SPEED_2500
,
2781 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R
,
2782 .mask_ethtool
= mlxsw_sp2_mask_ethtool_5gbase_r
,
2783 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN
,
2784 .speed
= SPEED_5000
,
2787 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G
,
2788 .mask_ethtool
= mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g
,
2789 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN
,
2790 .speed
= SPEED_10000
,
2793 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G
,
2794 .mask_ethtool
= mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g
,
2795 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN
,
2796 .speed
= SPEED_40000
,
2799 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR
,
2800 .mask_ethtool
= mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr
,
2801 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN
,
2802 .speed
= SPEED_25000
,
2805 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2
,
2806 .mask_ethtool
= mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2
,
2807 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN
,
2808 .speed
= SPEED_50000
,
2811 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR
,
2812 .mask_ethtool
= mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr
,
2813 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN
,
2814 .speed
= SPEED_50000
,
2817 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4
,
2818 .mask_ethtool
= mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4
,
2819 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN
,
2820 .speed
= SPEED_100000
,
2823 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2
,
2824 .mask_ethtool
= mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2
,
2825 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN
,
2826 .speed
= SPEED_100000
,
2829 .mask
= MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4
,
2830 .mask_ethtool
= mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4
,
2831 .m_ethtool_len
= MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN
,
2832 .speed
= SPEED_200000
,
2836 #define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)
2839 mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp
*mlxsw_sp
,
2841 struct ethtool_link_ksettings
*cmd
)
2843 ethtool_link_ksettings_add_link_mode(cmd
, supported
, FIBRE
);
2844 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Backplane
);
2848 mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode
*link_mode
,
2849 unsigned long *mode
)
2853 for (i
= 0; i
< link_mode
->m_ethtool_len
; i
++)
2854 __set_bit(link_mode
->mask_ethtool
[i
], mode
);
2858 mlxsw_sp2_from_ptys_link(struct mlxsw_sp
*mlxsw_sp
, u32 ptys_eth_proto
,
2859 unsigned long *mode
)
2863 for (i
= 0; i
< MLXSW_SP2_PORT_LINK_MODE_LEN
; i
++) {
2864 if (ptys_eth_proto
& mlxsw_sp2_port_link_mode
[i
].mask
)
2865 mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode
[i
],
2871 mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp
*mlxsw_sp
, bool carrier_ok
,
2873 struct ethtool_link_ksettings
*cmd
)
2875 u32 speed
= SPEED_UNKNOWN
;
2876 u8 duplex
= DUPLEX_UNKNOWN
;
2882 for (i
= 0; i
< MLXSW_SP2_PORT_LINK_MODE_LEN
; i
++) {
2883 if (ptys_eth_proto
& mlxsw_sp2_port_link_mode
[i
].mask
) {
2884 speed
= mlxsw_sp2_port_link_mode
[i
].speed
;
2885 duplex
= DUPLEX_FULL
;
2890 cmd
->base
.speed
= speed
;
2891 cmd
->base
.duplex
= duplex
;
2895 mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode
*link_mode
,
2896 const unsigned long *mode
)
2901 for (i
= 0; i
< link_mode
->m_ethtool_len
; i
++) {
2902 if (test_bit(link_mode
->mask_ethtool
[i
], mode
))
2906 return cnt
== link_mode
->m_ethtool_len
;
2910 mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp
*mlxsw_sp
,
2911 const struct ethtool_link_ksettings
*cmd
)
2916 for (i
= 0; i
< MLXSW_SP2_PORT_LINK_MODE_LEN
; i
++) {
2917 if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode
[i
],
2918 cmd
->link_modes
.advertising
))
2919 ptys_proto
|= mlxsw_sp2_port_link_mode
[i
].mask
;
2924 static u32
mlxsw_sp2_to_ptys_speed(struct mlxsw_sp
*mlxsw_sp
, u32 speed
)
2929 for (i
= 0; i
< MLXSW_SP2_PORT_LINK_MODE_LEN
; i
++) {
2930 if (speed
== mlxsw_sp2_port_link_mode
[i
].speed
)
2931 ptys_proto
|= mlxsw_sp2_port_link_mode
[i
].mask
;
2937 mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp
*mlxsw_sp
, u32 upper_speed
)
2942 for (i
= 0; i
< MLXSW_SP2_PORT_LINK_MODE_LEN
; i
++) {
2943 if (mlxsw_sp2_port_link_mode
[i
].speed
<= upper_speed
)
2944 ptys_proto
|= mlxsw_sp2_port_link_mode
[i
].mask
;
2950 mlxsw_sp2_port_speed_base(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
2953 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
2957 /* In Spectrum-2, the speed of 1x can change from port to port, so query
2960 mlxsw_reg_ptys_ext_eth_pack(ptys_pl
, local_port
, 0, false);
2961 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
2964 mlxsw_reg_ptys_ext_eth_unpack(ptys_pl
, ð_proto_cap
, NULL
, NULL
);
2967 MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR
) {
2968 *base_speed
= MLXSW_SP_PORT_BASE_SPEED_50G
;
2973 MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR
) {
2974 *base_speed
= MLXSW_SP_PORT_BASE_SPEED_25G
;
2982 mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp
*mlxsw_sp
, char *payload
,
2983 u8 local_port
, u32 proto_admin
,
2986 mlxsw_reg_ptys_ext_eth_pack(payload
, local_port
, proto_admin
, autoneg
);
2990 mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp
*mlxsw_sp
, char *payload
,
2991 u32
*p_eth_proto_cap
, u32
*p_eth_proto_admin
,
2992 u32
*p_eth_proto_oper
)
2994 mlxsw_reg_ptys_ext_eth_unpack(payload
, p_eth_proto_cap
,
2995 p_eth_proto_admin
, p_eth_proto_oper
);
2998 static const struct mlxsw_sp_port_type_speed_ops
2999 mlxsw_sp2_port_type_speed_ops
= {
3000 .from_ptys_supported_port
= mlxsw_sp2_from_ptys_supported_port
,
3001 .from_ptys_link
= mlxsw_sp2_from_ptys_link
,
3002 .from_ptys_speed_duplex
= mlxsw_sp2_from_ptys_speed_duplex
,
3003 .to_ptys_advert_link
= mlxsw_sp2_to_ptys_advert_link
,
3004 .to_ptys_speed
= mlxsw_sp2_to_ptys_speed
,
3005 .to_ptys_upper_speed
= mlxsw_sp2_to_ptys_upper_speed
,
3006 .port_speed_base
= mlxsw_sp2_port_speed_base
,
3007 .reg_ptys_eth_pack
= mlxsw_sp2_reg_ptys_eth_pack
,
3008 .reg_ptys_eth_unpack
= mlxsw_sp2_reg_ptys_eth_unpack
,
3012 mlxsw_sp_port_get_link_supported(struct mlxsw_sp
*mlxsw_sp
, u32 eth_proto_cap
,
3013 struct ethtool_link_ksettings
*cmd
)
3015 const struct mlxsw_sp_port_type_speed_ops
*ops
;
3017 ops
= mlxsw_sp
->port_type_speed_ops
;
3019 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Asym_Pause
);
3020 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Autoneg
);
3021 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Pause
);
3023 ops
->from_ptys_supported_port(mlxsw_sp
, eth_proto_cap
, cmd
);
3024 ops
->from_ptys_link(mlxsw_sp
, eth_proto_cap
, cmd
->link_modes
.supported
);
3028 mlxsw_sp_port_get_link_advertise(struct mlxsw_sp
*mlxsw_sp
,
3029 u32 eth_proto_admin
, bool autoneg
,
3030 struct ethtool_link_ksettings
*cmd
)
3032 const struct mlxsw_sp_port_type_speed_ops
*ops
;
3034 ops
= mlxsw_sp
->port_type_speed_ops
;
3039 ethtool_link_ksettings_add_link_mode(cmd
, advertising
, Autoneg
);
3040 ops
->from_ptys_link(mlxsw_sp
, eth_proto_admin
,
3041 cmd
->link_modes
.advertising
);
3045 mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type
)
3047 switch (connector_type
) {
3048 case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR
:
3050 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE
:
3052 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP
:
3054 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI
:
3056 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC
:
3058 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII
:
3060 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE
:
3062 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA
:
3064 case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER
:
3072 static int mlxsw_sp_port_get_link_ksettings(struct net_device
*dev
,
3073 struct ethtool_link_ksettings
*cmd
)
3075 u32 eth_proto_cap
, eth_proto_admin
, eth_proto_oper
;
3076 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
3077 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3078 const struct mlxsw_sp_port_type_speed_ops
*ops
;
3079 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
3084 ops
= mlxsw_sp
->port_type_speed_ops
;
3086 autoneg
= mlxsw_sp_port
->link
.autoneg
;
3087 ops
->reg_ptys_eth_pack(mlxsw_sp
, ptys_pl
, mlxsw_sp_port
->local_port
,
3089 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
3092 ops
->reg_ptys_eth_unpack(mlxsw_sp
, ptys_pl
, ð_proto_cap
,
3093 ð_proto_admin
, ð_proto_oper
);
3095 mlxsw_sp_port_get_link_supported(mlxsw_sp
, eth_proto_cap
, cmd
);
3097 mlxsw_sp_port_get_link_advertise(mlxsw_sp
, eth_proto_admin
, autoneg
,
3100 cmd
->base
.autoneg
= autoneg
? AUTONEG_ENABLE
: AUTONEG_DISABLE
;
3101 connector_type
= mlxsw_reg_ptys_connector_type_get(ptys_pl
);
3102 cmd
->base
.port
= mlxsw_sp_port_connector_port(connector_type
);
3103 ops
->from_ptys_speed_duplex(mlxsw_sp
, netif_carrier_ok(dev
),
3104 eth_proto_oper
, cmd
);
3110 mlxsw_sp_port_set_link_ksettings(struct net_device
*dev
,
3111 const struct ethtool_link_ksettings
*cmd
)
3113 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
3114 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3115 const struct mlxsw_sp_port_type_speed_ops
*ops
;
3116 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
3117 u32 eth_proto_cap
, eth_proto_new
;
3121 ops
= mlxsw_sp
->port_type_speed_ops
;
3123 ops
->reg_ptys_eth_pack(mlxsw_sp
, ptys_pl
, mlxsw_sp_port
->local_port
,
3125 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
3128 ops
->reg_ptys_eth_unpack(mlxsw_sp
, ptys_pl
, ð_proto_cap
, NULL
, NULL
);
3130 autoneg
= cmd
->base
.autoneg
== AUTONEG_ENABLE
;
3131 if (!autoneg
&& cmd
->base
.speed
== SPEED_56000
) {
3132 netdev_err(dev
, "56G not supported with autoneg off\n");
3135 eth_proto_new
= autoneg
?
3136 ops
->to_ptys_advert_link(mlxsw_sp
, cmd
) :
3137 ops
->to_ptys_speed(mlxsw_sp
, cmd
->base
.speed
);
3139 eth_proto_new
= eth_proto_new
& eth_proto_cap
;
3140 if (!eth_proto_new
) {
3141 netdev_err(dev
, "No supported speed requested\n");
3145 ops
->reg_ptys_eth_pack(mlxsw_sp
, ptys_pl
, mlxsw_sp_port
->local_port
,
3146 eth_proto_new
, autoneg
);
3147 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
3151 mlxsw_sp_port
->link
.autoneg
= autoneg
;
3153 if (!netif_running(dev
))
3156 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
3157 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
3162 static int mlxsw_sp_flash_device(struct net_device
*dev
,
3163 struct ethtool_flash
*flash
)
3165 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
3166 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3167 const struct firmware
*firmware
;
3170 if (flash
->region
!= ETHTOOL_FLASH_ALL_REGIONS
)
3176 err
= request_firmware_direct(&firmware
, flash
->data
, &dev
->dev
);
3179 err
= mlxsw_sp_firmware_flash(mlxsw_sp
, firmware
);
3180 release_firmware(firmware
);
3187 static int mlxsw_sp_get_module_info(struct net_device
*netdev
,
3188 struct ethtool_modinfo
*modinfo
)
3190 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(netdev
);
3191 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3194 err
= mlxsw_env_get_module_info(mlxsw_sp
->core
,
3195 mlxsw_sp_port
->mapping
.module
,
3201 static int mlxsw_sp_get_module_eeprom(struct net_device
*netdev
,
3202 struct ethtool_eeprom
*ee
,
3205 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(netdev
);
3206 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3209 err
= mlxsw_env_get_module_eeprom(netdev
, mlxsw_sp
->core
,
3210 mlxsw_sp_port
->mapping
.module
, ee
,
3216 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops
= {
3217 .get_drvinfo
= mlxsw_sp_port_get_drvinfo
,
3218 .get_link
= ethtool_op_get_link
,
3219 .get_pauseparam
= mlxsw_sp_port_get_pauseparam
,
3220 .set_pauseparam
= mlxsw_sp_port_set_pauseparam
,
3221 .get_strings
= mlxsw_sp_port_get_strings
,
3222 .set_phys_id
= mlxsw_sp_port_set_phys_id
,
3223 .get_ethtool_stats
= mlxsw_sp_port_get_stats
,
3224 .get_sset_count
= mlxsw_sp_port_get_sset_count
,
3225 .get_link_ksettings
= mlxsw_sp_port_get_link_ksettings
,
3226 .set_link_ksettings
= mlxsw_sp_port_set_link_ksettings
,
3227 .flash_device
= mlxsw_sp_flash_device
,
3228 .get_module_info
= mlxsw_sp_get_module_info
,
3229 .get_module_eeprom
= mlxsw_sp_get_module_eeprom
,
3233 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u8 width
)
3235 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3236 const struct mlxsw_sp_port_type_speed_ops
*ops
;
3237 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
3238 u32 eth_proto_admin
;
3243 ops
= mlxsw_sp
->port_type_speed_ops
;
3245 err
= ops
->port_speed_base(mlxsw_sp
, mlxsw_sp_port
->local_port
,
3249 upper_speed
= base_speed
* width
;
3251 eth_proto_admin
= ops
->to_ptys_upper_speed(mlxsw_sp
, upper_speed
);
3252 ops
->reg_ptys_eth_pack(mlxsw_sp
, ptys_pl
, mlxsw_sp_port
->local_port
,
3253 eth_proto_admin
, mlxsw_sp_port
->link
.autoneg
);
3254 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
3257 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
3258 enum mlxsw_reg_qeec_hr hr
, u8 index
, u8 next_index
,
3259 bool dwrr
, u8 dwrr_weight
)
3261 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3262 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
3264 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
3266 mlxsw_reg_qeec_de_set(qeec_pl
, true);
3267 mlxsw_reg_qeec_dwrr_set(qeec_pl
, dwrr
);
3268 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl
, dwrr_weight
);
3269 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
3272 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
3273 enum mlxsw_reg_qeec_hr hr
, u8 index
,
3274 u8 next_index
, u32 maxrate
)
3276 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3277 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
3279 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
3281 mlxsw_reg_qeec_mase_set(qeec_pl
, true);
3282 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl
, maxrate
);
3283 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
3286 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
3287 enum mlxsw_reg_qeec_hr hr
, u8 index
,
3288 u8 next_index
, u32 minrate
)
3290 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3291 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
3293 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
3295 mlxsw_reg_qeec_mise_set(qeec_pl
, true);
3296 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl
, minrate
);
3298 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
3301 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
3302 u8 switch_prio
, u8 tclass
)
3304 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3305 char qtct_pl
[MLXSW_REG_QTCT_LEN
];
3307 mlxsw_reg_qtct_pack(qtct_pl
, mlxsw_sp_port
->local_port
, switch_prio
,
3309 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qtct
), qtct_pl
);
3312 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
3316 /* Setup the elements hierarcy, so that each TC is linked to
3317 * one subgroup, which are all member in the same group.
3319 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
3320 MLXSW_REG_QEEC_HIERARCY_GROUP
, 0, 0, false,
3324 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
3325 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
3326 MLXSW_REG_QEEC_HIERARCY_SUBGROUP
, i
,
3331 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
3332 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
3333 MLXSW_REG_QEEC_HIERARCY_TC
, i
, i
,
3338 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
3339 MLXSW_REG_QEEC_HIERARCY_TC
,
3346 /* Make sure the max shaper is disabled in all hierarchies that
3349 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
3350 MLXSW_REG_QEEC_HIERARCY_PORT
, 0, 0,
3351 MLXSW_REG_QEEC_MAS_DIS
);
3354 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
3355 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
3356 MLXSW_REG_QEEC_HIERARCY_SUBGROUP
,
3358 MLXSW_REG_QEEC_MAS_DIS
);
3362 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
3363 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
3364 MLXSW_REG_QEEC_HIERARCY_TC
,
3366 MLXSW_REG_QEEC_MAS_DIS
);
3370 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
3371 MLXSW_REG_QEEC_HIERARCY_TC
,
3373 MLXSW_REG_QEEC_MAS_DIS
);
3378 /* Configure the min shaper for multicast TCs. */
3379 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
3380 err
= mlxsw_sp_port_min_bw_set(mlxsw_sp_port
,
3381 MLXSW_REG_QEEC_HIERARCY_TC
,
3383 MLXSW_REG_QEEC_MIS_MIN
);
3388 /* Map all priorities to traffic class 0. */
3389 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
3390 err
= mlxsw_sp_port_prio_tc_set(mlxsw_sp_port
, i
, 0);
3398 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
3401 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3402 char qtctm_pl
[MLXSW_REG_QTCTM_LEN
];
3404 mlxsw_reg_qtctm_pack(qtctm_pl
, mlxsw_sp_port
->local_port
, enable
);
3405 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qtctm
), qtctm_pl
);
3408 static int mlxsw_sp_port_create(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
3409 bool split
, u8 module
, u8 width
, u8 lane
)
3411 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
3412 struct mlxsw_sp_port
*mlxsw_sp_port
;
3413 struct net_device
*dev
;
3416 err
= mlxsw_core_port_init(mlxsw_sp
->core
, local_port
,
3417 module
+ 1, split
, lane
/ width
,
3419 sizeof(mlxsw_sp
->base_mac
));
3421 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to init core port\n",
3426 dev
= alloc_etherdev(sizeof(struct mlxsw_sp_port
));
3429 goto err_alloc_etherdev
;
3431 SET_NETDEV_DEV(dev
, mlxsw_sp
->bus_info
->dev
);
3432 mlxsw_sp_port
= netdev_priv(dev
);
3433 mlxsw_sp_port
->dev
= dev
;
3434 mlxsw_sp_port
->mlxsw_sp
= mlxsw_sp
;
3435 mlxsw_sp_port
->local_port
= local_port
;
3436 mlxsw_sp_port
->pvid
= MLXSW_SP_DEFAULT_VID
;
3437 mlxsw_sp_port
->split
= split
;
3438 mlxsw_sp_port
->mapping
.module
= module
;
3439 mlxsw_sp_port
->mapping
.width
= width
;
3440 mlxsw_sp_port
->mapping
.lane
= lane
;
3441 mlxsw_sp_port
->link
.autoneg
= 1;
3442 INIT_LIST_HEAD(&mlxsw_sp_port
->vlans_list
);
3443 INIT_LIST_HEAD(&mlxsw_sp_port
->mall_tc_list
);
3445 mlxsw_sp_port
->pcpu_stats
=
3446 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats
);
3447 if (!mlxsw_sp_port
->pcpu_stats
) {
3449 goto err_alloc_stats
;
3452 mlxsw_sp_port
->sample
= kzalloc(sizeof(*mlxsw_sp_port
->sample
),
3454 if (!mlxsw_sp_port
->sample
) {
3456 goto err_alloc_sample
;
3459 INIT_DELAYED_WORK(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
,
3460 &update_stats_cache
);
3462 dev
->netdev_ops
= &mlxsw_sp_port_netdev_ops
;
3463 dev
->ethtool_ops
= &mlxsw_sp_port_ethtool_ops
;
3465 err
= mlxsw_sp_port_module_map(mlxsw_sp_port
, module
, width
, lane
);
3467 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to map module\n",
3468 mlxsw_sp_port
->local_port
);
3469 goto err_port_module_map
;
3472 err
= mlxsw_sp_port_swid_set(mlxsw_sp_port
, 0);
3474 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set SWID\n",
3475 mlxsw_sp_port
->local_port
);
3476 goto err_port_swid_set
;
3479 err
= mlxsw_sp_port_dev_addr_init(mlxsw_sp_port
);
3481 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unable to init port mac address\n",
3482 mlxsw_sp_port
->local_port
);
3483 goto err_dev_addr_init
;
3486 netif_carrier_off(dev
);
3488 dev
->features
|= NETIF_F_NETNS_LOCAL
| NETIF_F_LLTX
| NETIF_F_SG
|
3489 NETIF_F_HW_VLAN_CTAG_FILTER
| NETIF_F_HW_TC
;
3490 dev
->hw_features
|= NETIF_F_HW_TC
| NETIF_F_LOOPBACK
;
3493 dev
->max_mtu
= ETH_MAX_MTU
;
3495 /* Each packet needs to have a Tx header (metadata) on top all other
3498 dev
->needed_headroom
= MLXSW_TXHDR_LEN
;
3500 err
= mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port
);
3502 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set system port mapping\n",
3503 mlxsw_sp_port
->local_port
);
3504 goto err_port_system_port_mapping_set
;
3507 err
= mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port
, width
);
3509 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to enable speeds\n",
3510 mlxsw_sp_port
->local_port
);
3511 goto err_port_speed_by_width_set
;
3514 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, ETH_DATA_LEN
);
3516 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set MTU\n",
3517 mlxsw_sp_port
->local_port
);
3518 goto err_port_mtu_set
;
3521 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
3523 goto err_port_admin_status_set
;
3525 err
= mlxsw_sp_port_buffers_init(mlxsw_sp_port
);
3527 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize buffers\n",
3528 mlxsw_sp_port
->local_port
);
3529 goto err_port_buffers_init
;
3532 err
= mlxsw_sp_port_ets_init(mlxsw_sp_port
);
3534 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize ETS\n",
3535 mlxsw_sp_port
->local_port
);
3536 goto err_port_ets_init
;
3539 err
= mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port
, true);
3541 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize TC MC mode\n",
3542 mlxsw_sp_port
->local_port
);
3543 goto err_port_tc_mc_mode
;
3546 /* ETS and buffers must be initialized before DCB. */
3547 err
= mlxsw_sp_port_dcb_init(mlxsw_sp_port
);
3549 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize DCB\n",
3550 mlxsw_sp_port
->local_port
);
3551 goto err_port_dcb_init
;
3554 err
= mlxsw_sp_port_fids_init(mlxsw_sp_port
);
3556 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize FIDs\n",
3557 mlxsw_sp_port
->local_port
);
3558 goto err_port_fids_init
;
3561 err
= mlxsw_sp_tc_qdisc_init(mlxsw_sp_port
);
3563 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize TC qdiscs\n",
3564 mlxsw_sp_port
->local_port
);
3565 goto err_port_qdiscs_init
;
3568 err
= mlxsw_sp_port_nve_init(mlxsw_sp_port
);
3570 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize NVE\n",
3571 mlxsw_sp_port
->local_port
);
3572 goto err_port_nve_init
;
3575 err
= mlxsw_sp_port_pvid_set(mlxsw_sp_port
, MLXSW_SP_DEFAULT_VID
);
3577 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set PVID\n",
3578 mlxsw_sp_port
->local_port
);
3579 goto err_port_pvid_set
;
3582 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_create(mlxsw_sp_port
,
3583 MLXSW_SP_DEFAULT_VID
);
3584 if (IS_ERR(mlxsw_sp_port_vlan
)) {
3585 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to create VID 1\n",
3586 mlxsw_sp_port
->local_port
);
3587 err
= PTR_ERR(mlxsw_sp_port_vlan
);
3588 goto err_port_vlan_create
;
3590 mlxsw_sp_port
->default_vlan
= mlxsw_sp_port_vlan
;
3592 mlxsw_sp
->ports
[local_port
] = mlxsw_sp_port
;
3593 err
= register_netdev(dev
);
3595 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to register netdev\n",
3596 mlxsw_sp_port
->local_port
);
3597 goto err_register_netdev
;
3600 mlxsw_core_port_eth_set(mlxsw_sp
->core
, mlxsw_sp_port
->local_port
,
3601 mlxsw_sp_port
, dev
);
3602 mlxsw_core_schedule_dw(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
, 0);
3605 err_register_netdev
:
3606 mlxsw_sp
->ports
[local_port
] = NULL
;
3607 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan
);
3608 err_port_vlan_create
:
3610 mlxsw_sp_port_nve_fini(mlxsw_sp_port
);
3612 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port
);
3613 err_port_qdiscs_init
:
3614 mlxsw_sp_port_fids_fini(mlxsw_sp_port
);
3616 mlxsw_sp_port_dcb_fini(mlxsw_sp_port
);
3618 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port
, false);
3619 err_port_tc_mc_mode
:
3621 err_port_buffers_init
:
3622 err_port_admin_status_set
:
3624 err_port_speed_by_width_set
:
3625 err_port_system_port_mapping_set
:
3627 mlxsw_sp_port_swid_set(mlxsw_sp_port
, MLXSW_PORT_SWID_DISABLED_PORT
);
3629 mlxsw_sp_port_module_unmap(mlxsw_sp_port
);
3630 err_port_module_map
:
3631 kfree(mlxsw_sp_port
->sample
);
3633 free_percpu(mlxsw_sp_port
->pcpu_stats
);
3637 mlxsw_core_port_fini(mlxsw_sp
->core
, local_port
);
3641 static void mlxsw_sp_port_remove(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
3643 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
3645 cancel_delayed_work_sync(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
);
3646 mlxsw_core_port_clear(mlxsw_sp
->core
, local_port
, mlxsw_sp
);
3647 unregister_netdev(mlxsw_sp_port
->dev
); /* This calls ndo_stop */
3648 mlxsw_sp
->ports
[local_port
] = NULL
;
3649 mlxsw_sp_port_vlan_flush(mlxsw_sp_port
, true);
3650 mlxsw_sp_port_nve_fini(mlxsw_sp_port
);
3651 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port
);
3652 mlxsw_sp_port_fids_fini(mlxsw_sp_port
);
3653 mlxsw_sp_port_dcb_fini(mlxsw_sp_port
);
3654 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port
, false);
3655 mlxsw_sp_port_swid_set(mlxsw_sp_port
, MLXSW_PORT_SWID_DISABLED_PORT
);
3656 mlxsw_sp_port_module_unmap(mlxsw_sp_port
);
3657 kfree(mlxsw_sp_port
->sample
);
3658 free_percpu(mlxsw_sp_port
->pcpu_stats
);
3659 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port
->vlans_list
));
3660 free_netdev(mlxsw_sp_port
->dev
);
3661 mlxsw_core_port_fini(mlxsw_sp
->core
, local_port
);
3664 static bool mlxsw_sp_port_created(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
3666 return mlxsw_sp
->ports
[local_port
] != NULL
;
3669 static void mlxsw_sp_ports_remove(struct mlxsw_sp
*mlxsw_sp
)
3673 for (i
= 1; i
< mlxsw_core_max_ports(mlxsw_sp
->core
); i
++)
3674 if (mlxsw_sp_port_created(mlxsw_sp
, i
))
3675 mlxsw_sp_port_remove(mlxsw_sp
, i
);
3676 kfree(mlxsw_sp
->port_to_module
);
3677 kfree(mlxsw_sp
->ports
);
3680 static int mlxsw_sp_ports_create(struct mlxsw_sp
*mlxsw_sp
)
3682 unsigned int max_ports
= mlxsw_core_max_ports(mlxsw_sp
->core
);
3683 u8 module
, width
, lane
;
3688 alloc_size
= sizeof(struct mlxsw_sp_port
*) * max_ports
;
3689 mlxsw_sp
->ports
= kzalloc(alloc_size
, GFP_KERNEL
);
3690 if (!mlxsw_sp
->ports
)
3693 mlxsw_sp
->port_to_module
= kmalloc_array(max_ports
, sizeof(int),
3695 if (!mlxsw_sp
->port_to_module
) {
3697 goto err_port_to_module_alloc
;
3700 for (i
= 1; i
< max_ports
; i
++) {
3701 /* Mark as invalid */
3702 mlxsw_sp
->port_to_module
[i
] = -1;
3704 err
= mlxsw_sp_port_module_info_get(mlxsw_sp
, i
, &module
,
3707 goto err_port_module_info_get
;
3710 mlxsw_sp
->port_to_module
[i
] = module
;
3711 err
= mlxsw_sp_port_create(mlxsw_sp
, i
, false,
3712 module
, width
, lane
);
3714 goto err_port_create
;
3719 err_port_module_info_get
:
3720 for (i
--; i
>= 1; i
--)
3721 if (mlxsw_sp_port_created(mlxsw_sp
, i
))
3722 mlxsw_sp_port_remove(mlxsw_sp
, i
);
3723 kfree(mlxsw_sp
->port_to_module
);
3724 err_port_to_module_alloc
:
3725 kfree(mlxsw_sp
->ports
);
3729 static u8
mlxsw_sp_cluster_base_port_get(u8 local_port
)
3731 u8 offset
= (local_port
- 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX
;
3733 return local_port
- offset
;
3736 static int mlxsw_sp_port_split_create(struct mlxsw_sp
*mlxsw_sp
, u8 base_port
,
3737 u8 module
, unsigned int count
, u8 offset
)
3739 u8 width
= MLXSW_PORT_MODULE_MAX_WIDTH
/ count
;
3742 for (i
= 0; i
< count
; i
++) {
3743 err
= mlxsw_sp_port_create(mlxsw_sp
, base_port
+ i
* offset
,
3744 true, module
, width
, i
* width
);
3746 goto err_port_create
;
3752 for (i
--; i
>= 0; i
--)
3753 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
* offset
))
3754 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
* offset
);
3758 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp
*mlxsw_sp
,
3759 u8 base_port
, unsigned int count
)
3761 u8 local_port
, module
, width
= MLXSW_PORT_MODULE_MAX_WIDTH
;
3764 /* Split by four means we need to re-create two ports, otherwise
3769 for (i
= 0; i
< count
; i
++) {
3770 local_port
= base_port
+ i
* 2;
3771 if (mlxsw_sp
->port_to_module
[local_port
] < 0)
3773 module
= mlxsw_sp
->port_to_module
[local_port
];
3775 mlxsw_sp_port_create(mlxsw_sp
, local_port
, false, module
,
3780 static int mlxsw_sp_port_split(struct mlxsw_core
*mlxsw_core
, u8 local_port
,
3782 struct netlink_ext_ack
*extack
)
3784 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3785 u8 local_ports_in_1x
, local_ports_in_2x
, offset
;
3786 struct mlxsw_sp_port
*mlxsw_sp_port
;
3787 u8 module
, cur_width
, base_port
;
3791 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, LOCAL_PORTS_IN_1X
) ||
3792 !MLXSW_CORE_RES_VALID(mlxsw_core
, LOCAL_PORTS_IN_2X
))
3795 local_ports_in_1x
= MLXSW_CORE_RES_GET(mlxsw_core
, LOCAL_PORTS_IN_1X
);
3796 local_ports_in_2x
= MLXSW_CORE_RES_GET(mlxsw_core
, LOCAL_PORTS_IN_2X
);
3798 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
3799 if (!mlxsw_sp_port
) {
3800 dev_err(mlxsw_sp
->bus_info
->dev
, "Port number \"%d\" does not exist\n",
3802 NL_SET_ERR_MSG_MOD(extack
, "Port number does not exist");
3806 module
= mlxsw_sp_port
->mapping
.module
;
3807 cur_width
= mlxsw_sp_port
->mapping
.width
;
3809 if (count
!= 2 && count
!= 4) {
3810 netdev_err(mlxsw_sp_port
->dev
, "Port can only be split into 2 or 4 ports\n");
3811 NL_SET_ERR_MSG_MOD(extack
, "Port can only be split into 2 or 4 ports");
3815 if (cur_width
!= MLXSW_PORT_MODULE_MAX_WIDTH
) {
3816 netdev_err(mlxsw_sp_port
->dev
, "Port cannot be split further\n");
3817 NL_SET_ERR_MSG_MOD(extack
, "Port cannot be split further");
3821 /* Make sure we have enough slave (even) ports for the split. */
3823 offset
= local_ports_in_2x
;
3824 base_port
= local_port
;
3825 if (mlxsw_sp
->ports
[base_port
+ local_ports_in_2x
]) {
3826 netdev_err(mlxsw_sp_port
->dev
, "Invalid split configuration\n");
3827 NL_SET_ERR_MSG_MOD(extack
, "Invalid split configuration");
3831 offset
= local_ports_in_1x
;
3832 base_port
= mlxsw_sp_cluster_base_port_get(local_port
);
3833 if (mlxsw_sp
->ports
[base_port
+ 1] ||
3834 mlxsw_sp
->ports
[base_port
+ 3]) {
3835 netdev_err(mlxsw_sp_port
->dev
, "Invalid split configuration\n");
3836 NL_SET_ERR_MSG_MOD(extack
, "Invalid split configuration");
3841 for (i
= 0; i
< count
; i
++)
3842 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
* offset
))
3843 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
* offset
);
3845 err
= mlxsw_sp_port_split_create(mlxsw_sp
, base_port
, module
, count
,
3848 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create split ports\n");
3849 goto err_port_split_create
;
3854 err_port_split_create
:
3855 mlxsw_sp_port_unsplit_create(mlxsw_sp
, base_port
, count
);
3859 static int mlxsw_sp_port_unsplit(struct mlxsw_core
*mlxsw_core
, u8 local_port
,
3860 struct netlink_ext_ack
*extack
)
3862 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3863 u8 local_ports_in_1x
, local_ports_in_2x
, offset
;
3864 struct mlxsw_sp_port
*mlxsw_sp_port
;
3865 u8 cur_width
, base_port
;
3869 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, LOCAL_PORTS_IN_1X
) ||
3870 !MLXSW_CORE_RES_VALID(mlxsw_core
, LOCAL_PORTS_IN_2X
))
3873 local_ports_in_1x
= MLXSW_CORE_RES_GET(mlxsw_core
, LOCAL_PORTS_IN_1X
);
3874 local_ports_in_2x
= MLXSW_CORE_RES_GET(mlxsw_core
, LOCAL_PORTS_IN_2X
);
3876 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
3877 if (!mlxsw_sp_port
) {
3878 dev_err(mlxsw_sp
->bus_info
->dev
, "Port number \"%d\" does not exist\n",
3880 NL_SET_ERR_MSG_MOD(extack
, "Port number does not exist");
3884 if (!mlxsw_sp_port
->split
) {
3885 netdev_err(mlxsw_sp_port
->dev
, "Port was not split\n");
3886 NL_SET_ERR_MSG_MOD(extack
, "Port was not split");
3890 cur_width
= mlxsw_sp_port
->mapping
.width
;
3891 count
= cur_width
== 1 ? 4 : 2;
3894 offset
= local_ports_in_2x
;
3896 offset
= local_ports_in_1x
;
3898 base_port
= mlxsw_sp_cluster_base_port_get(local_port
);
3900 /* Determine which ports to remove. */
3901 if (count
== 2 && local_port
>= base_port
+ 2)
3902 base_port
= base_port
+ 2;
3904 for (i
= 0; i
< count
; i
++)
3905 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
* offset
))
3906 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
* offset
);
3908 mlxsw_sp_port_unsplit_create(mlxsw_sp
, base_port
, count
);
3913 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info
*reg
,
3914 char *pude_pl
, void *priv
)
3916 struct mlxsw_sp
*mlxsw_sp
= priv
;
3917 struct mlxsw_sp_port
*mlxsw_sp_port
;
3918 enum mlxsw_reg_pude_oper_status status
;
3921 local_port
= mlxsw_reg_pude_local_port_get(pude_pl
);
3922 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
3926 status
= mlxsw_reg_pude_oper_status_get(pude_pl
);
3927 if (status
== MLXSW_PORT_OPER_STATUS_UP
) {
3928 netdev_info(mlxsw_sp_port
->dev
, "link up\n");
3929 netif_carrier_on(mlxsw_sp_port
->dev
);
3931 netdev_info(mlxsw_sp_port
->dev
, "link down\n");
3932 netif_carrier_off(mlxsw_sp_port
->dev
);
3936 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff
*skb
,
3937 u8 local_port
, void *priv
)
3939 struct mlxsw_sp
*mlxsw_sp
= priv
;
3940 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
3941 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
3943 if (unlikely(!mlxsw_sp_port
)) {
3944 dev_warn_ratelimited(mlxsw_sp
->bus_info
->dev
, "Port %d: skb received for non-existent port\n",
3949 skb
->dev
= mlxsw_sp_port
->dev
;
3951 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
3952 u64_stats_update_begin(&pcpu_stats
->syncp
);
3953 pcpu_stats
->rx_packets
++;
3954 pcpu_stats
->rx_bytes
+= skb
->len
;
3955 u64_stats_update_end(&pcpu_stats
->syncp
);
3957 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
3958 netif_receive_skb(skb
);
3961 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff
*skb
, u8 local_port
,
3964 skb
->offload_fwd_mark
= 1;
3965 return mlxsw_sp_rx_listener_no_mark_func(skb
, local_port
, priv
);
3968 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff
*skb
,
3969 u8 local_port
, void *priv
)
3971 skb
->offload_l3_fwd_mark
= 1;
3972 skb
->offload_fwd_mark
= 1;
3973 return mlxsw_sp_rx_listener_no_mark_func(skb
, local_port
, priv
);
3976 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff
*skb
, u8 local_port
,
3979 struct mlxsw_sp
*mlxsw_sp
= priv
;
3980 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
3981 struct psample_group
*psample_group
;
3984 if (unlikely(!mlxsw_sp_port
)) {
3985 dev_warn_ratelimited(mlxsw_sp
->bus_info
->dev
, "Port %d: sample skb received for non-existent port\n",
3989 if (unlikely(!mlxsw_sp_port
->sample
)) {
3990 dev_warn_ratelimited(mlxsw_sp
->bus_info
->dev
, "Port %d: sample skb received on unsupported port\n",
3995 size
= mlxsw_sp_port
->sample
->truncate
?
3996 mlxsw_sp_port
->sample
->trunc_size
: skb
->len
;
3999 psample_group
= rcu_dereference(mlxsw_sp_port
->sample
->psample_group
);
4002 psample_sample_packet(psample_group
, skb
, size
,
4003 mlxsw_sp_port
->dev
->ifindex
, 0,
4004 mlxsw_sp_port
->sample
->rate
);
/* Convenience wrappers around MLXSW_RXL()/MLXSW_EVENTL() that bind the
 * appropriate RX handler and prefix the trap group with SP_.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
4026 static const struct mlxsw_listener mlxsw_sp_listener
[] = {
4028 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func
, PUDE
),
4030 MLXSW_SP_RXL_NO_MARK(STP
, TRAP_TO_CPU
, STP
, true),
4031 MLXSW_SP_RXL_NO_MARK(LACP
, TRAP_TO_CPU
, LACP
, true),
4032 MLXSW_SP_RXL_NO_MARK(LLDP
, TRAP_TO_CPU
, LLDP
, true),
4033 MLXSW_SP_RXL_MARK(DHCP
, MIRROR_TO_CPU
, DHCP
, false),
4034 MLXSW_SP_RXL_MARK(IGMP_QUERY
, MIRROR_TO_CPU
, IGMP
, false),
4035 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT
, TRAP_TO_CPU
, IGMP
, false),
4036 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT
, TRAP_TO_CPU
, IGMP
, false),
4037 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE
, TRAP_TO_CPU
, IGMP
, false),
4038 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT
, TRAP_TO_CPU
, IGMP
, false),
4039 MLXSW_SP_RXL_MARK(ARPBC
, MIRROR_TO_CPU
, ARP
, false),
4040 MLXSW_SP_RXL_MARK(ARPUC
, MIRROR_TO_CPU
, ARP
, false),
4041 MLXSW_SP_RXL_NO_MARK(FID_MISS
, TRAP_TO_CPU
, IP2ME
, false),
4042 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY
, MIRROR_TO_CPU
, IPV6_MLD
,
4044 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT
, TRAP_TO_CPU
, IPV6_MLD
,
4046 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE
, TRAP_TO_CPU
, IPV6_MLD
,
4048 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT
, TRAP_TO_CPU
, IPV6_MLD
,
4051 MLXSW_SP_RXL_MARK(MTUERROR
, TRAP_TO_CPU
, ROUTER_EXP
, false),
4052 MLXSW_SP_RXL_MARK(TTLERROR
, TRAP_TO_CPU
, ROUTER_EXP
, false),
4053 MLXSW_SP_RXL_L3_MARK(LBERROR
, MIRROR_TO_CPU
, LBERROR
, false),
4054 MLXSW_SP_RXL_MARK(IP2ME
, TRAP_TO_CPU
, IP2ME
, false),
4055 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS
, TRAP_TO_CPU
, ROUTER_EXP
,
4057 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST
, TRAP_TO_CPU
, ROUTER_EXP
, false),
4058 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC
, TRAP_TO_CPU
, ROUTER_EXP
, false),
4059 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK
, TRAP_TO_CPU
, ROUTER_EXP
, false),
4060 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK
, TRAP_TO_CPU
, ROUTER_EXP
,
4062 MLXSW_SP_RXL_MARK(IPV4_OSPF
, TRAP_TO_CPU
, OSPF
, false),
4063 MLXSW_SP_RXL_MARK(IPV6_OSPF
, TRAP_TO_CPU
, OSPF
, false),
4064 MLXSW_SP_RXL_MARK(IPV6_DHCP
, TRAP_TO_CPU
, DHCP
, false),
4065 MLXSW_SP_RXL_MARK(RTR_INGRESS0
, TRAP_TO_CPU
, REMOTE_ROUTE
, false),
4066 MLXSW_SP_RXL_MARK(IPV4_BGP
, TRAP_TO_CPU
, BGP
, false),
4067 MLXSW_SP_RXL_MARK(IPV6_BGP
, TRAP_TO_CPU
, BGP
, false),
4068 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION
, TRAP_TO_CPU
, IPV6_ND
,
4070 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT
, TRAP_TO_CPU
, IPV6_ND
,
4072 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION
, TRAP_TO_CPU
, IPV6_ND
,
4074 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT
, TRAP_TO_CPU
, IPV6_ND
,
4076 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION
, TRAP_TO_CPU
, IPV6_ND
, false),
4077 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST
, TRAP_TO_CPU
, ROUTER_EXP
,
4079 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4
, TRAP_TO_CPU
, HOST_MISS
, false),
4080 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6
, TRAP_TO_CPU
, HOST_MISS
, false),
4081 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4
, TRAP_TO_CPU
, ROUTER_EXP
, false),
4082 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6
, TRAP_TO_CPU
, ROUTER_EXP
, false),
4083 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR
, TRAP_TO_CPU
, ROUTER_EXP
, false),
4084 MLXSW_SP_RXL_MARK(DECAP_ECN0
, TRAP_TO_CPU
, ROUTER_EXP
, false),
4085 MLXSW_SP_RXL_MARK(IPV4_VRRP
, TRAP_TO_CPU
, ROUTER_EXP
, false),
4086 MLXSW_SP_RXL_MARK(IPV6_VRRP
, TRAP_TO_CPU
, ROUTER_EXP
, false),
4087 /* PKT Sample trap */
4088 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func
, PKT_SAMPLE
, MIRROR_TO_CPU
,
4089 false, SP_IP2ME
, DISCARD
),
4091 MLXSW_SP_RXL_NO_MARK(ACL0
, TRAP_TO_CPU
, IP2ME
, false),
4092 /* Multicast Router Traps */
4093 MLXSW_SP_RXL_MARK(IPV4_PIM
, TRAP_TO_CPU
, PIM
, false),
4094 MLXSW_SP_RXL_MARK(IPV6_PIM
, TRAP_TO_CPU
, PIM
, false),
4095 MLXSW_SP_RXL_MARK(RPF
, TRAP_TO_CPU
, RPF
, false),
4096 MLXSW_SP_RXL_MARK(ACL1
, TRAP_TO_CPU
, MULTICAST
, false),
4097 MLXSW_SP_RXL_L3_MARK(ACL2
, TRAP_TO_CPU
, MULTICAST
, false),
4099 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP
, TRAP_TO_CPU
, ARP
, false),
4100 MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP
, TRAP_TO_CPU
, ARP
, false),
4103 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core
*mlxsw_core
)
4105 char qpcr_pl
[MLXSW_REG_QPCR_LEN
];
4106 enum mlxsw_reg_qpcr_ir_units ir_units
;
4107 int max_cpu_policers
;
4113 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_CPU_POLICERS
))
4116 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
4118 ir_units
= MLXSW_REG_QPCR_IR_UNITS_M
;
4119 for (i
= 0; i
< max_cpu_policers
; i
++) {
4122 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP
:
4123 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP
:
4124 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP
:
4125 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF
:
4126 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM
:
4127 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF
:
4128 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR
:
4132 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP
:
4133 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD
:
4137 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP
:
4138 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP
:
4139 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP
:
4140 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS
:
4141 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
4142 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE
:
4143 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND
:
4144 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST
:
4148 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME
:
4156 mlxsw_reg_qpcr_pack(qpcr_pl
, i
, ir_units
, is_bytes
, rate
,
4158 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(qpcr
), qpcr_pl
);
4166 static int mlxsw_sp_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
4168 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
4169 enum mlxsw_reg_htgt_trap_group i
;
4170 int max_cpu_policers
;
4171 int max_trap_groups
;
4176 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_TRAP_GROUPS
))
4179 max_trap_groups
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_TRAP_GROUPS
);
4180 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
4182 for (i
= 0; i
< max_trap_groups
; i
++) {
4185 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP
:
4186 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP
:
4187 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP
:
4188 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF
:
4189 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM
:
4193 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP
:
4194 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP
:
4198 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP
:
4199 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME
:
4200 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD
:
4204 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP
:
4205 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND
:
4206 case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF
:
4210 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS
:
4211 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
4212 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE
:
4213 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST
:
4214 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR
:
4218 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT
:
4219 priority
= MLXSW_REG_HTGT_DEFAULT_PRIORITY
;
4220 tc
= MLXSW_REG_HTGT_DEFAULT_TC
;
4221 policer_id
= MLXSW_REG_HTGT_INVALID_POLICER
;
4227 if (max_cpu_policers
<= policer_id
&&
4228 policer_id
!= MLXSW_REG_HTGT_INVALID_POLICER
)
4231 mlxsw_reg_htgt_pack(htgt_pl
, i
, policer_id
, priority
, tc
);
4232 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
4240 static int mlxsw_sp_traps_init(struct mlxsw_sp
*mlxsw_sp
)
4245 err
= mlxsw_sp_cpu_policers_set(mlxsw_sp
->core
);
4249 err
= mlxsw_sp_trap_groups_set(mlxsw_sp
->core
);
4253 for (i
= 0; i
< ARRAY_SIZE(mlxsw_sp_listener
); i
++) {
4254 err
= mlxsw_core_trap_register(mlxsw_sp
->core
,
4255 &mlxsw_sp_listener
[i
],
4258 goto err_listener_register
;
4263 err_listener_register
:
4264 for (i
--; i
>= 0; i
--) {
4265 mlxsw_core_trap_unregister(mlxsw_sp
->core
,
4266 &mlxsw_sp_listener
[i
],
4272 static void mlxsw_sp_traps_fini(struct mlxsw_sp
*mlxsw_sp
)
4276 for (i
= 0; i
< ARRAY_SIZE(mlxsw_sp_listener
); i
++) {
4277 mlxsw_core_trap_unregister(mlxsw_sp
->core
,
4278 &mlxsw_sp_listener
[i
],
4283 static int mlxsw_sp_lag_init(struct mlxsw_sp
*mlxsw_sp
)
4285 char slcr_pl
[MLXSW_REG_SLCR_LEN
];
4289 seed
= jhash(mlxsw_sp
->base_mac
, sizeof(mlxsw_sp
->base_mac
), 0);
4290 mlxsw_reg_slcr_pack(slcr_pl
, MLXSW_REG_SLCR_LAG_HASH_SMAC
|
4291 MLXSW_REG_SLCR_LAG_HASH_DMAC
|
4292 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE
|
4293 MLXSW_REG_SLCR_LAG_HASH_VLANID
|
4294 MLXSW_REG_SLCR_LAG_HASH_SIP
|
4295 MLXSW_REG_SLCR_LAG_HASH_DIP
|
4296 MLXSW_REG_SLCR_LAG_HASH_SPORT
|
4297 MLXSW_REG_SLCR_LAG_HASH_DPORT
|
4298 MLXSW_REG_SLCR_LAG_HASH_IPPROTO
, seed
);
4299 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcr
), slcr_pl
);
4303 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG
) ||
4304 !MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG_MEMBERS
))
4307 mlxsw_sp
->lags
= kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LAG
),
4308 sizeof(struct mlxsw_sp_upper
),
4310 if (!mlxsw_sp
->lags
)
4316 static void mlxsw_sp_lag_fini(struct mlxsw_sp
*mlxsw_sp
)
4318 kfree(mlxsw_sp
->lags
);
4321 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
4323 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
4325 mlxsw_reg_htgt_pack(htgt_pl
, MLXSW_REG_HTGT_TRAP_GROUP_EMAD
,
4326 MLXSW_REG_HTGT_INVALID_POLICER
,
4327 MLXSW_REG_HTGT_DEFAULT_PRIORITY
,
4328 MLXSW_REG_HTGT_DEFAULT_TC
);
4329 return mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
4332 static int mlxsw_sp_netdevice_event(struct notifier_block
*unused
,
4333 unsigned long event
, void *ptr
);
4335 static int mlxsw_sp_init(struct mlxsw_core
*mlxsw_core
,
4336 const struct mlxsw_bus_info
*mlxsw_bus_info
)
4338 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
4341 mlxsw_sp
->core
= mlxsw_core
;
4342 mlxsw_sp
->bus_info
= mlxsw_bus_info
;
4344 err
= mlxsw_sp_fw_rev_validate(mlxsw_sp
);
4348 err
= mlxsw_sp_base_mac_get(mlxsw_sp
);
4350 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to get base mac\n");
4354 err
= mlxsw_sp_kvdl_init(mlxsw_sp
);
4356 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize KVDL\n");
4360 err
= mlxsw_sp_fids_init(mlxsw_sp
);
4362 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize FIDs\n");
4366 err
= mlxsw_sp_traps_init(mlxsw_sp
);
4368 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to set traps\n");
4369 goto err_traps_init
;
4372 err
= mlxsw_sp_buffers_init(mlxsw_sp
);
4374 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize buffers\n");
4375 goto err_buffers_init
;
4378 err
= mlxsw_sp_lag_init(mlxsw_sp
);
4380 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize LAG\n");
4384 /* Initialize SPAN before router and switchdev, so that those components
4385 * can call mlxsw_sp_span_respin().
4387 err
= mlxsw_sp_span_init(mlxsw_sp
);
4389 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init span system\n");
4393 err
= mlxsw_sp_switchdev_init(mlxsw_sp
);
4395 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize switchdev\n");
4396 goto err_switchdev_init
;
4399 err
= mlxsw_sp_counter_pool_init(mlxsw_sp
);
4401 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init counter pool\n");
4402 goto err_counter_pool_init
;
4405 err
= mlxsw_sp_afa_init(mlxsw_sp
);
4407 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize ACL actions\n");
4411 err
= mlxsw_sp_nve_init(mlxsw_sp
);
4413 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize NVE\n");
4417 err
= mlxsw_sp_acl_init(mlxsw_sp
);
4419 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize ACL\n");
4423 err
= mlxsw_sp_router_init(mlxsw_sp
);
4425 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize router\n");
4426 goto err_router_init
;
4429 /* Initialize netdevice notifier after router and SPAN is initialized,
4430 * so that the event handler can use router structures and call SPAN
4433 mlxsw_sp
->netdevice_nb
.notifier_call
= mlxsw_sp_netdevice_event
;
4434 err
= register_netdevice_notifier(&mlxsw_sp
->netdevice_nb
);
4436 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to register netdev notifier\n");
4437 goto err_netdev_notifier
;
4440 err
= mlxsw_sp_dpipe_init(mlxsw_sp
);
4442 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init pipeline debug\n");
4443 goto err_dpipe_init
;
4446 err
= mlxsw_sp_ports_create(mlxsw_sp
);
4448 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create ports\n");
4449 goto err_ports_create
;
4455 mlxsw_sp_dpipe_fini(mlxsw_sp
);
4457 unregister_netdevice_notifier(&mlxsw_sp
->netdevice_nb
);
4458 err_netdev_notifier
:
4459 mlxsw_sp_router_fini(mlxsw_sp
);
4461 mlxsw_sp_acl_fini(mlxsw_sp
);
4463 mlxsw_sp_nve_fini(mlxsw_sp
);
4465 mlxsw_sp_afa_fini(mlxsw_sp
);
4467 mlxsw_sp_counter_pool_fini(mlxsw_sp
);
4468 err_counter_pool_init
:
4469 mlxsw_sp_switchdev_fini(mlxsw_sp
);
4471 mlxsw_sp_span_fini(mlxsw_sp
);
4473 mlxsw_sp_lag_fini(mlxsw_sp
);
4475 mlxsw_sp_buffers_fini(mlxsw_sp
);
4477 mlxsw_sp_traps_fini(mlxsw_sp
);
4479 mlxsw_sp_fids_fini(mlxsw_sp
);
4481 mlxsw_sp_kvdl_fini(mlxsw_sp
);
4485 static int mlxsw_sp1_init(struct mlxsw_core
*mlxsw_core
,
4486 const struct mlxsw_bus_info
*mlxsw_bus_info
)
4488 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
4490 mlxsw_sp
->req_rev
= &mlxsw_sp1_fw_rev
;
4491 mlxsw_sp
->fw_filename
= MLXSW_SP1_FW_FILENAME
;
4492 mlxsw_sp
->kvdl_ops
= &mlxsw_sp1_kvdl_ops
;
4493 mlxsw_sp
->afa_ops
= &mlxsw_sp1_act_afa_ops
;
4494 mlxsw_sp
->afk_ops
= &mlxsw_sp1_afk_ops
;
4495 mlxsw_sp
->mr_tcam_ops
= &mlxsw_sp1_mr_tcam_ops
;
4496 mlxsw_sp
->acl_tcam_ops
= &mlxsw_sp1_acl_tcam_ops
;
4497 mlxsw_sp
->nve_ops_arr
= mlxsw_sp1_nve_ops_arr
;
4498 mlxsw_sp
->mac_mask
= mlxsw_sp1_mac_mask
;
4499 mlxsw_sp
->rif_ops_arr
= mlxsw_sp1_rif_ops_arr
;
4500 mlxsw_sp
->sb_vals
= &mlxsw_sp1_sb_vals
;
4501 mlxsw_sp
->port_type_speed_ops
= &mlxsw_sp1_port_type_speed_ops
;
4503 return mlxsw_sp_init(mlxsw_core
, mlxsw_bus_info
);
4506 static int mlxsw_sp2_init(struct mlxsw_core
*mlxsw_core
,
4507 const struct mlxsw_bus_info
*mlxsw_bus_info
)
4509 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
4511 mlxsw_sp
->kvdl_ops
= &mlxsw_sp2_kvdl_ops
;
4512 mlxsw_sp
->afa_ops
= &mlxsw_sp2_act_afa_ops
;
4513 mlxsw_sp
->afk_ops
= &mlxsw_sp2_afk_ops
;
4514 mlxsw_sp
->mr_tcam_ops
= &mlxsw_sp2_mr_tcam_ops
;
4515 mlxsw_sp
->acl_tcam_ops
= &mlxsw_sp2_acl_tcam_ops
;
4516 mlxsw_sp
->nve_ops_arr
= mlxsw_sp2_nve_ops_arr
;
4517 mlxsw_sp
->mac_mask
= mlxsw_sp2_mac_mask
;
4518 mlxsw_sp
->rif_ops_arr
= mlxsw_sp2_rif_ops_arr
;
4519 mlxsw_sp
->sb_vals
= &mlxsw_sp2_sb_vals
;
4520 mlxsw_sp
->port_type_speed_ops
= &mlxsw_sp2_port_type_speed_ops
;
4522 return mlxsw_sp_init(mlxsw_core
, mlxsw_bus_info
);
4525 static void mlxsw_sp_fini(struct mlxsw_core
*mlxsw_core
)
4527 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
4529 mlxsw_sp_ports_remove(mlxsw_sp
);
4530 mlxsw_sp_dpipe_fini(mlxsw_sp
);
4531 unregister_netdevice_notifier(&mlxsw_sp
->netdevice_nb
);
4532 mlxsw_sp_router_fini(mlxsw_sp
);
4533 mlxsw_sp_acl_fini(mlxsw_sp
);
4534 mlxsw_sp_nve_fini(mlxsw_sp
);
4535 mlxsw_sp_afa_fini(mlxsw_sp
);
4536 mlxsw_sp_counter_pool_fini(mlxsw_sp
);
4537 mlxsw_sp_switchdev_fini(mlxsw_sp
);
4538 mlxsw_sp_span_fini(mlxsw_sp
);
4539 mlxsw_sp_lag_fini(mlxsw_sp
);
4540 mlxsw_sp_buffers_fini(mlxsw_sp
);
4541 mlxsw_sp_traps_fini(mlxsw_sp
);
4542 mlxsw_sp_fids_fini(mlxsw_sp
);
4543 mlxsw_sp_kvdl_fini(mlxsw_sp
);
4546 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
4549 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
4552 static const struct mlxsw_config_profile mlxsw_sp1_config_profile
= {
4554 .max_mid
= MLXSW_SP_MID_MAX
,
4555 .used_flood_tables
= 1,
4556 .used_flood_mode
= 1,
4558 .max_fid_flood_tables
= 3,
4559 .fid_flood_table_size
= MLXSW_SP_FID_FLOOD_TABLE_SIZE
,
4560 .used_max_ib_mc
= 1,
4564 .used_kvd_sizes
= 1,
4565 .kvd_hash_single_parts
= 59,
4566 .kvd_hash_double_parts
= 41,
4567 .kvd_linear_size
= MLXSW_SP_KVD_LINEAR_SIZE
,
4571 .type
= MLXSW_PORT_SWID_TYPE_ETH
,
4576 static const struct mlxsw_config_profile mlxsw_sp2_config_profile
= {
4578 .max_mid
= MLXSW_SP_MID_MAX
,
4579 .used_flood_tables
= 1,
4580 .used_flood_mode
= 1,
4582 .max_fid_flood_tables
= 3,
4583 .fid_flood_table_size
= MLXSW_SP_FID_FLOOD_TABLE_SIZE
,
4584 .used_max_ib_mc
= 1,
4591 .type
= MLXSW_PORT_SWID_TYPE_ETH
,
4597 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core
*mlxsw_core
,
4598 struct devlink_resource_size_params
*kvd_size_params
,
4599 struct devlink_resource_size_params
*linear_size_params
,
4600 struct devlink_resource_size_params
*hash_double_size_params
,
4601 struct devlink_resource_size_params
*hash_single_size_params
)
4603 u32 single_size_min
= MLXSW_CORE_RES_GET(mlxsw_core
,
4604 KVD_SINGLE_MIN_SIZE
);
4605 u32 double_size_min
= MLXSW_CORE_RES_GET(mlxsw_core
,
4606 KVD_DOUBLE_MIN_SIZE
);
4607 u32 kvd_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
);
4608 u32 linear_size_min
= 0;
4610 devlink_resource_size_params_init(kvd_size_params
, kvd_size
, kvd_size
,
4611 MLXSW_SP_KVD_GRANULARITY
,
4612 DEVLINK_RESOURCE_UNIT_ENTRY
);
4613 devlink_resource_size_params_init(linear_size_params
, linear_size_min
,
4614 kvd_size
- single_size_min
-
4616 MLXSW_SP_KVD_GRANULARITY
,
4617 DEVLINK_RESOURCE_UNIT_ENTRY
);
4618 devlink_resource_size_params_init(hash_double_size_params
,
4620 kvd_size
- single_size_min
-
4622 MLXSW_SP_KVD_GRANULARITY
,
4623 DEVLINK_RESOURCE_UNIT_ENTRY
);
4624 devlink_resource_size_params_init(hash_single_size_params
,
4626 kvd_size
- double_size_min
-
4628 MLXSW_SP_KVD_GRANULARITY
,
4629 DEVLINK_RESOURCE_UNIT_ENTRY
);
4632 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core
*mlxsw_core
)
4634 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
4635 struct devlink_resource_size_params hash_single_size_params
;
4636 struct devlink_resource_size_params hash_double_size_params
;
4637 struct devlink_resource_size_params linear_size_params
;
4638 struct devlink_resource_size_params kvd_size_params
;
4639 u32 kvd_size
, single_size
, double_size
, linear_size
;
4640 const struct mlxsw_config_profile
*profile
;
4643 profile
= &mlxsw_sp1_config_profile
;
4644 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, KVD_SIZE
))
4647 mlxsw_sp_resource_size_params_prepare(mlxsw_core
, &kvd_size_params
,
4648 &linear_size_params
,
4649 &hash_double_size_params
,
4650 &hash_single_size_params
);
4652 kvd_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
);
4653 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD
,
4654 kvd_size
, MLXSW_SP_RESOURCE_KVD
,
4655 DEVLINK_RESOURCE_ID_PARENT_TOP
,
4660 linear_size
= profile
->kvd_linear_size
;
4661 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR
,
4663 MLXSW_SP_RESOURCE_KVD_LINEAR
,
4664 MLXSW_SP_RESOURCE_KVD
,
4665 &linear_size_params
);
4669 err
= mlxsw_sp1_kvdl_resources_register(mlxsw_core
);
4673 double_size
= kvd_size
- linear_size
;
4674 double_size
*= profile
->kvd_hash_double_parts
;
4675 double_size
/= profile
->kvd_hash_double_parts
+
4676 profile
->kvd_hash_single_parts
;
4677 double_size
= rounddown(double_size
, MLXSW_SP_KVD_GRANULARITY
);
4678 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE
,
4680 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE
,
4681 MLXSW_SP_RESOURCE_KVD
,
4682 &hash_double_size_params
);
4686 single_size
= kvd_size
- double_size
- linear_size
;
4687 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE
,
4689 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE
,
4690 MLXSW_SP_RESOURCE_KVD
,
4691 &hash_single_size_params
);
/* Spectrum-1 devlink resources: only the KVD tree. */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_sp1_resources_kvd_register(mlxsw_core);
}

/* Spectrum-2 exposes no driver-managed devlink resources. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	return 0;
}
4708 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core
*mlxsw_core
,
4709 const struct mlxsw_config_profile
*profile
,
4710 u64
*p_single_size
, u64
*p_double_size
,
4713 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
4717 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, KVD_SINGLE_MIN_SIZE
) ||
4718 !MLXSW_CORE_RES_VALID(mlxsw_core
, KVD_DOUBLE_MIN_SIZE
))
4721 /* The hash part is what left of the kvd without the
4722 * linear part. It is split to the single size and
4723 * double size by the parts ratio from the profile.
4724 * Both sizes must be a multiplications of the
4725 * granularity from the profile. In case the user
4726 * provided the sizes they are obtained via devlink.
4728 err
= devlink_resource_size_get(devlink
,
4729 MLXSW_SP_RESOURCE_KVD_LINEAR
,
4732 *p_linear_size
= profile
->kvd_linear_size
;
4734 err
= devlink_resource_size_get(devlink
,
4735 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE
,
4738 double_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
) -
4740 double_size
*= profile
->kvd_hash_double_parts
;
4741 double_size
/= profile
->kvd_hash_double_parts
+
4742 profile
->kvd_hash_single_parts
;
4743 *p_double_size
= rounddown(double_size
,
4744 MLXSW_SP_KVD_GRANULARITY
);
4747 err
= devlink_resource_size_get(devlink
,
4748 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE
,
4751 *p_single_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
) -
4752 *p_double_size
- *p_linear_size
;
4754 /* Check results are legal. */
4755 if (*p_single_size
< MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SINGLE_MIN_SIZE
) ||
4756 *p_double_size
< MLXSW_CORE_RES_GET(mlxsw_core
, KVD_DOUBLE_MIN_SIZE
) ||
4757 MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
) < *p_linear_size
)
4764 mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink
*devlink
, u32 id
,
4765 union devlink_param_value val
,
4766 struct netlink_ext_ack
*extack
)
4768 if ((val
.vu8
!= DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER
) &&
4769 (val
.vu8
!= DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH
)) {
4770 NL_SET_ERR_MSG_MOD(extack
, "'fw_load_policy' must be 'driver' or 'flash'");
4777 static const struct devlink_param mlxsw_sp_devlink_params
[] = {
4778 DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY
,
4779 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT
),
4781 mlxsw_sp_devlink_param_fw_load_policy_validate
),
4784 static int mlxsw_sp_params_register(struct mlxsw_core
*mlxsw_core
)
4786 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
4787 union devlink_param_value value
;
4790 err
= devlink_params_register(devlink
, mlxsw_sp_devlink_params
,
4791 ARRAY_SIZE(mlxsw_sp_devlink_params
));
4795 value
.vu8
= DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER
;
4796 devlink_param_driverinit_value_set(devlink
,
4797 DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY
,
4802 static void mlxsw_sp_params_unregister(struct mlxsw_core
*mlxsw_core
)
4804 devlink_params_unregister(priv_to_devlink(mlxsw_core
),
4805 mlxsw_sp_devlink_params
,
4806 ARRAY_SIZE(mlxsw_sp_devlink_params
));
4810 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink
*devlink
, u32 id
,
4811 struct devlink_param_gset_ctx
*ctx
)
4813 struct mlxsw_core
*mlxsw_core
= devlink_priv(devlink
);
4814 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
4816 ctx
->val
.vu32
= mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp
);
4821 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink
*devlink
, u32 id
,
4822 struct devlink_param_gset_ctx
*ctx
)
4824 struct mlxsw_core
*mlxsw_core
= devlink_priv(devlink
);
4825 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
4827 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp
, ctx
->val
.vu32
);
4830 static const struct devlink_param mlxsw_sp2_devlink_params
[] = {
4831 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL
,
4832 "acl_region_rehash_interval",
4833 DEVLINK_PARAM_TYPE_U32
,
4834 BIT(DEVLINK_PARAM_CMODE_RUNTIME
),
4835 mlxsw_sp_params_acl_region_rehash_intrvl_get
,
4836 mlxsw_sp_params_acl_region_rehash_intrvl_set
,
4840 static int mlxsw_sp2_params_register(struct mlxsw_core
*mlxsw_core
)
4842 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
4843 union devlink_param_value value
;
4846 err
= mlxsw_sp_params_register(mlxsw_core
);
4850 err
= devlink_params_register(devlink
, mlxsw_sp2_devlink_params
,
4851 ARRAY_SIZE(mlxsw_sp2_devlink_params
));
4853 goto err_devlink_params_register
;
4856 devlink_param_driverinit_value_set(devlink
,
4857 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL
,
4861 err_devlink_params_register
:
4862 mlxsw_sp_params_unregister(mlxsw_core
);
4866 static void mlxsw_sp2_params_unregister(struct mlxsw_core
*mlxsw_core
)
4868 devlink_params_unregister(priv_to_devlink(mlxsw_core
),
4869 mlxsw_sp2_devlink_params
,
4870 ARRAY_SIZE(mlxsw_sp2_devlink_params
));
4871 mlxsw_sp_params_unregister(mlxsw_core
);
4874 static struct mlxsw_driver mlxsw_sp1_driver
= {
4875 .kind
= mlxsw_sp1_driver_name
,
4876 .priv_size
= sizeof(struct mlxsw_sp
),
4877 .init
= mlxsw_sp1_init
,
4878 .fini
= mlxsw_sp_fini
,
4879 .basic_trap_groups_set
= mlxsw_sp_basic_trap_groups_set
,
4880 .port_split
= mlxsw_sp_port_split
,
4881 .port_unsplit
= mlxsw_sp_port_unsplit
,
4882 .sb_pool_get
= mlxsw_sp_sb_pool_get
,
4883 .sb_pool_set
= mlxsw_sp_sb_pool_set
,
4884 .sb_port_pool_get
= mlxsw_sp_sb_port_pool_get
,
4885 .sb_port_pool_set
= mlxsw_sp_sb_port_pool_set
,
4886 .sb_tc_pool_bind_get
= mlxsw_sp_sb_tc_pool_bind_get
,
4887 .sb_tc_pool_bind_set
= mlxsw_sp_sb_tc_pool_bind_set
,
4888 .sb_occ_snapshot
= mlxsw_sp_sb_occ_snapshot
,
4889 .sb_occ_max_clear
= mlxsw_sp_sb_occ_max_clear
,
4890 .sb_occ_port_pool_get
= mlxsw_sp_sb_occ_port_pool_get
,
4891 .sb_occ_tc_port_bind_get
= mlxsw_sp_sb_occ_tc_port_bind_get
,
4892 .txhdr_construct
= mlxsw_sp_txhdr_construct
,
4893 .resources_register
= mlxsw_sp1_resources_register
,
4894 .kvd_sizes_get
= mlxsw_sp_kvd_sizes_get
,
4895 .params_register
= mlxsw_sp_params_register
,
4896 .params_unregister
= mlxsw_sp_params_unregister
,
4897 .txhdr_len
= MLXSW_TXHDR_LEN
,
4898 .profile
= &mlxsw_sp1_config_profile
,
4899 .res_query_enabled
= true,
4902 static struct mlxsw_driver mlxsw_sp2_driver
= {
4903 .kind
= mlxsw_sp2_driver_name
,
4904 .priv_size
= sizeof(struct mlxsw_sp
),
4905 .init
= mlxsw_sp2_init
,
4906 .fini
= mlxsw_sp_fini
,
4907 .basic_trap_groups_set
= mlxsw_sp_basic_trap_groups_set
,
4908 .port_split
= mlxsw_sp_port_split
,
4909 .port_unsplit
= mlxsw_sp_port_unsplit
,
4910 .sb_pool_get
= mlxsw_sp_sb_pool_get
,
4911 .sb_pool_set
= mlxsw_sp_sb_pool_set
,
4912 .sb_port_pool_get
= mlxsw_sp_sb_port_pool_get
,
4913 .sb_port_pool_set
= mlxsw_sp_sb_port_pool_set
,
4914 .sb_tc_pool_bind_get
= mlxsw_sp_sb_tc_pool_bind_get
,
4915 .sb_tc_pool_bind_set
= mlxsw_sp_sb_tc_pool_bind_set
,
4916 .sb_occ_snapshot
= mlxsw_sp_sb_occ_snapshot
,
4917 .sb_occ_max_clear
= mlxsw_sp_sb_occ_max_clear
,
4918 .sb_occ_port_pool_get
= mlxsw_sp_sb_occ_port_pool_get
,
4919 .sb_occ_tc_port_bind_get
= mlxsw_sp_sb_occ_tc_port_bind_get
,
4920 .txhdr_construct
= mlxsw_sp_txhdr_construct
,
4921 .resources_register
= mlxsw_sp2_resources_register
,
4922 .params_register
= mlxsw_sp2_params_register
,
4923 .params_unregister
= mlxsw_sp2_params_unregister
,
4924 .txhdr_len
= MLXSW_TXHDR_LEN
,
4925 .profile
= &mlxsw_sp2_config_profile
,
4926 .res_query_enabled
= true,
4929 bool mlxsw_sp_port_dev_check(const struct net_device
*dev
)
4931 return dev
->netdev_ops
== &mlxsw_sp_port_netdev_ops
;
4934 static int mlxsw_sp_lower_dev_walk(struct net_device
*lower_dev
, void *data
)
4936 struct mlxsw_sp_port
**p_mlxsw_sp_port
= data
;
4939 if (mlxsw_sp_port_dev_check(lower_dev
)) {
4940 *p_mlxsw_sp_port
= netdev_priv(lower_dev
);
4947 struct mlxsw_sp_port
*mlxsw_sp_port_dev_lower_find(struct net_device
*dev
)
4949 struct mlxsw_sp_port
*mlxsw_sp_port
;
4951 if (mlxsw_sp_port_dev_check(dev
))
4952 return netdev_priv(dev
);
4954 mlxsw_sp_port
= NULL
;
4955 netdev_walk_all_lower_dev(dev
, mlxsw_sp_lower_dev_walk
, &mlxsw_sp_port
);
4957 return mlxsw_sp_port
;
4960 struct mlxsw_sp
*mlxsw_sp_lower_get(struct net_device
*dev
)
4962 struct mlxsw_sp_port
*mlxsw_sp_port
;
4964 mlxsw_sp_port
= mlxsw_sp_port_dev_lower_find(dev
);
4965 return mlxsw_sp_port
? mlxsw_sp_port
->mlxsw_sp
: NULL
;
4968 struct mlxsw_sp_port
*mlxsw_sp_port_dev_lower_find_rcu(struct net_device
*dev
)
4970 struct mlxsw_sp_port
*mlxsw_sp_port
;
4972 if (mlxsw_sp_port_dev_check(dev
))
4973 return netdev_priv(dev
);
4975 mlxsw_sp_port
= NULL
;
4976 netdev_walk_all_lower_dev_rcu(dev
, mlxsw_sp_lower_dev_walk
,
4979 return mlxsw_sp_port
;
4982 struct mlxsw_sp_port
*mlxsw_sp_port_lower_dev_hold(struct net_device
*dev
)
4984 struct mlxsw_sp_port
*mlxsw_sp_port
;
4987 mlxsw_sp_port
= mlxsw_sp_port_dev_lower_find_rcu(dev
);
4989 dev_hold(mlxsw_sp_port
->dev
);
4991 return mlxsw_sp_port
;
4994 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port
*mlxsw_sp_port
)
4996 dev_put(mlxsw_sp_port
->dev
);
5000 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port
*mlxsw_sp_port
,
5001 struct net_device
*lag_dev
)
5003 struct net_device
*br_dev
= netdev_master_upper_dev_get(lag_dev
);
5004 struct net_device
*upper_dev
;
5005 struct list_head
*iter
;
5007 if (netif_is_bridge_port(lag_dev
))
5008 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
, lag_dev
, br_dev
);
5010 netdev_for_each_upper_dev_rcu(lag_dev
, upper_dev
, iter
) {
5011 if (!netif_is_bridge_port(upper_dev
))
5013 br_dev
= netdev_master_upper_dev_get(upper_dev
);
5014 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
, upper_dev
, br_dev
);
5018 static int mlxsw_sp_lag_create(struct mlxsw_sp
*mlxsw_sp
, u16 lag_id
)
5020 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
5022 mlxsw_reg_sldr_lag_create_pack(sldr_pl
, lag_id
);
5023 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
5026 static int mlxsw_sp_lag_destroy(struct mlxsw_sp
*mlxsw_sp
, u16 lag_id
)
5028 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
5030 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl
, lag_id
);
5031 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
5034 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
5035 u16 lag_id
, u8 port_index
)
5037 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
5038 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
5040 mlxsw_reg_slcor_port_add_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
5041 lag_id
, port_index
);
5042 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
5045 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port
*mlxsw_sp_port
,
5048 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
5049 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
5051 mlxsw_reg_slcor_port_remove_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
5053 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
5056 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port
*mlxsw_sp_port
,
5059 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
5060 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
5062 mlxsw_reg_slcor_col_enable_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
5064 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
5067 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port
*mlxsw_sp_port
,
5070 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
5071 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
5073 mlxsw_reg_slcor_col_disable_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
5075 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
5078 static int mlxsw_sp_lag_index_get(struct mlxsw_sp
*mlxsw_sp
,
5079 struct net_device
*lag_dev
,
5082 struct mlxsw_sp_upper
*lag
;
5083 int free_lag_id
= -1;
5087 max_lag
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LAG
);
5088 for (i
= 0; i
< max_lag
; i
++) {
5089 lag
= mlxsw_sp_lag_get(mlxsw_sp
, i
);
5090 if (lag
->ref_count
) {
5091 if (lag
->dev
== lag_dev
) {
5095 } else if (free_lag_id
< 0) {
5099 if (free_lag_id
< 0)
5101 *p_lag_id
= free_lag_id
;
5106 mlxsw_sp_master_lag_check(struct mlxsw_sp
*mlxsw_sp
,
5107 struct net_device
*lag_dev
,
5108 struct netdev_lag_upper_info
*lag_upper_info
,
5109 struct netlink_ext_ack
*extack
)
5113 if (mlxsw_sp_lag_index_get(mlxsw_sp
, lag_dev
, &lag_id
) != 0) {
5114 NL_SET_ERR_MSG_MOD(extack
, "Exceeded number of supported LAG devices");
5117 if (lag_upper_info
->tx_type
!= NETDEV_LAG_TX_TYPE_HASH
) {
5118 NL_SET_ERR_MSG_MOD(extack
, "LAG device using unsupported Tx type");
5124 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp
*mlxsw_sp
,
5125 u16 lag_id
, u8
*p_port_index
)
5127 u64 max_lag_members
;
5130 max_lag_members
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
5132 for (i
= 0; i
< max_lag_members
; i
++) {
5133 if (!mlxsw_sp_port_lagged_get(mlxsw_sp
, lag_id
, i
)) {
5141 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port
*mlxsw_sp_port
,
5142 struct net_device
*lag_dev
)
5144 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
5145 struct mlxsw_sp_upper
*lag
;
5150 err
= mlxsw_sp_lag_index_get(mlxsw_sp
, lag_dev
, &lag_id
);
5153 lag
= mlxsw_sp_lag_get(mlxsw_sp
, lag_id
);
5154 if (!lag
->ref_count
) {
5155 err
= mlxsw_sp_lag_create(mlxsw_sp
, lag_id
);
5161 err
= mlxsw_sp_port_lag_index_get(mlxsw_sp
, lag_id
, &port_index
);
5164 err
= mlxsw_sp_lag_col_port_add(mlxsw_sp_port
, lag_id
, port_index
);
5166 goto err_col_port_add
;
5168 mlxsw_core_lag_mapping_set(mlxsw_sp
->core
, lag_id
, port_index
,
5169 mlxsw_sp_port
->local_port
);
5170 mlxsw_sp_port
->lag_id
= lag_id
;
5171 mlxsw_sp_port
->lagged
= 1;
5174 /* Port is no longer usable as a router interface */
5175 if (mlxsw_sp_port
->default_vlan
->fid
)
5176 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port
->default_vlan
);
5181 if (!lag
->ref_count
)
5182 mlxsw_sp_lag_destroy(mlxsw_sp
, lag_id
);
5186 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port
*mlxsw_sp_port
,
5187 struct net_device
*lag_dev
)
5189 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
5190 u16 lag_id
= mlxsw_sp_port
->lag_id
;
5191 struct mlxsw_sp_upper
*lag
;
5193 if (!mlxsw_sp_port
->lagged
)
5195 lag
= mlxsw_sp_lag_get(mlxsw_sp
, lag_id
);
5196 WARN_ON(lag
->ref_count
== 0);
5198 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port
, lag_id
);
5200 /* Any VLANs configured on the port are no longer valid */
5201 mlxsw_sp_port_vlan_flush(mlxsw_sp_port
, false);
5202 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port
->default_vlan
);
5203 /* Make the LAG and its directly linked uppers leave bridges they
5206 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port
, lag_dev
);
5208 if (lag
->ref_count
== 1)
5209 mlxsw_sp_lag_destroy(mlxsw_sp
, lag_id
);
5211 mlxsw_core_lag_mapping_clear(mlxsw_sp
->core
, lag_id
,
5212 mlxsw_sp_port
->local_port
);
5213 mlxsw_sp_port
->lagged
= 0;
5216 /* Make sure untagged frames are allowed to ingress */
5217 mlxsw_sp_port_pvid_set(mlxsw_sp_port
, MLXSW_SP_DEFAULT_VID
);
5220 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
5223 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
5224 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
5226 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl
, lag_id
,
5227 mlxsw_sp_port
->local_port
);
5228 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
5231 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port
*mlxsw_sp_port
,
5234 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
5235 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
5237 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl
, lag_id
,
5238 mlxsw_sp_port
->local_port
);
5239 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
5243 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port
*mlxsw_sp_port
)
5247 err
= mlxsw_sp_lag_col_port_enable(mlxsw_sp_port
,
5248 mlxsw_sp_port
->lag_id
);
5252 err
= mlxsw_sp_lag_dist_port_add(mlxsw_sp_port
, mlxsw_sp_port
->lag_id
);
5254 goto err_dist_port_add
;
5259 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port
, mlxsw_sp_port
->lag_id
);
5264 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port
*mlxsw_sp_port
)
5268 err
= mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port
,
5269 mlxsw_sp_port
->lag_id
);
5273 err
= mlxsw_sp_lag_col_port_disable(mlxsw_sp_port
,
5274 mlxsw_sp_port
->lag_id
);
5276 goto err_col_port_disable
;
5280 err_col_port_disable
:
5281 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port
, mlxsw_sp_port
->lag_id
);
5285 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port
*mlxsw_sp_port
,
5286 struct netdev_lag_lower_state_info
*info
)
5288 if (info
->tx_enabled
)
5289 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port
);
5291 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port
);
5294 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
5297 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
5298 enum mlxsw_reg_spms_state spms_state
;
5303 spms_state
= enable
? MLXSW_REG_SPMS_STATE_FORWARDING
:
5304 MLXSW_REG_SPMS_STATE_DISCARDING
;
5306 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
5309 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sp_port
->local_port
);
5311 for (vid
= 0; vid
< VLAN_N_VID
; vid
++)
5312 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, spms_state
);
5314 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spms
), spms_pl
);
5319 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port
*mlxsw_sp_port
)
5324 err
= mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, true);
5327 err
= mlxsw_sp_port_stp_set(mlxsw_sp_port
, true);
5329 goto err_port_stp_set
;
5330 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, 1, VLAN_N_VID
- 2,
5333 goto err_port_vlan_set
;
5335 for (; vid
<= VLAN_N_VID
- 1; vid
++) {
5336 err
= mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
,
5339 goto err_vid_learning_set
;
5344 err_vid_learning_set
:
5345 for (vid
--; vid
>= 1; vid
--)
5346 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
, vid
, true);
5348 mlxsw_sp_port_stp_set(mlxsw_sp_port
, false);
5350 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, false);
5354 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port
*mlxsw_sp_port
)
5358 for (vid
= VLAN_N_VID
- 1; vid
>= 1; vid
--)
5359 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
,
5362 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, 1, VLAN_N_VID
- 2,
5364 mlxsw_sp_port_stp_set(mlxsw_sp_port
, false);
5365 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, false);
5368 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device
*br_dev
)
5370 unsigned int num_vxlans
= 0;
5371 struct net_device
*dev
;
5372 struct list_head
*iter
;
5374 netdev_for_each_lower_dev(br_dev
, dev
, iter
) {
5375 if (netif_is_vxlan(dev
))
5379 return num_vxlans
> 1;
5382 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device
*br_dev
)
5384 DECLARE_BITMAP(vlans
, VLAN_N_VID
) = {0};
5385 struct net_device
*dev
;
5386 struct list_head
*iter
;
5388 netdev_for_each_lower_dev(br_dev
, dev
, iter
) {
5392 if (!netif_is_vxlan(dev
))
5395 err
= mlxsw_sp_vxlan_mapped_vid(dev
, &pvid
);
5399 if (test_and_set_bit(pvid
, vlans
))
5406 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device
*br_dev
,
5407 struct netlink_ext_ack
*extack
)
5409 if (br_multicast_enabled(br_dev
)) {
5410 NL_SET_ERR_MSG_MOD(extack
, "Multicast can not be enabled on a bridge with a VxLAN device");
5414 if (!br_vlan_enabled(br_dev
) &&
5415 mlxsw_sp_bridge_has_multiple_vxlans(br_dev
)) {
5416 NL_SET_ERR_MSG_MOD(extack
, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
5420 if (br_vlan_enabled(br_dev
) &&
5421 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev
)) {
5422 NL_SET_ERR_MSG_MOD(extack
, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
5429 static int mlxsw_sp_netdevice_port_upper_event(struct net_device
*lower_dev
,
5430 struct net_device
*dev
,
5431 unsigned long event
, void *ptr
)
5433 struct netdev_notifier_changeupper_info
*info
;
5434 struct mlxsw_sp_port
*mlxsw_sp_port
;
5435 struct netlink_ext_ack
*extack
;
5436 struct net_device
*upper_dev
;
5437 struct mlxsw_sp
*mlxsw_sp
;
5440 mlxsw_sp_port
= netdev_priv(dev
);
5441 mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
5443 extack
= netdev_notifier_info_to_extack(&info
->info
);
5446 case NETDEV_PRECHANGEUPPER
:
5447 upper_dev
= info
->upper_dev
;
5448 if (!is_vlan_dev(upper_dev
) &&
5449 !netif_is_lag_master(upper_dev
) &&
5450 !netif_is_bridge_master(upper_dev
) &&
5451 !netif_is_ovs_master(upper_dev
) &&
5452 !netif_is_macvlan(upper_dev
)) {
5453 NL_SET_ERR_MSG_MOD(extack
, "Unknown upper device type");
5458 if (netif_is_bridge_master(upper_dev
) &&
5459 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp
, upper_dev
) &&
5460 mlxsw_sp_bridge_has_vxlan(upper_dev
) &&
5461 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev
, extack
))
5463 if (netdev_has_any_upper_dev(upper_dev
) &&
5464 (!netif_is_bridge_master(upper_dev
) ||
5465 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp
,
5467 NL_SET_ERR_MSG_MOD(extack
, "Enslaving a port to a device that already has an upper device is not supported");
5470 if (netif_is_lag_master(upper_dev
) &&
5471 !mlxsw_sp_master_lag_check(mlxsw_sp
, upper_dev
,
5472 info
->upper_info
, extack
))
5474 if (netif_is_lag_master(upper_dev
) && vlan_uses_dev(dev
)) {
5475 NL_SET_ERR_MSG_MOD(extack
, "Master device is a LAG master and this device has a VLAN");
5478 if (netif_is_lag_port(dev
) && is_vlan_dev(upper_dev
) &&
5479 !netif_is_lag_master(vlan_dev_real_dev(upper_dev
))) {
5480 NL_SET_ERR_MSG_MOD(extack
, "Can not put a VLAN on a LAG port");
5483 if (netif_is_macvlan(upper_dev
) &&
5484 !mlxsw_sp_rif_find_by_dev(mlxsw_sp
, lower_dev
)) {
5485 NL_SET_ERR_MSG_MOD(extack
, "macvlan is only supported on top of router interfaces");
5488 if (netif_is_ovs_master(upper_dev
) && vlan_uses_dev(dev
)) {
5489 NL_SET_ERR_MSG_MOD(extack
, "Master device is an OVS master and this device has a VLAN");
5492 if (netif_is_ovs_port(dev
) && is_vlan_dev(upper_dev
)) {
5493 NL_SET_ERR_MSG_MOD(extack
, "Can not put a VLAN on an OVS port");
5497 case NETDEV_CHANGEUPPER
:
5498 upper_dev
= info
->upper_dev
;
5499 if (netif_is_bridge_master(upper_dev
)) {
5501 err
= mlxsw_sp_port_bridge_join(mlxsw_sp_port
,
5506 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
,
5509 } else if (netif_is_lag_master(upper_dev
)) {
5510 if (info
->linking
) {
5511 err
= mlxsw_sp_port_lag_join(mlxsw_sp_port
,
5514 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port
);
5515 mlxsw_sp_port_lag_leave(mlxsw_sp_port
,
5518 } else if (netif_is_ovs_master(upper_dev
)) {
5520 err
= mlxsw_sp_port_ovs_join(mlxsw_sp_port
);
5522 mlxsw_sp_port_ovs_leave(mlxsw_sp_port
);
5523 } else if (netif_is_macvlan(upper_dev
)) {
5525 mlxsw_sp_rif_macvlan_del(mlxsw_sp
, upper_dev
);
5526 } else if (is_vlan_dev(upper_dev
)) {
5527 struct net_device
*br_dev
;
5529 if (!netif_is_bridge_port(upper_dev
))
5533 br_dev
= netdev_master_upper_dev_get(upper_dev
);
5534 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
, upper_dev
,
5543 static int mlxsw_sp_netdevice_port_lower_event(struct net_device
*dev
,
5544 unsigned long event
, void *ptr
)
5546 struct netdev_notifier_changelowerstate_info
*info
;
5547 struct mlxsw_sp_port
*mlxsw_sp_port
;
5550 mlxsw_sp_port
= netdev_priv(dev
);
5554 case NETDEV_CHANGELOWERSTATE
:
5555 if (netif_is_lag_port(dev
) && mlxsw_sp_port
->lagged
) {
5556 err
= mlxsw_sp_port_lag_changed(mlxsw_sp_port
,
5557 info
->lower_state_info
);
5559 netdev_err(dev
, "Failed to reflect link aggregation lower state change\n");
5567 static int mlxsw_sp_netdevice_port_event(struct net_device
*lower_dev
,
5568 struct net_device
*port_dev
,
5569 unsigned long event
, void *ptr
)
5572 case NETDEV_PRECHANGEUPPER
:
5573 case NETDEV_CHANGEUPPER
:
5574 return mlxsw_sp_netdevice_port_upper_event(lower_dev
, port_dev
,
5576 case NETDEV_CHANGELOWERSTATE
:
5577 return mlxsw_sp_netdevice_port_lower_event(port_dev
, event
,
5584 static int mlxsw_sp_netdevice_lag_event(struct net_device
*lag_dev
,
5585 unsigned long event
, void *ptr
)
5587 struct net_device
*dev
;
5588 struct list_head
*iter
;
5591 netdev_for_each_lower_dev(lag_dev
, dev
, iter
) {
5592 if (mlxsw_sp_port_dev_check(dev
)) {
5593 ret
= mlxsw_sp_netdevice_port_event(lag_dev
, dev
, event
,
5603 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device
*vlan_dev
,
5604 struct net_device
*dev
,
5605 unsigned long event
, void *ptr
,
5608 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
5609 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
5610 struct netdev_notifier_changeupper_info
*info
= ptr
;
5611 struct netlink_ext_ack
*extack
;
5612 struct net_device
*upper_dev
;
5615 extack
= netdev_notifier_info_to_extack(&info
->info
);
5618 case NETDEV_PRECHANGEUPPER
:
5619 upper_dev
= info
->upper_dev
;
5620 if (!netif_is_bridge_master(upper_dev
) &&
5621 !netif_is_macvlan(upper_dev
)) {
5622 NL_SET_ERR_MSG_MOD(extack
, "Unknown upper device type");
5627 if (netif_is_bridge_master(upper_dev
) &&
5628 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp
, upper_dev
) &&
5629 mlxsw_sp_bridge_has_vxlan(upper_dev
) &&
5630 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev
, extack
))
5632 if (netdev_has_any_upper_dev(upper_dev
) &&
5633 (!netif_is_bridge_master(upper_dev
) ||
5634 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp
,
5636 NL_SET_ERR_MSG_MOD(extack
, "Enslaving a port to a device that already has an upper device is not supported");
5639 if (netif_is_macvlan(upper_dev
) &&
5640 !mlxsw_sp_rif_find_by_dev(mlxsw_sp
, vlan_dev
)) {
5641 NL_SET_ERR_MSG_MOD(extack
, "macvlan is only supported on top of router interfaces");
5645 case NETDEV_CHANGEUPPER
:
5646 upper_dev
= info
->upper_dev
;
5647 if (netif_is_bridge_master(upper_dev
)) {
5649 err
= mlxsw_sp_port_bridge_join(mlxsw_sp_port
,
5654 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
,
5657 } else if (netif_is_macvlan(upper_dev
)) {
5659 mlxsw_sp_rif_macvlan_del(mlxsw_sp
, upper_dev
);
5670 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device
*vlan_dev
,
5671 struct net_device
*lag_dev
,
5672 unsigned long event
,
5675 struct net_device
*dev
;
5676 struct list_head
*iter
;
5679 netdev_for_each_lower_dev(lag_dev
, dev
, iter
) {
5680 if (mlxsw_sp_port_dev_check(dev
)) {
5681 ret
= mlxsw_sp_netdevice_port_vlan_event(vlan_dev
, dev
,
5692 static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device
*vlan_dev
,
5693 struct net_device
*br_dev
,
5694 unsigned long event
, void *ptr
,
5697 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_lower_get(vlan_dev
);
5698 struct netdev_notifier_changeupper_info
*info
= ptr
;
5699 struct netlink_ext_ack
*extack
;
5700 struct net_device
*upper_dev
;
5705 extack
= netdev_notifier_info_to_extack(&info
->info
);
5708 case NETDEV_PRECHANGEUPPER
:
5709 upper_dev
= info
->upper_dev
;
5710 if (!netif_is_macvlan(upper_dev
)) {
5711 NL_SET_ERR_MSG_MOD(extack
, "Unknown upper device type");
5716 if (netif_is_macvlan(upper_dev
) &&
5717 !mlxsw_sp_rif_find_by_dev(mlxsw_sp
, vlan_dev
)) {
5718 NL_SET_ERR_MSG_MOD(extack
, "macvlan is only supported on top of router interfaces");
5722 case NETDEV_CHANGEUPPER
:
5723 upper_dev
= info
->upper_dev
;
5726 if (netif_is_macvlan(upper_dev
))
5727 mlxsw_sp_rif_macvlan_del(mlxsw_sp
, upper_dev
);
5734 static int mlxsw_sp_netdevice_vlan_event(struct net_device
*vlan_dev
,
5735 unsigned long event
, void *ptr
)
5737 struct net_device
*real_dev
= vlan_dev_real_dev(vlan_dev
);
5738 u16 vid
= vlan_dev_vlan_id(vlan_dev
);
5740 if (mlxsw_sp_port_dev_check(real_dev
))
5741 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev
, real_dev
,
5743 else if (netif_is_lag_master(real_dev
))
5744 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev
,
5747 else if (netif_is_bridge_master(real_dev
))
5748 return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev
, real_dev
,
5754 static int mlxsw_sp_netdevice_bridge_event(struct net_device
*br_dev
,
5755 unsigned long event
, void *ptr
)
5757 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_lower_get(br_dev
);
5758 struct netdev_notifier_changeupper_info
*info
= ptr
;
5759 struct netlink_ext_ack
*extack
;
5760 struct net_device
*upper_dev
;
5765 extack
= netdev_notifier_info_to_extack(&info
->info
);
5768 case NETDEV_PRECHANGEUPPER
:
5769 upper_dev
= info
->upper_dev
;
5770 if (!is_vlan_dev(upper_dev
) && !netif_is_macvlan(upper_dev
)) {
5771 NL_SET_ERR_MSG_MOD(extack
, "Unknown upper device type");
5776 if (netif_is_macvlan(upper_dev
) &&
5777 !mlxsw_sp_rif_find_by_dev(mlxsw_sp
, br_dev
)) {
5778 NL_SET_ERR_MSG_MOD(extack
, "macvlan is only supported on top of router interfaces");
5782 case NETDEV_CHANGEUPPER
:
5783 upper_dev
= info
->upper_dev
;
5786 if (is_vlan_dev(upper_dev
))
5787 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp
, upper_dev
);
5788 if (netif_is_macvlan(upper_dev
))
5789 mlxsw_sp_rif_macvlan_del(mlxsw_sp
, upper_dev
);
5796 static int mlxsw_sp_netdevice_macvlan_event(struct net_device
*macvlan_dev
,
5797 unsigned long event
, void *ptr
)
5799 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_lower_get(macvlan_dev
);
5800 struct netdev_notifier_changeupper_info
*info
= ptr
;
5801 struct netlink_ext_ack
*extack
;
5803 if (!mlxsw_sp
|| event
!= NETDEV_PRECHANGEUPPER
)
5806 extack
= netdev_notifier_info_to_extack(&info
->info
);
5808 /* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
5809 NL_SET_ERR_MSG_MOD(extack
, "Unknown upper device type");
5814 static bool mlxsw_sp_is_vrf_event(unsigned long event
, void *ptr
)
5816 struct netdev_notifier_changeupper_info
*info
= ptr
;
5818 if (event
!= NETDEV_PRECHANGEUPPER
&& event
!= NETDEV_CHANGEUPPER
)
5820 return netif_is_l3_master(info
->upper_dev
);
5823 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp
*mlxsw_sp
,
5824 struct net_device
*dev
,
5825 unsigned long event
, void *ptr
)
5827 struct netdev_notifier_changeupper_info
*cu_info
;
5828 struct netdev_notifier_info
*info
= ptr
;
5829 struct netlink_ext_ack
*extack
;
5830 struct net_device
*upper_dev
;
5832 extack
= netdev_notifier_info_to_extack(info
);
5835 case NETDEV_CHANGEUPPER
:
5836 cu_info
= container_of(info
,
5837 struct netdev_notifier_changeupper_info
,
5839 upper_dev
= cu_info
->upper_dev
;
5840 if (!netif_is_bridge_master(upper_dev
))
5842 if (!mlxsw_sp_lower_get(upper_dev
))
5844 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev
, extack
))
5846 if (cu_info
->linking
) {
5847 if (!netif_running(dev
))
5849 /* When the bridge is VLAN-aware, the VNI of the VxLAN
5850 * device needs to be mapped to a VLAN, but at this
5851 * point no VLANs are configured on the VxLAN device
5853 if (br_vlan_enabled(upper_dev
))
5855 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp
, upper_dev
,
5858 /* VLANs were already flushed, which triggered the
5861 if (br_vlan_enabled(upper_dev
))
5863 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp
, dev
);
5867 upper_dev
= netdev_master_upper_dev_get(dev
);
5870 if (!netif_is_bridge_master(upper_dev
))
5872 if (!mlxsw_sp_lower_get(upper_dev
))
5874 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp
, upper_dev
, dev
, 0,
5877 upper_dev
= netdev_master_upper_dev_get(dev
);
5880 if (!netif_is_bridge_master(upper_dev
))
5882 if (!mlxsw_sp_lower_get(upper_dev
))
5884 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp
, dev
);
5891 static int mlxsw_sp_netdevice_event(struct notifier_block
*nb
,
5892 unsigned long event
, void *ptr
)
5894 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
5895 struct mlxsw_sp_span_entry
*span_entry
;
5896 struct mlxsw_sp
*mlxsw_sp
;
5899 mlxsw_sp
= container_of(nb
, struct mlxsw_sp
, netdevice_nb
);
5900 if (event
== NETDEV_UNREGISTER
) {
5901 span_entry
= mlxsw_sp_span_entry_find_by_port(mlxsw_sp
, dev
);
5903 mlxsw_sp_span_entry_invalidate(mlxsw_sp
, span_entry
);
5905 mlxsw_sp_span_respin(mlxsw_sp
);
5907 if (netif_is_vxlan(dev
))
5908 err
= mlxsw_sp_netdevice_vxlan_event(mlxsw_sp
, dev
, event
, ptr
);
5909 if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp
, dev
))
5910 err
= mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp
, dev
,
5912 else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp
, dev
))
5913 err
= mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp
, dev
,
5915 else if (event
== NETDEV_PRE_CHANGEADDR
||
5916 event
== NETDEV_CHANGEADDR
||
5917 event
== NETDEV_CHANGEMTU
)
5918 err
= mlxsw_sp_netdevice_router_port_event(dev
, event
, ptr
);
5919 else if (mlxsw_sp_is_vrf_event(event
, ptr
))
5920 err
= mlxsw_sp_netdevice_vrf_event(dev
, event
, ptr
);
5921 else if (mlxsw_sp_port_dev_check(dev
))
5922 err
= mlxsw_sp_netdevice_port_event(dev
, dev
, event
, ptr
);
5923 else if (netif_is_lag_master(dev
))
5924 err
= mlxsw_sp_netdevice_lag_event(dev
, event
, ptr
);
5925 else if (is_vlan_dev(dev
))
5926 err
= mlxsw_sp_netdevice_vlan_event(dev
, event
, ptr
);
5927 else if (netif_is_bridge_master(dev
))
5928 err
= mlxsw_sp_netdevice_bridge_event(dev
, event
, ptr
);
5929 else if (netif_is_macvlan(dev
))
5930 err
= mlxsw_sp_netdevice_macvlan_event(dev
, event
, ptr
);
5932 return notifier_from_errno(err
);
5935 static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly
= {
5936 .notifier_call
= mlxsw_sp_inetaddr_valid_event
,
5939 static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly
= {
5940 .notifier_call
= mlxsw_sp_inet6addr_valid_event
,
5943 static const struct pci_device_id mlxsw_sp1_pci_id_table
[] = {
5944 {PCI_VDEVICE(MELLANOX
, PCI_DEVICE_ID_MELLANOX_SPECTRUM
), 0},
5948 static struct pci_driver mlxsw_sp1_pci_driver
= {
5949 .name
= mlxsw_sp1_driver_name
,
5950 .id_table
= mlxsw_sp1_pci_id_table
,
5953 static const struct pci_device_id mlxsw_sp2_pci_id_table
[] = {
5954 {PCI_VDEVICE(MELLANOX
, PCI_DEVICE_ID_MELLANOX_SPECTRUM2
), 0},
5958 static struct pci_driver mlxsw_sp2_pci_driver
= {
5959 .name
= mlxsw_sp2_driver_name
,
5960 .id_table
= mlxsw_sp2_pci_id_table
,
5963 static int __init
mlxsw_sp_module_init(void)
5967 register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb
);
5968 register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb
);
5970 err
= mlxsw_core_driver_register(&mlxsw_sp1_driver
);
5972 goto err_sp1_core_driver_register
;
5974 err
= mlxsw_core_driver_register(&mlxsw_sp2_driver
);
5976 goto err_sp2_core_driver_register
;
5978 err
= mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver
);
5980 goto err_sp1_pci_driver_register
;
5982 err
= mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver
);
5984 goto err_sp2_pci_driver_register
;
5988 err_sp2_pci_driver_register
:
5989 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver
);
5990 err_sp1_pci_driver_register
:
5991 mlxsw_core_driver_unregister(&mlxsw_sp2_driver
);
5992 err_sp2_core_driver_register
:
5993 mlxsw_core_driver_unregister(&mlxsw_sp1_driver
);
5994 err_sp1_core_driver_register
:
5995 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb
);
5996 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb
);
6000 static void __exit
mlxsw_sp_module_exit(void)
6002 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver
);
6003 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver
);
6004 mlxsw_core_driver_unregister(&mlxsw_sp2_driver
);
6005 mlxsw_core_driver_unregister(&mlxsw_sp1_driver
);
6006 unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb
);
6007 unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb
);
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

/* Module metadata; the device tables let udev/modprobe autoload this
 * driver for both Spectrum-1 and Spectrum-2 PCI devices, and
 * MODULE_FIRMWARE records the firmware image it may request.
 */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);