1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <net/switchdev.h>
27 #include <net/pkt_cls.h>
28 #include <net/netevent.h>
29 #include <net/addrconf.h>
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_acl_flex_actions.h"
42 #include "spectrum_span.h"
43 #include "spectrum_ptp.h"
44 #include "spectrum_trap.h"
46 #define MLXSW_SP1_FWREV_MAJOR 13
47 #define MLXSW_SP1_FWREV_MINOR 2008
48 #define MLXSW_SP1_FWREV_SUBMINOR 1310
49 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
51 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev
= {
52 .major
= MLXSW_SP1_FWREV_MAJOR
,
53 .minor
= MLXSW_SP1_FWREV_MINOR
,
54 .subminor
= MLXSW_SP1_FWREV_SUBMINOR
,
55 .can_reset_minor
= MLXSW_SP1_FWREV_CAN_RESET_MINOR
,
58 #define MLXSW_SP1_FW_FILENAME \
59 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
60 "." __stringify(MLXSW_SP1_FWREV_MINOR) \
61 "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
63 #define MLXSW_SP2_FWREV_MAJOR 29
64 #define MLXSW_SP2_FWREV_MINOR 2008
65 #define MLXSW_SP2_FWREV_SUBMINOR 1310
67 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev
= {
68 .major
= MLXSW_SP2_FWREV_MAJOR
,
69 .minor
= MLXSW_SP2_FWREV_MINOR
,
70 .subminor
= MLXSW_SP2_FWREV_SUBMINOR
,
73 #define MLXSW_SP2_FW_FILENAME \
74 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
75 "." __stringify(MLXSW_SP2_FWREV_MINOR) \
76 "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
78 #define MLXSW_SP3_FWREV_MAJOR 30
79 #define MLXSW_SP3_FWREV_MINOR 2008
80 #define MLXSW_SP3_FWREV_SUBMINOR 1310
82 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev
= {
83 .major
= MLXSW_SP3_FWREV_MAJOR
,
84 .minor
= MLXSW_SP3_FWREV_MINOR
,
85 .subminor
= MLXSW_SP3_FWREV_SUBMINOR
,
88 #define MLXSW_SP3_FW_FILENAME \
89 "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
90 "." __stringify(MLXSW_SP3_FWREV_MINOR) \
91 "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"
/* Driver names, one per supported Spectrum ASIC generation. */
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
97 static const unsigned char mlxsw_sp1_mac_mask
[ETH_ALEN
] = {
98 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
100 static const unsigned char mlxsw_sp2_mac_mask
[ETH_ALEN
] = {
101 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
108 MLXSW_ITEM32(tx
, hdr
, version
, 0x00, 28, 4);
111 * Packet control type.
112 * 0 - Ethernet control (e.g. EMADs, LACP)
115 MLXSW_ITEM32(tx
, hdr
, ctl
, 0x00, 26, 2);
118 * Packet protocol type. Must be set to 1 (Ethernet).
120 MLXSW_ITEM32(tx
, hdr
, proto
, 0x00, 21, 3);
122 /* tx_hdr_rx_is_router
123 * Packet is sent from the router. Valid for data packets only.
125 MLXSW_ITEM32(tx
, hdr
, rx_is_router
, 0x00, 19, 1);
128 * Indicates if the 'fid' field is valid and should be used for
129 * forwarding lookup. Valid for data packets only.
131 MLXSW_ITEM32(tx
, hdr
, fid_valid
, 0x00, 16, 1);
134 * Switch partition ID. Must be set to 0.
136 MLXSW_ITEM32(tx
, hdr
, swid
, 0x00, 12, 3);
138 /* tx_hdr_control_tclass
139 * Indicates if the packet should use the control TClass and not one
140 * of the data TClasses.
142 MLXSW_ITEM32(tx
, hdr
, control_tclass
, 0x00, 6, 1);
145 * Egress TClass to be used on the egress device on the egress port.
147 MLXSW_ITEM32(tx
, hdr
, etclass
, 0x00, 0, 4);
150 * Destination local port for unicast packets.
151 * Destination multicast ID for multicast packets.
153 * Control packets are directed to a specific egress port, while data
154 * packets are transmitted through the CPU port (0) into the switch partition,
155 * where forwarding rules are applied.
157 MLXSW_ITEM32(tx
, hdr
, port_mid
, 0x04, 16, 16);
160 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
161 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
162 * Valid for data packets only.
164 MLXSW_ITEM32(tx
, hdr
, fid
, 0x08, 0, 16);
168 * 6 - Control packets
170 MLXSW_ITEM32(tx
, hdr
, type
, 0x0C, 0, 4);
172 int mlxsw_sp_flow_counter_get(struct mlxsw_sp
*mlxsw_sp
,
173 unsigned int counter_index
, u64
*packets
,
176 char mgpc_pl
[MLXSW_REG_MGPC_LEN
];
179 mlxsw_reg_mgpc_pack(mgpc_pl
, counter_index
, MLXSW_REG_MGPC_OPCODE_NOP
,
180 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES
);
181 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(mgpc
), mgpc_pl
);
185 *packets
= mlxsw_reg_mgpc_packet_counter_get(mgpc_pl
);
187 *bytes
= mlxsw_reg_mgpc_byte_counter_get(mgpc_pl
);
191 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp
*mlxsw_sp
,
192 unsigned int counter_index
)
194 char mgpc_pl
[MLXSW_REG_MGPC_LEN
];
196 mlxsw_reg_mgpc_pack(mgpc_pl
, counter_index
, MLXSW_REG_MGPC_OPCODE_CLEAR
,
197 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES
);
198 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mgpc
), mgpc_pl
);
201 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp
*mlxsw_sp
,
202 unsigned int *p_counter_index
)
206 err
= mlxsw_sp_counter_alloc(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
210 err
= mlxsw_sp_flow_counter_clear(mlxsw_sp
, *p_counter_index
);
212 goto err_counter_clear
;
216 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
221 void mlxsw_sp_flow_counter_free(struct mlxsw_sp
*mlxsw_sp
,
222 unsigned int counter_index
)
224 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
228 static void mlxsw_sp_txhdr_construct(struct sk_buff
*skb
,
229 const struct mlxsw_tx_info
*tx_info
)
231 char *txhdr
= skb_push(skb
, MLXSW_TXHDR_LEN
);
233 memset(txhdr
, 0, MLXSW_TXHDR_LEN
);
235 mlxsw_tx_hdr_version_set(txhdr
, MLXSW_TXHDR_VERSION_1
);
236 mlxsw_tx_hdr_ctl_set(txhdr
, MLXSW_TXHDR_ETH_CTL
);
237 mlxsw_tx_hdr_proto_set(txhdr
, MLXSW_TXHDR_PROTO_ETH
);
238 mlxsw_tx_hdr_swid_set(txhdr
, 0);
239 mlxsw_tx_hdr_control_tclass_set(txhdr
, 1);
240 mlxsw_tx_hdr_port_mid_set(txhdr
, tx_info
->local_port
);
241 mlxsw_tx_hdr_type_set(txhdr
, MLXSW_TXHDR_TYPE_CONTROL
);
244 enum mlxsw_reg_spms_state
mlxsw_sp_stp_spms_state(u8 state
)
247 case BR_STATE_FORWARDING
:
248 return MLXSW_REG_SPMS_STATE_FORWARDING
;
249 case BR_STATE_LEARNING
:
250 return MLXSW_REG_SPMS_STATE_LEARNING
;
251 case BR_STATE_LISTENING
:
252 case BR_STATE_DISABLED
:
253 case BR_STATE_BLOCKING
:
254 return MLXSW_REG_SPMS_STATE_DISCARDING
;
260 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
,
263 enum mlxsw_reg_spms_state spms_state
= mlxsw_sp_stp_spms_state(state
);
264 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
268 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
271 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sp_port
->local_port
);
272 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, spms_state
);
274 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spms
), spms_pl
);
279 static int mlxsw_sp_base_mac_get(struct mlxsw_sp
*mlxsw_sp
)
281 char spad_pl
[MLXSW_REG_SPAD_LEN
] = {0};
284 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(spad
), spad_pl
);
287 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl
, mlxsw_sp
->base_mac
);
291 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
294 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
295 char paos_pl
[MLXSW_REG_PAOS_LEN
];
297 mlxsw_reg_paos_pack(paos_pl
, mlxsw_sp_port
->local_port
,
298 is_up
? MLXSW_PORT_ADMIN_STATUS_UP
:
299 MLXSW_PORT_ADMIN_STATUS_DOWN
);
300 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(paos
), paos_pl
);
303 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
306 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
307 char ppad_pl
[MLXSW_REG_PPAD_LEN
];
309 mlxsw_reg_ppad_pack(ppad_pl
, true, mlxsw_sp_port
->local_port
);
310 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl
, addr
);
311 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ppad
), ppad_pl
);
314 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
316 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
317 unsigned char *addr
= mlxsw_sp_port
->dev
->dev_addr
;
319 ether_addr_copy(addr
, mlxsw_sp
->base_mac
);
320 addr
[ETH_ALEN
- 1] += mlxsw_sp_port
->local_port
;
321 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
);
324 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port
*mlxsw_sp_port
, int *p_max_mtu
)
326 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
327 char pmtu_pl
[MLXSW_REG_PMTU_LEN
];
330 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, 0);
331 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
335 *p_max_mtu
= mlxsw_reg_pmtu_max_mtu_get(pmtu_pl
);
339 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 mtu
)
341 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
342 char pmtu_pl
[MLXSW_REG_PMTU_LEN
];
344 mtu
+= MLXSW_TXHDR_LEN
+ ETH_HLEN
;
345 if (mtu
> mlxsw_sp_port
->max_mtu
)
348 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, mtu
);
349 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
352 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u8 swid
)
354 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
355 char pspa_pl
[MLXSW_REG_PSPA_LEN
];
357 mlxsw_reg_pspa_pack(pspa_pl
, swid
, mlxsw_sp_port
->local_port
);
358 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pspa
), pspa_pl
);
361 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port
*mlxsw_sp_port
, bool enable
)
363 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
364 char svpe_pl
[MLXSW_REG_SVPE_LEN
];
366 mlxsw_reg_svpe_pack(svpe_pl
, mlxsw_sp_port
->local_port
, enable
);
367 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(svpe
), svpe_pl
);
370 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
,
373 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
377 spvmlr_pl
= kmalloc(MLXSW_REG_SPVMLR_LEN
, GFP_KERNEL
);
380 mlxsw_reg_spvmlr_pack(spvmlr_pl
, mlxsw_sp_port
->local_port
, vid
, vid
,
382 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvmlr
), spvmlr_pl
);
387 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
390 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
391 char spvid_pl
[MLXSW_REG_SPVID_LEN
];
393 mlxsw_reg_spvid_pack(spvid_pl
, mlxsw_sp_port
->local_port
, vid
);
394 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvid
), spvid_pl
);
397 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
400 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
401 char spaft_pl
[MLXSW_REG_SPAFT_LEN
];
403 mlxsw_reg_spaft_pack(spaft_pl
, mlxsw_sp_port
->local_port
, allow
);
404 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spaft
), spaft_pl
);
407 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
412 err
= mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port
, false);
416 err
= __mlxsw_sp_port_pvid_set(mlxsw_sp_port
, vid
);
419 err
= mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port
, true);
421 goto err_port_allow_untagged_set
;
424 mlxsw_sp_port
->pvid
= vid
;
427 err_port_allow_untagged_set
:
428 __mlxsw_sp_port_pvid_set(mlxsw_sp_port
, mlxsw_sp_port
->pvid
);
433 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port
*mlxsw_sp_port
)
435 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
436 char sspr_pl
[MLXSW_REG_SSPR_LEN
];
438 mlxsw_reg_sspr_pack(sspr_pl
, mlxsw_sp_port
->local_port
);
439 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sspr
), sspr_pl
);
443 mlxsw_sp_port_module_info_get(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
444 struct mlxsw_sp_port_mapping
*port_mapping
)
446 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
453 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
454 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
457 module
= mlxsw_reg_pmlp_module_get(pmlp_pl
, 0);
458 width
= mlxsw_reg_pmlp_width_get(pmlp_pl
);
459 separate_rxtx
= mlxsw_reg_pmlp_rxtx_get(pmlp_pl
);
461 if (width
&& !is_power_of_2(width
)) {
462 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unsupported module config: width value is not power of 2\n",
467 for (i
= 0; i
< width
; i
++) {
468 if (mlxsw_reg_pmlp_module_get(pmlp_pl
, i
) != module
) {
469 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unsupported module config: contains multiple modules\n",
474 mlxsw_reg_pmlp_tx_lane_get(pmlp_pl
, i
) !=
475 mlxsw_reg_pmlp_rx_lane_get(pmlp_pl
, i
)) {
476 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
480 if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl
, i
) != i
) {
481 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
487 port_mapping
->module
= module
;
488 port_mapping
->width
= width
;
489 port_mapping
->lane
= mlxsw_reg_pmlp_tx_lane_get(pmlp_pl
, 0);
493 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port
*mlxsw_sp_port
)
495 struct mlxsw_sp_port_mapping
*port_mapping
= &mlxsw_sp_port
->mapping
;
496 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
497 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
500 mlxsw_reg_pmlp_pack(pmlp_pl
, mlxsw_sp_port
->local_port
);
501 mlxsw_reg_pmlp_width_set(pmlp_pl
, port_mapping
->width
);
502 for (i
= 0; i
< port_mapping
->width
; i
++) {
503 mlxsw_reg_pmlp_module_set(pmlp_pl
, i
, port_mapping
->module
);
504 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl
, i
, port_mapping
->lane
+ i
); /* Rx & Tx */
507 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
510 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port
*mlxsw_sp_port
)
512 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
513 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
515 mlxsw_reg_pmlp_pack(pmlp_pl
, mlxsw_sp_port
->local_port
);
516 mlxsw_reg_pmlp_width_set(pmlp_pl
, 0);
517 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
520 static int mlxsw_sp_port_open(struct net_device
*dev
)
522 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
525 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
528 netif_start_queue(dev
);
532 static int mlxsw_sp_port_stop(struct net_device
*dev
)
534 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
536 netif_stop_queue(dev
);
537 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
540 static netdev_tx_t
mlxsw_sp_port_xmit(struct sk_buff
*skb
,
541 struct net_device
*dev
)
543 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
544 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
545 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
546 const struct mlxsw_tx_info tx_info
= {
547 .local_port
= mlxsw_sp_port
->local_port
,
553 if (skb_cow_head(skb
, MLXSW_TXHDR_LEN
)) {
554 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
555 dev_kfree_skb_any(skb
);
559 memset(skb
->cb
, 0, sizeof(struct mlxsw_skb_cb
));
561 if (mlxsw_core_skb_transmit_busy(mlxsw_sp
->core
, &tx_info
))
562 return NETDEV_TX_BUSY
;
564 if (eth_skb_pad(skb
)) {
565 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
569 mlxsw_sp_txhdr_construct(skb
, &tx_info
);
570 /* TX header is consumed by HW on the way so we shouldn't count its
571 * bytes as being sent.
573 len
= skb
->len
- MLXSW_TXHDR_LEN
;
575 /* Due to a race we might fail here because of a full queue. In that
576 * unlikely case we simply drop the packet.
578 err
= mlxsw_core_skb_transmit(mlxsw_sp
->core
, skb
, &tx_info
);
581 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
582 u64_stats_update_begin(&pcpu_stats
->syncp
);
583 pcpu_stats
->tx_packets
++;
584 pcpu_stats
->tx_bytes
+= len
;
585 u64_stats_update_end(&pcpu_stats
->syncp
);
587 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
588 dev_kfree_skb_any(skb
);
/* ndo_set_rx_mode: intentionally empty - RX filtering is handled by the
 * switch hardware, not by host-side filter lists.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
597 static int mlxsw_sp_port_set_mac_address(struct net_device
*dev
, void *p
)
599 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
600 struct sockaddr
*addr
= p
;
603 if (!is_valid_ether_addr(addr
->sa_data
))
604 return -EADDRNOTAVAIL
;
606 err
= mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
->sa_data
);
609 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
613 static u16
mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp
*mlxsw_sp
,
616 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp
, mtu
);
619 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl
, int index
, u16 size
, u16 thres
,
623 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl
, index
, size
);
625 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl
, index
, size
,
629 static u16
mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp
*mlxsw_sp
,
630 const struct mlxsw_sp_hdroom
*hdroom
)
634 delay_cells
= mlxsw_sp_bytes_cells(mlxsw_sp
, hdroom
->delay_bytes
);
636 /* In the worst case scenario the delay will be made up of packets that
637 * are all of size CELL_SIZE + 1, which means each packet will require
638 * almost twice its true size when buffered in the switch. We therefore
639 * multiply this value by the "cell factor", which is close to 2.
641 * Another MTU is added in case the transmitting host already started
642 * transmitting a maximum length frame when the PFC packet was received.
644 return 2 * delay_cells
+ mlxsw_sp_bytes_cells(mlxsw_sp
, hdroom
->mtu
);
647 static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom
*hdroom
, int buf
)
651 for (prio
= 0; prio
< IEEE_8021QAZ_MAX_TCS
; prio
++) {
652 if (hdroom
->prios
.prio
[prio
].buf_idx
== buf
)
658 void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port
*mlxsw_sp_port
,
659 struct mlxsw_sp_hdroom
*hdroom
)
661 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
664 for (i
= 0; i
< DCBX_MAX_BUFFERS
; i
++) {
665 struct mlxsw_sp_hdroom_buf
*buf
= &hdroom
->bufs
.buf
[i
];
669 if (!mlxsw_sp_hdroom_buf_is_used(hdroom
, i
)) {
672 } else if (buf
->lossy
) {
673 thres_cells
= mlxsw_sp_pg_buf_threshold_get(mlxsw_sp
, hdroom
->mtu
);
676 thres_cells
= mlxsw_sp_pg_buf_threshold_get(mlxsw_sp
, hdroom
->mtu
);
677 delay_cells
= mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp
, hdroom
);
680 thres_cells
= mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port
, thres_cells
);
681 delay_cells
= mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port
, delay_cells
);
683 buf
->thres_cells
= thres_cells
;
684 buf
->size_cells
= thres_cells
+ delay_cells
;
688 static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port
*mlxsw_sp_port
,
689 const struct mlxsw_sp_hdroom
*hdroom
, bool force
)
691 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
692 char pbmc_pl
[MLXSW_REG_PBMC_LEN
];
697 dirty
= memcmp(&mlxsw_sp_port
->hdroom
->bufs
, &hdroom
->bufs
, sizeof(hdroom
->bufs
));
698 if (!dirty
&& !force
)
701 mlxsw_reg_pbmc_pack(pbmc_pl
, mlxsw_sp_port
->local_port
, 0, 0);
702 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pbmc
), pbmc_pl
);
706 for (i
= 0; i
< DCBX_MAX_BUFFERS
; i
++) {
707 const struct mlxsw_sp_hdroom_buf
*buf
= &hdroom
->bufs
.buf
[i
];
709 mlxsw_sp_pg_buf_pack(pbmc_pl
, i
, buf
->size_cells
, buf
->thres_cells
, buf
->lossy
);
712 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl
, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX
, 0);
713 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pbmc
), pbmc_pl
);
717 mlxsw_sp_port
->hdroom
->bufs
= hdroom
->bufs
;
721 static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp
*mlxsw_sp
,
722 const struct mlxsw_sp_hdroom
*hdroom
)
724 u32 taken_headroom_cells
= 0;
725 u32 max_headroom_cells
;
728 for (i
= 0; i
< MLXSW_SP_PB_COUNT
; i
++)
729 taken_headroom_cells
+= hdroom
->bufs
.buf
[i
].size_cells
;
731 max_headroom_cells
= mlxsw_sp_sb_max_headroom_cells(mlxsw_sp
);
732 return taken_headroom_cells
<= max_headroom_cells
;
735 static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port
*mlxsw_sp_port
,
736 const struct mlxsw_sp_hdroom
*hdroom
, bool force
)
740 if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port
->mlxsw_sp
, hdroom
))
743 err
= mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port
, hdroom
, false);
747 *mlxsw_sp_port
->hdroom
= *hdroom
;
751 int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port
*mlxsw_sp_port
,
752 const struct mlxsw_sp_hdroom
*hdroom
)
754 return __mlxsw_sp_hdroom_configure(mlxsw_sp_port
, hdroom
, false);
757 static int mlxsw_sp_port_change_mtu(struct net_device
*dev
, int mtu
)
759 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
760 struct mlxsw_sp_hdroom orig_hdroom
;
761 struct mlxsw_sp_hdroom hdroom
;
764 orig_hdroom
= *mlxsw_sp_port
->hdroom
;
766 hdroom
= orig_hdroom
;
768 mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port
, &hdroom
);
770 err
= mlxsw_sp_hdroom_configure(mlxsw_sp_port
, &hdroom
);
772 netdev_err(dev
, "Failed to configure port's headroom\n");
776 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, mtu
);
778 goto err_port_mtu_set
;
783 mlxsw_sp_hdroom_configure(mlxsw_sp_port
, &orig_hdroom
);
788 mlxsw_sp_port_get_sw_stats64(const struct net_device
*dev
,
789 struct rtnl_link_stats64
*stats
)
791 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
792 struct mlxsw_sp_port_pcpu_stats
*p
;
793 u64 rx_packets
, rx_bytes
, tx_packets
, tx_bytes
;
798 for_each_possible_cpu(i
) {
799 p
= per_cpu_ptr(mlxsw_sp_port
->pcpu_stats
, i
);
801 start
= u64_stats_fetch_begin_irq(&p
->syncp
);
802 rx_packets
= p
->rx_packets
;
803 rx_bytes
= p
->rx_bytes
;
804 tx_packets
= p
->tx_packets
;
805 tx_bytes
= p
->tx_bytes
;
806 } while (u64_stats_fetch_retry_irq(&p
->syncp
, start
));
808 stats
->rx_packets
+= rx_packets
;
809 stats
->rx_bytes
+= rx_bytes
;
810 stats
->tx_packets
+= tx_packets
;
811 stats
->tx_bytes
+= tx_bytes
;
812 /* tx_dropped is u32, updated without syncp protection. */
813 tx_dropped
+= p
->tx_dropped
;
815 stats
->tx_dropped
= tx_dropped
;
819 static bool mlxsw_sp_port_has_offload_stats(const struct net_device
*dev
, int attr_id
)
822 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
829 static int mlxsw_sp_port_get_offload_stats(int attr_id
, const struct net_device
*dev
,
833 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
834 return mlxsw_sp_port_get_sw_stats64(dev
, sp
);
840 int mlxsw_sp_port_get_stats_raw(struct net_device
*dev
, int grp
,
841 int prio
, char *ppcnt_pl
)
843 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
844 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
846 mlxsw_reg_ppcnt_pack(ppcnt_pl
, mlxsw_sp_port
->local_port
, grp
, prio
);
847 return mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ppcnt
), ppcnt_pl
);
850 static int mlxsw_sp_port_get_hw_stats(struct net_device
*dev
,
851 struct rtnl_link_stats64
*stats
)
853 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
856 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_IEEE_8023_CNT
,
862 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl
);
864 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl
);
866 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl
);
868 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl
);
870 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl
);
872 stats
->rx_crc_errors
=
873 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl
);
874 stats
->rx_frame_errors
=
875 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl
);
877 stats
->rx_length_errors
= (
878 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl
) +
879 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl
) +
880 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl
));
882 stats
->rx_errors
= (stats
->rx_crc_errors
+
883 stats
->rx_frame_errors
+ stats
->rx_length_errors
);
890 mlxsw_sp_port_get_hw_xstats(struct net_device
*dev
,
891 struct mlxsw_sp_port_xstats
*xstats
)
893 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
896 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_EXT_CNT
, 0,
899 xstats
->ecn
= mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl
);
901 for (i
= 0; i
< TC_MAX_QUEUE
; i
++) {
902 err
= mlxsw_sp_port_get_stats_raw(dev
,
903 MLXSW_REG_PPCNT_TC_CONG_TC
,
906 xstats
->wred_drop
[i
] =
907 mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl
);
909 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_TC_CNT
,
915 mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl
);
916 xstats
->tail_drop
[i
] =
917 mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl
);
920 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
921 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_PRIO_CNT
,
926 xstats
->tx_packets
[i
] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl
);
927 xstats
->tx_bytes
[i
] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl
);
931 static void update_stats_cache(struct work_struct
*work
)
933 struct mlxsw_sp_port
*mlxsw_sp_port
=
934 container_of(work
, struct mlxsw_sp_port
,
935 periodic_hw_stats
.update_dw
.work
);
937 if (!netif_carrier_ok(mlxsw_sp_port
->dev
))
938 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
939 * necessary when port goes down.
943 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port
->dev
,
944 &mlxsw_sp_port
->periodic_hw_stats
.stats
);
945 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port
->dev
,
946 &mlxsw_sp_port
->periodic_hw_stats
.xstats
);
949 mlxsw_core_schedule_dw(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
,
950 MLXSW_HW_STATS_UPDATE_TIME
);
953 /* Return the stats from a cache that is updated periodically,
954 * as this function might get called in an atomic context.
957 mlxsw_sp_port_get_stats64(struct net_device
*dev
,
958 struct rtnl_link_stats64
*stats
)
960 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
962 memcpy(stats
, &mlxsw_sp_port
->periodic_hw_stats
.stats
, sizeof(*stats
));
965 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
966 u16 vid_begin
, u16 vid_end
,
967 bool is_member
, bool untagged
)
969 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
973 spvm_pl
= kmalloc(MLXSW_REG_SPVM_LEN
, GFP_KERNEL
);
977 mlxsw_reg_spvm_pack(spvm_pl
, mlxsw_sp_port
->local_port
, vid_begin
,
978 vid_end
, is_member
, untagged
);
979 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvm
), spvm_pl
);
984 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid_begin
,
985 u16 vid_end
, bool is_member
, bool untagged
)
990 for (vid
= vid_begin
; vid
<= vid_end
;
991 vid
+= MLXSW_REG_SPVM_REC_MAX_COUNT
) {
992 vid_e
= min((u16
) (vid
+ MLXSW_REG_SPVM_REC_MAX_COUNT
- 1),
995 err
= __mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid_e
,
996 is_member
, untagged
);
1004 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port
*mlxsw_sp_port
,
1007 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
, *tmp
;
1009 list_for_each_entry_safe(mlxsw_sp_port_vlan
, tmp
,
1010 &mlxsw_sp_port
->vlans_list
, list
) {
1011 if (!flush_default
&&
1012 mlxsw_sp_port_vlan
->vid
== MLXSW_SP_DEFAULT_VID
)
1014 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan
);
1019 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
)
1021 if (mlxsw_sp_port_vlan
->bridge_port
)
1022 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan
);
1023 else if (mlxsw_sp_port_vlan
->fid
)
1024 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan
);
1027 struct mlxsw_sp_port_vlan
*
1028 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
1030 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1031 bool untagged
= vid
== MLXSW_SP_DEFAULT_VID
;
1034 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, vid
);
1035 if (mlxsw_sp_port_vlan
)
1036 return ERR_PTR(-EEXIST
);
1038 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, true, untagged
);
1040 return ERR_PTR(err
);
1042 mlxsw_sp_port_vlan
= kzalloc(sizeof(*mlxsw_sp_port_vlan
), GFP_KERNEL
);
1043 if (!mlxsw_sp_port_vlan
) {
1045 goto err_port_vlan_alloc
;
1048 mlxsw_sp_port_vlan
->mlxsw_sp_port
= mlxsw_sp_port
;
1049 mlxsw_sp_port_vlan
->vid
= vid
;
1050 list_add(&mlxsw_sp_port_vlan
->list
, &mlxsw_sp_port
->vlans_list
);
1052 return mlxsw_sp_port_vlan
;
1054 err_port_vlan_alloc
:
1055 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
1056 return ERR_PTR(err
);
1059 void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
)
1061 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp_port_vlan
->mlxsw_sp_port
;
1062 u16 vid
= mlxsw_sp_port_vlan
->vid
;
1064 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan
);
1065 list_del(&mlxsw_sp_port_vlan
->list
);
1066 kfree(mlxsw_sp_port_vlan
);
1067 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
1070 static int mlxsw_sp_port_add_vid(struct net_device
*dev
,
1071 __be16 __always_unused proto
, u16 vid
)
1073 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1075 /* VLAN 0 is added to HW filter when device goes up, but it is
1076 * reserved in our case, so simply return.
1081 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port
, vid
));
1084 static int mlxsw_sp_port_kill_vid(struct net_device
*dev
,
1085 __be16 __always_unused proto
, u16 vid
)
1087 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1088 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1090 /* VLAN 0 is removed from HW filter when device goes down, but
1091 * it is reserved in our case, so simply return.
1096 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, vid
);
1097 if (!mlxsw_sp_port_vlan
)
1099 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan
);
1104 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port
*mlxsw_sp_port
,
1105 struct flow_block_offload
*f
)
1107 switch (f
->binder_type
) {
1108 case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS
:
1109 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port
, f
, true);
1110 case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS
:
1111 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port
, f
, false);
1112 case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP
:
1113 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port
, f
);
1119 static int mlxsw_sp_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
1122 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1125 case TC_SETUP_BLOCK
:
1126 return mlxsw_sp_setup_tc_block(mlxsw_sp_port
, type_data
);
1127 case TC_SETUP_QDISC_RED
:
1128 return mlxsw_sp_setup_tc_red(mlxsw_sp_port
, type_data
);
1129 case TC_SETUP_QDISC_PRIO
:
1130 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port
, type_data
);
1131 case TC_SETUP_QDISC_ETS
:
1132 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port
, type_data
);
1133 case TC_SETUP_QDISC_TBF
:
1134 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port
, type_data
);
1135 case TC_SETUP_QDISC_FIFO
:
1136 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port
, type_data
);
1142 static int mlxsw_sp_feature_hw_tc(struct net_device
*dev
, bool enable
)
1144 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1147 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port
->ing_flow_block
) ||
1148 mlxsw_sp_flow_block_rule_count(mlxsw_sp_port
->eg_flow_block
)) {
1149 netdev_err(dev
, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1152 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port
->ing_flow_block
);
1153 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port
->eg_flow_block
);
1155 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port
->ing_flow_block
);
1156 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port
->eg_flow_block
);
1161 static int mlxsw_sp_feature_loopback(struct net_device
*dev
, bool enable
)
1163 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1164 char pplr_pl
[MLXSW_REG_PPLR_LEN
];
1167 if (netif_running(dev
))
1168 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
1170 mlxsw_reg_pplr_pack(pplr_pl
, mlxsw_sp_port
->local_port
, enable
);
1171 err
= mlxsw_reg_write(mlxsw_sp_port
->mlxsw_sp
->core
, MLXSW_REG(pplr
),
1174 if (netif_running(dev
))
1175 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
1180 typedef int (*mlxsw_sp_feature_handler
)(struct net_device
*dev
, bool enable
);
1182 static int mlxsw_sp_handle_feature(struct net_device
*dev
,
1183 netdev_features_t wanted_features
,
1184 netdev_features_t feature
,
1185 mlxsw_sp_feature_handler feature_handler
)
1187 netdev_features_t changes
= wanted_features
^ dev
->features
;
1188 bool enable
= !!(wanted_features
& feature
);
1191 if (!(changes
& feature
))
1194 err
= feature_handler(dev
, enable
);
1196 netdev_err(dev
, "%s feature %pNF failed, err %d\n",
1197 enable
? "Enable" : "Disable", &feature
, err
);
1202 dev
->features
|= feature
;
1204 dev
->features
&= ~feature
;
1208 static int mlxsw_sp_set_features(struct net_device
*dev
,
1209 netdev_features_t features
)
1211 netdev_features_t oper_features
= dev
->features
;
1214 err
|= mlxsw_sp_handle_feature(dev
, features
, NETIF_F_HW_TC
,
1215 mlxsw_sp_feature_hw_tc
);
1216 err
|= mlxsw_sp_handle_feature(dev
, features
, NETIF_F_LOOPBACK
,
1217 mlxsw_sp_feature_loopback
);
1220 dev
->features
= oper_features
;
1227 static struct devlink_port
*
1228 mlxsw_sp_port_get_devlink_port(struct net_device
*dev
)
1230 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1231 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1233 return mlxsw_core_port_devlink_port_get(mlxsw_sp
->core
,
1234 mlxsw_sp_port
->local_port
);
1237 static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1240 struct hwtstamp_config config
;
1243 if (copy_from_user(&config
, ifr
->ifr_data
, sizeof(config
)))
1246 err
= mlxsw_sp_port
->mlxsw_sp
->ptp_ops
->hwtstamp_set(mlxsw_sp_port
,
1251 if (copy_to_user(ifr
->ifr_data
, &config
, sizeof(config
)))
1257 static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port
*mlxsw_sp_port
,
1260 struct hwtstamp_config config
;
1263 err
= mlxsw_sp_port
->mlxsw_sp
->ptp_ops
->hwtstamp_get(mlxsw_sp_port
,
1268 if (copy_to_user(ifr
->ifr_data
, &config
, sizeof(config
)))
1274 static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port
*mlxsw_sp_port
)
1276 struct hwtstamp_config config
= {0};
1278 mlxsw_sp_port
->mlxsw_sp
->ptp_ops
->hwtstamp_set(mlxsw_sp_port
, &config
);
1282 mlxsw_sp_port_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
1284 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1288 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port
, ifr
);
1290 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port
, ifr
);
1296 static const struct net_device_ops mlxsw_sp_port_netdev_ops
= {
1297 .ndo_open
= mlxsw_sp_port_open
,
1298 .ndo_stop
= mlxsw_sp_port_stop
,
1299 .ndo_start_xmit
= mlxsw_sp_port_xmit
,
1300 .ndo_setup_tc
= mlxsw_sp_setup_tc
,
1301 .ndo_set_rx_mode
= mlxsw_sp_set_rx_mode
,
1302 .ndo_set_mac_address
= mlxsw_sp_port_set_mac_address
,
1303 .ndo_change_mtu
= mlxsw_sp_port_change_mtu
,
1304 .ndo_get_stats64
= mlxsw_sp_port_get_stats64
,
1305 .ndo_has_offload_stats
= mlxsw_sp_port_has_offload_stats
,
1306 .ndo_get_offload_stats
= mlxsw_sp_port_get_offload_stats
,
1307 .ndo_vlan_rx_add_vid
= mlxsw_sp_port_add_vid
,
1308 .ndo_vlan_rx_kill_vid
= mlxsw_sp_port_kill_vid
,
1309 .ndo_set_features
= mlxsw_sp_set_features
,
1310 .ndo_get_devlink_port
= mlxsw_sp_port_get_devlink_port
,
1311 .ndo_do_ioctl
= mlxsw_sp_port_ioctl
,
1315 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port
*mlxsw_sp_port
)
1317 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1318 u32 eth_proto_cap
, eth_proto_admin
, eth_proto_oper
;
1319 const struct mlxsw_sp_port_type_speed_ops
*ops
;
1320 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
1323 ops
= mlxsw_sp
->port_type_speed_ops
;
1325 /* Set advertised speeds to supported speeds. */
1326 ops
->reg_ptys_eth_pack(mlxsw_sp
, ptys_pl
, mlxsw_sp_port
->local_port
,
1328 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1332 ops
->reg_ptys_eth_unpack(mlxsw_sp
, ptys_pl
, ð_proto_cap
,
1333 ð_proto_admin
, ð_proto_oper
);
1334 ops
->reg_ptys_eth_pack(mlxsw_sp
, ptys_pl
, mlxsw_sp_port
->local_port
,
1335 eth_proto_cap
, mlxsw_sp_port
->link
.autoneg
);
1336 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1339 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port
*mlxsw_sp_port
, u32
*speed
)
1341 const struct mlxsw_sp_port_type_speed_ops
*port_type_speed_ops
;
1342 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1343 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
1347 port_type_speed_ops
= mlxsw_sp
->port_type_speed_ops
;
1348 port_type_speed_ops
->reg_ptys_eth_pack(mlxsw_sp
, ptys_pl
,
1349 mlxsw_sp_port
->local_port
, 0,
1351 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1354 port_type_speed_ops
->reg_ptys_eth_unpack(mlxsw_sp
, ptys_pl
, NULL
, NULL
,
1356 *speed
= port_type_speed_ops
->from_ptys_speed(mlxsw_sp
, eth_proto_oper
);
1360 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1361 enum mlxsw_reg_qeec_hr hr
, u8 index
, u8 next_index
,
1362 bool dwrr
, u8 dwrr_weight
)
1364 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1365 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
1367 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
1369 mlxsw_reg_qeec_de_set(qeec_pl
, true);
1370 mlxsw_reg_qeec_dwrr_set(qeec_pl
, dwrr
);
1371 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl
, dwrr_weight
);
1372 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
1375 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1376 enum mlxsw_reg_qeec_hr hr
, u8 index
,
1377 u8 next_index
, u32 maxrate
, u8 burst_size
)
1379 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1380 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
1382 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
1384 mlxsw_reg_qeec_mase_set(qeec_pl
, true);
1385 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl
, maxrate
);
1386 mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl
, burst_size
);
1387 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
1390 static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1391 enum mlxsw_reg_qeec_hr hr
, u8 index
,
1392 u8 next_index
, u32 minrate
)
1394 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1395 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
1397 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
1399 mlxsw_reg_qeec_mise_set(qeec_pl
, true);
1400 mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl
, minrate
);
1402 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
1405 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1406 u8 switch_prio
, u8 tclass
)
1408 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1409 char qtct_pl
[MLXSW_REG_QTCT_LEN
];
1411 mlxsw_reg_qtct_pack(qtct_pl
, mlxsw_sp_port
->local_port
, switch_prio
,
1413 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qtct
), qtct_pl
);
1416 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
1420 /* Setup the elements hierarcy, so that each TC is linked to
1421 * one subgroup, which are all member in the same group.
1423 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
1424 MLXSW_REG_QEEC_HR_GROUP
, 0, 0, false, 0);
1427 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1428 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
1429 MLXSW_REG_QEEC_HR_SUBGROUP
, i
,
1434 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1435 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
1436 MLXSW_REG_QEEC_HR_TC
, i
, i
,
1441 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
1442 MLXSW_REG_QEEC_HR_TC
,
1449 /* Make sure the max shaper is disabled in all hierarchies that support
1450 * it. Note that this disables ptps (PTP shaper), but that is intended
1451 * for the initial configuration.
1453 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
1454 MLXSW_REG_QEEC_HR_PORT
, 0, 0,
1455 MLXSW_REG_QEEC_MAS_DIS
, 0);
1458 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1459 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
1460 MLXSW_REG_QEEC_HR_SUBGROUP
,
1462 MLXSW_REG_QEEC_MAS_DIS
, 0);
1466 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1467 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
1468 MLXSW_REG_QEEC_HR_TC
,
1470 MLXSW_REG_QEEC_MAS_DIS
, 0);
1474 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
1475 MLXSW_REG_QEEC_HR_TC
,
1477 MLXSW_REG_QEEC_MAS_DIS
, 0);
1482 /* Configure the min shaper for multicast TCs. */
1483 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1484 err
= mlxsw_sp_port_min_bw_set(mlxsw_sp_port
,
1485 MLXSW_REG_QEEC_HR_TC
,
1487 MLXSW_REG_QEEC_MIS_MIN
);
1492 /* Map all priorities to traffic class 0. */
1493 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1494 err
= mlxsw_sp_port_prio_tc_set(mlxsw_sp_port
, i
, 0);
1502 static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1505 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1506 char qtctm_pl
[MLXSW_REG_QTCTM_LEN
];
1508 mlxsw_reg_qtctm_pack(qtctm_pl
, mlxsw_sp_port
->local_port
, enable
);
1509 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qtctm
), qtctm_pl
);
1512 static int mlxsw_sp_port_create(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
1513 u8 split_base_local_port
,
1514 struct mlxsw_sp_port_mapping
*port_mapping
)
1516 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1517 bool split
= !!split_base_local_port
;
1518 struct mlxsw_sp_port
*mlxsw_sp_port
;
1519 u32 lanes
= port_mapping
->width
;
1520 struct net_device
*dev
;
1524 splittable
= lanes
> 1 && !split
;
1525 err
= mlxsw_core_port_init(mlxsw_sp
->core
, local_port
,
1526 port_mapping
->module
+ 1, split
,
1527 port_mapping
->lane
/ lanes
,
1530 sizeof(mlxsw_sp
->base_mac
));
1532 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to init core port\n",
1537 dev
= alloc_etherdev(sizeof(struct mlxsw_sp_port
));
1540 goto err_alloc_etherdev
;
1542 SET_NETDEV_DEV(dev
, mlxsw_sp
->bus_info
->dev
);
1543 dev_net_set(dev
, mlxsw_sp_net(mlxsw_sp
));
1544 mlxsw_sp_port
= netdev_priv(dev
);
1545 mlxsw_sp_port
->dev
= dev
;
1546 mlxsw_sp_port
->mlxsw_sp
= mlxsw_sp
;
1547 mlxsw_sp_port
->local_port
= local_port
;
1548 mlxsw_sp_port
->pvid
= MLXSW_SP_DEFAULT_VID
;
1549 mlxsw_sp_port
->split
= split
;
1550 mlxsw_sp_port
->split_base_local_port
= split_base_local_port
;
1551 mlxsw_sp_port
->mapping
= *port_mapping
;
1552 mlxsw_sp_port
->link
.autoneg
= 1;
1553 INIT_LIST_HEAD(&mlxsw_sp_port
->vlans_list
);
1555 mlxsw_sp_port
->pcpu_stats
=
1556 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats
);
1557 if (!mlxsw_sp_port
->pcpu_stats
) {
1559 goto err_alloc_stats
;
1562 INIT_DELAYED_WORK(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
,
1563 &update_stats_cache
);
1565 dev
->netdev_ops
= &mlxsw_sp_port_netdev_ops
;
1566 dev
->ethtool_ops
= &mlxsw_sp_port_ethtool_ops
;
1568 err
= mlxsw_sp_port_module_map(mlxsw_sp_port
);
1570 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to map module\n",
1571 mlxsw_sp_port
->local_port
);
1572 goto err_port_module_map
;
1575 err
= mlxsw_sp_port_swid_set(mlxsw_sp_port
, 0);
1577 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set SWID\n",
1578 mlxsw_sp_port
->local_port
);
1579 goto err_port_swid_set
;
1582 err
= mlxsw_sp_port_dev_addr_init(mlxsw_sp_port
);
1584 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unable to init port mac address\n",
1585 mlxsw_sp_port
->local_port
);
1586 goto err_dev_addr_init
;
1589 netif_carrier_off(dev
);
1591 dev
->features
|= NETIF_F_NETNS_LOCAL
| NETIF_F_LLTX
| NETIF_F_SG
|
1592 NETIF_F_HW_VLAN_CTAG_FILTER
| NETIF_F_HW_TC
;
1593 dev
->hw_features
|= NETIF_F_HW_TC
| NETIF_F_LOOPBACK
;
1596 dev
->max_mtu
= ETH_MAX_MTU
;
1598 /* Each packet needs to have a Tx header (metadata) on top all other
1601 dev
->needed_headroom
= MLXSW_TXHDR_LEN
;
1603 err
= mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port
);
1605 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set system port mapping\n",
1606 mlxsw_sp_port
->local_port
);
1607 goto err_port_system_port_mapping_set
;
1610 err
= mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port
);
1612 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to enable speeds\n",
1613 mlxsw_sp_port
->local_port
);
1614 goto err_port_speed_by_width_set
;
1617 err
= mlxsw_sp
->port_type_speed_ops
->ptys_max_speed(mlxsw_sp_port
,
1618 &mlxsw_sp_port
->max_speed
);
1620 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to get maximum speed\n",
1621 mlxsw_sp_port
->local_port
);
1622 goto err_max_speed_get
;
1625 err
= mlxsw_sp_port_max_mtu_get(mlxsw_sp_port
, &mlxsw_sp_port
->max_mtu
);
1627 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to get maximum MTU\n",
1628 mlxsw_sp_port
->local_port
);
1629 goto err_port_max_mtu_get
;
1632 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, ETH_DATA_LEN
);
1634 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set MTU\n",
1635 mlxsw_sp_port
->local_port
);
1636 goto err_port_mtu_set
;
1639 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
1641 goto err_port_admin_status_set
;
1643 err
= mlxsw_sp_port_buffers_init(mlxsw_sp_port
);
1645 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize buffers\n",
1646 mlxsw_sp_port
->local_port
);
1647 goto err_port_buffers_init
;
1650 err
= mlxsw_sp_port_ets_init(mlxsw_sp_port
);
1652 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize ETS\n",
1653 mlxsw_sp_port
->local_port
);
1654 goto err_port_ets_init
;
1657 err
= mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port
, true);
1659 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize TC MC mode\n",
1660 mlxsw_sp_port
->local_port
);
1661 goto err_port_tc_mc_mode
;
1664 /* ETS and buffers must be initialized before DCB. */
1665 err
= mlxsw_sp_port_dcb_init(mlxsw_sp_port
);
1667 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize DCB\n",
1668 mlxsw_sp_port
->local_port
);
1669 goto err_port_dcb_init
;
1672 err
= mlxsw_sp_port_fids_init(mlxsw_sp_port
);
1674 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize FIDs\n",
1675 mlxsw_sp_port
->local_port
);
1676 goto err_port_fids_init
;
1679 err
= mlxsw_sp_tc_qdisc_init(mlxsw_sp_port
);
1681 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize TC qdiscs\n",
1682 mlxsw_sp_port
->local_port
);
1683 goto err_port_qdiscs_init
;
1686 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, 0, VLAN_N_VID
- 1, false,
1689 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to clear VLAN filter\n",
1690 mlxsw_sp_port
->local_port
);
1691 goto err_port_vlan_clear
;
1694 err
= mlxsw_sp_port_nve_init(mlxsw_sp_port
);
1696 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize NVE\n",
1697 mlxsw_sp_port
->local_port
);
1698 goto err_port_nve_init
;
1701 err
= mlxsw_sp_port_pvid_set(mlxsw_sp_port
, MLXSW_SP_DEFAULT_VID
);
1703 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set PVID\n",
1704 mlxsw_sp_port
->local_port
);
1705 goto err_port_pvid_set
;
1708 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_create(mlxsw_sp_port
,
1709 MLXSW_SP_DEFAULT_VID
);
1710 if (IS_ERR(mlxsw_sp_port_vlan
)) {
1711 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to create VID 1\n",
1712 mlxsw_sp_port
->local_port
);
1713 err
= PTR_ERR(mlxsw_sp_port_vlan
);
1714 goto err_port_vlan_create
;
1716 mlxsw_sp_port
->default_vlan
= mlxsw_sp_port_vlan
;
1718 INIT_DELAYED_WORK(&mlxsw_sp_port
->ptp
.shaper_dw
,
1719 mlxsw_sp
->ptp_ops
->shaper_work
);
1721 mlxsw_sp
->ports
[local_port
] = mlxsw_sp_port
;
1722 err
= register_netdev(dev
);
1724 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to register netdev\n",
1725 mlxsw_sp_port
->local_port
);
1726 goto err_register_netdev
;
1729 mlxsw_core_port_eth_set(mlxsw_sp
->core
, mlxsw_sp_port
->local_port
,
1730 mlxsw_sp_port
, dev
);
1731 mlxsw_core_schedule_dw(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
, 0);
1734 err_register_netdev
:
1735 mlxsw_sp
->ports
[local_port
] = NULL
;
1736 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan
);
1737 err_port_vlan_create
:
1739 mlxsw_sp_port_nve_fini(mlxsw_sp_port
);
1741 err_port_vlan_clear
:
1742 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port
);
1743 err_port_qdiscs_init
:
1744 mlxsw_sp_port_fids_fini(mlxsw_sp_port
);
1746 mlxsw_sp_port_dcb_fini(mlxsw_sp_port
);
1748 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port
, false);
1749 err_port_tc_mc_mode
:
1751 mlxsw_sp_port_buffers_fini(mlxsw_sp_port
);
1752 err_port_buffers_init
:
1753 err_port_admin_status_set
:
1755 err_port_max_mtu_get
:
1757 err_port_speed_by_width_set
:
1758 err_port_system_port_mapping_set
:
1760 mlxsw_sp_port_swid_set(mlxsw_sp_port
, MLXSW_PORT_SWID_DISABLED_PORT
);
1762 mlxsw_sp_port_module_unmap(mlxsw_sp_port
);
1763 err_port_module_map
:
1764 free_percpu(mlxsw_sp_port
->pcpu_stats
);
1768 mlxsw_core_port_fini(mlxsw_sp
->core
, local_port
);
1772 static void mlxsw_sp_port_remove(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
1774 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
1776 cancel_delayed_work_sync(&mlxsw_sp_port
->periodic_hw_stats
.update_dw
);
1777 cancel_delayed_work_sync(&mlxsw_sp_port
->ptp
.shaper_dw
);
1778 mlxsw_sp_port_ptp_clear(mlxsw_sp_port
);
1779 mlxsw_core_port_clear(mlxsw_sp
->core
, local_port
, mlxsw_sp
);
1780 unregister_netdev(mlxsw_sp_port
->dev
); /* This calls ndo_stop */
1781 mlxsw_sp
->ports
[local_port
] = NULL
;
1782 mlxsw_sp_port_vlan_flush(mlxsw_sp_port
, true);
1783 mlxsw_sp_port_nve_fini(mlxsw_sp_port
);
1784 mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port
);
1785 mlxsw_sp_port_fids_fini(mlxsw_sp_port
);
1786 mlxsw_sp_port_dcb_fini(mlxsw_sp_port
);
1787 mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port
, false);
1788 mlxsw_sp_port_buffers_fini(mlxsw_sp_port
);
1789 mlxsw_sp_port_swid_set(mlxsw_sp_port
, MLXSW_PORT_SWID_DISABLED_PORT
);
1790 mlxsw_sp_port_module_unmap(mlxsw_sp_port
);
1791 free_percpu(mlxsw_sp_port
->pcpu_stats
);
1792 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port
->vlans_list
));
1793 free_netdev(mlxsw_sp_port
->dev
);
1794 mlxsw_core_port_fini(mlxsw_sp
->core
, local_port
);
1797 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp
*mlxsw_sp
)
1799 struct mlxsw_sp_port
*mlxsw_sp_port
;
1802 mlxsw_sp_port
= kzalloc(sizeof(*mlxsw_sp_port
), GFP_KERNEL
);
1806 mlxsw_sp_port
->mlxsw_sp
= mlxsw_sp
;
1807 mlxsw_sp_port
->local_port
= MLXSW_PORT_CPU_PORT
;
1809 err
= mlxsw_core_cpu_port_init(mlxsw_sp
->core
,
1812 sizeof(mlxsw_sp
->base_mac
));
1814 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize core CPU port\n");
1815 goto err_core_cpu_port_init
;
1818 mlxsw_sp
->ports
[MLXSW_PORT_CPU_PORT
] = mlxsw_sp_port
;
1821 err_core_cpu_port_init
:
1822 kfree(mlxsw_sp_port
);
1826 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp
*mlxsw_sp
)
1828 struct mlxsw_sp_port
*mlxsw_sp_port
=
1829 mlxsw_sp
->ports
[MLXSW_PORT_CPU_PORT
];
1831 mlxsw_core_cpu_port_fini(mlxsw_sp
->core
);
1832 mlxsw_sp
->ports
[MLXSW_PORT_CPU_PORT
] = NULL
;
1833 kfree(mlxsw_sp_port
);
1836 static bool mlxsw_sp_port_created(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
1838 return mlxsw_sp
->ports
[local_port
] != NULL
;
1841 static void mlxsw_sp_ports_remove(struct mlxsw_sp
*mlxsw_sp
)
1845 for (i
= 1; i
< mlxsw_core_max_ports(mlxsw_sp
->core
); i
++)
1846 if (mlxsw_sp_port_created(mlxsw_sp
, i
))
1847 mlxsw_sp_port_remove(mlxsw_sp
, i
);
1848 mlxsw_sp_cpu_port_remove(mlxsw_sp
);
1849 kfree(mlxsw_sp
->ports
);
1850 mlxsw_sp
->ports
= NULL
;
1853 static int mlxsw_sp_ports_create(struct mlxsw_sp
*mlxsw_sp
)
1855 unsigned int max_ports
= mlxsw_core_max_ports(mlxsw_sp
->core
);
1856 struct mlxsw_sp_port_mapping
*port_mapping
;
1861 alloc_size
= sizeof(struct mlxsw_sp_port
*) * max_ports
;
1862 mlxsw_sp
->ports
= kzalloc(alloc_size
, GFP_KERNEL
);
1863 if (!mlxsw_sp
->ports
)
1866 err
= mlxsw_sp_cpu_port_create(mlxsw_sp
);
1868 goto err_cpu_port_create
;
1870 for (i
= 1; i
< max_ports
; i
++) {
1871 port_mapping
= mlxsw_sp
->port_mapping
[i
];
1874 err
= mlxsw_sp_port_create(mlxsw_sp
, i
, 0, port_mapping
);
1876 goto err_port_create
;
1881 for (i
--; i
>= 1; i
--)
1882 if (mlxsw_sp_port_created(mlxsw_sp
, i
))
1883 mlxsw_sp_port_remove(mlxsw_sp
, i
);
1884 mlxsw_sp_cpu_port_remove(mlxsw_sp
);
1885 err_cpu_port_create
:
1886 kfree(mlxsw_sp
->ports
);
1887 mlxsw_sp
->ports
= NULL
;
1891 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp
*mlxsw_sp
)
1893 unsigned int max_ports
= mlxsw_core_max_ports(mlxsw_sp
->core
);
1894 struct mlxsw_sp_port_mapping port_mapping
;
1898 mlxsw_sp
->port_mapping
= kcalloc(max_ports
,
1899 sizeof(struct mlxsw_sp_port_mapping
*),
1901 if (!mlxsw_sp
->port_mapping
)
1904 for (i
= 1; i
< max_ports
; i
++) {
1905 err
= mlxsw_sp_port_module_info_get(mlxsw_sp
, i
, &port_mapping
);
1907 goto err_port_module_info_get
;
1908 if (!port_mapping
.width
)
1911 mlxsw_sp
->port_mapping
[i
] = kmemdup(&port_mapping
,
1912 sizeof(port_mapping
),
1914 if (!mlxsw_sp
->port_mapping
[i
]) {
1916 goto err_port_module_info_dup
;
1921 err_port_module_info_get
:
1922 err_port_module_info_dup
:
1923 for (i
--; i
>= 1; i
--)
1924 kfree(mlxsw_sp
->port_mapping
[i
]);
1925 kfree(mlxsw_sp
->port_mapping
);
1929 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp
*mlxsw_sp
)
1933 for (i
= 1; i
< mlxsw_core_max_ports(mlxsw_sp
->core
); i
++)
1934 kfree(mlxsw_sp
->port_mapping
[i
]);
1935 kfree(mlxsw_sp
->port_mapping
);
1938 static u8
mlxsw_sp_cluster_base_port_get(u8 local_port
, unsigned int max_width
)
1940 u8 offset
= (local_port
- 1) % max_width
;
1942 return local_port
- offset
;
1946 mlxsw_sp_port_split_create(struct mlxsw_sp
*mlxsw_sp
, u8 base_port
,
1947 struct mlxsw_sp_port_mapping
*port_mapping
,
1948 unsigned int count
, u8 offset
)
1950 struct mlxsw_sp_port_mapping split_port_mapping
;
1953 split_port_mapping
= *port_mapping
;
1954 split_port_mapping
.width
/= count
;
1955 for (i
= 0; i
< count
; i
++) {
1956 err
= mlxsw_sp_port_create(mlxsw_sp
, base_port
+ i
* offset
,
1957 base_port
, &split_port_mapping
);
1959 goto err_port_create
;
1960 split_port_mapping
.lane
+= split_port_mapping
.width
;
1966 for (i
--; i
>= 0; i
--)
1967 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
* offset
))
1968 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
* offset
);
1972 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp
*mlxsw_sp
,
1974 unsigned int count
, u8 offset
)
1976 struct mlxsw_sp_port_mapping
*port_mapping
;
1979 /* Go over original unsplit ports in the gap and recreate them. */
1980 for (i
= 0; i
< count
* offset
; i
++) {
1981 port_mapping
= mlxsw_sp
->port_mapping
[base_port
+ i
];
1984 mlxsw_sp_port_create(mlxsw_sp
, base_port
+ i
, 0, port_mapping
);
1988 static int mlxsw_sp_local_ports_offset(struct mlxsw_core
*mlxsw_core
,
1990 unsigned int max_width
)
1992 enum mlxsw_res_id local_ports_in_x_res_id
;
1993 int split_width
= max_width
/ count
;
1995 if (split_width
== 1)
1996 local_ports_in_x_res_id
= MLXSW_RES_ID_LOCAL_PORTS_IN_1X
;
1997 else if (split_width
== 2)
1998 local_ports_in_x_res_id
= MLXSW_RES_ID_LOCAL_PORTS_IN_2X
;
1999 else if (split_width
== 4)
2000 local_ports_in_x_res_id
= MLXSW_RES_ID_LOCAL_PORTS_IN_4X
;
2004 if (!mlxsw_core_res_valid(mlxsw_core
, local_ports_in_x_res_id
))
2006 return mlxsw_core_res_get(mlxsw_core
, local_ports_in_x_res_id
);
2009 static struct mlxsw_sp_port
*
2010 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
2012 if (mlxsw_sp
->ports
&& mlxsw_sp
->ports
[local_port
])
2013 return mlxsw_sp
->ports
[local_port
];
2017 static int mlxsw_sp_port_split(struct mlxsw_core
*mlxsw_core
, u8 local_port
,
2019 struct netlink_ext_ack
*extack
)
2021 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2022 struct mlxsw_sp_port_mapping port_mapping
;
2023 struct mlxsw_sp_port
*mlxsw_sp_port
;
2030 mlxsw_sp_port
= mlxsw_sp_port_get_by_local_port(mlxsw_sp
, local_port
);
2031 if (!mlxsw_sp_port
) {
2032 dev_err(mlxsw_sp
->bus_info
->dev
, "Port number \"%d\" does not exist\n",
2034 NL_SET_ERR_MSG_MOD(extack
, "Port number does not exist");
2038 max_width
= mlxsw_core_module_max_width(mlxsw_core
,
2039 mlxsw_sp_port
->mapping
.module
);
2040 if (max_width
< 0) {
2041 netdev_err(mlxsw_sp_port
->dev
, "Cannot get max width of port module\n");
2042 NL_SET_ERR_MSG_MOD(extack
, "Cannot get max width of port module");
2046 /* Split port with non-max cannot be split. */
2047 if (mlxsw_sp_port
->mapping
.width
!= max_width
) {
2048 netdev_err(mlxsw_sp_port
->dev
, "Port cannot be split\n");
2049 NL_SET_ERR_MSG_MOD(extack
, "Port cannot be split");
2053 offset
= mlxsw_sp_local_ports_offset(mlxsw_core
, count
, max_width
);
2055 netdev_err(mlxsw_sp_port
->dev
, "Cannot obtain local port offset\n");
2056 NL_SET_ERR_MSG_MOD(extack
, "Cannot obtain local port offset");
2060 /* Only in case max split is being done, the local port and
2061 * base port may differ.
2063 base_port
= count
== max_width
?
2064 mlxsw_sp_cluster_base_port_get(local_port
, max_width
) :
2067 for (i
= 0; i
< count
* offset
; i
++) {
2068 /* Expect base port to exist and also the one in the middle in
2069 * case of maximal split count.
2071 if (i
== 0 || (count
== max_width
&& i
== count
/ 2))
2074 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
)) {
2075 netdev_err(mlxsw_sp_port
->dev
, "Invalid split configuration\n");
2076 NL_SET_ERR_MSG_MOD(extack
, "Invalid split configuration");
2081 port_mapping
= mlxsw_sp_port
->mapping
;
2083 for (i
= 0; i
< count
; i
++)
2084 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
* offset
))
2085 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
* offset
);
2087 err
= mlxsw_sp_port_split_create(mlxsw_sp
, base_port
, &port_mapping
,
2090 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create split ports\n");
2091 goto err_port_split_create
;
2096 err_port_split_create
:
2097 mlxsw_sp_port_unsplit_create(mlxsw_sp
, base_port
, count
, offset
);
2101 static int mlxsw_sp_port_unsplit(struct mlxsw_core
*mlxsw_core
, u8 local_port
,
2102 struct netlink_ext_ack
*extack
)
2104 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2105 struct mlxsw_sp_port
*mlxsw_sp_port
;
2112 mlxsw_sp_port
= mlxsw_sp_port_get_by_local_port(mlxsw_sp
, local_port
);
2113 if (!mlxsw_sp_port
) {
2114 dev_err(mlxsw_sp
->bus_info
->dev
, "Port number \"%d\" does not exist\n",
2116 NL_SET_ERR_MSG_MOD(extack
, "Port number does not exist");
2120 if (!mlxsw_sp_port
->split
) {
2121 netdev_err(mlxsw_sp_port
->dev
, "Port was not split\n");
2122 NL_SET_ERR_MSG_MOD(extack
, "Port was not split");
2126 max_width
= mlxsw_core_module_max_width(mlxsw_core
,
2127 mlxsw_sp_port
->mapping
.module
);
2128 if (max_width
< 0) {
2129 netdev_err(mlxsw_sp_port
->dev
, "Cannot get max width of port module\n");
2130 NL_SET_ERR_MSG_MOD(extack
, "Cannot get max width of port module");
2134 count
= max_width
/ mlxsw_sp_port
->mapping
.width
;
2136 offset
= mlxsw_sp_local_ports_offset(mlxsw_core
, count
, max_width
);
2137 if (WARN_ON(offset
< 0)) {
2138 netdev_err(mlxsw_sp_port
->dev
, "Cannot obtain local port offset\n");
2139 NL_SET_ERR_MSG_MOD(extack
, "Cannot obtain local port offset");
2143 base_port
= mlxsw_sp_port
->split_base_local_port
;
2145 for (i
= 0; i
< count
; i
++)
2146 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
* offset
))
2147 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
* offset
);
2149 mlxsw_sp_port_unsplit_create(mlxsw_sp
, base_port
, count
, offset
);
2155 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port
*mlxsw_sp_port
)
2159 for (i
= 0; i
< TC_MAX_QUEUE
; i
++)
2160 mlxsw_sp_port
->periodic_hw_stats
.xstats
.backlog
[i
] = 0;
2163 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info
*reg
,
2164 char *pude_pl
, void *priv
)
2166 struct mlxsw_sp
*mlxsw_sp
= priv
;
2167 struct mlxsw_sp_port
*mlxsw_sp_port
;
2168 enum mlxsw_reg_pude_oper_status status
;
2171 local_port
= mlxsw_reg_pude_local_port_get(pude_pl
);
2172 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
2176 status
= mlxsw_reg_pude_oper_status_get(pude_pl
);
2177 if (status
== MLXSW_PORT_OPER_STATUS_UP
) {
2178 netdev_info(mlxsw_sp_port
->dev
, "link up\n");
2179 netif_carrier_on(mlxsw_sp_port
->dev
);
2180 mlxsw_core_schedule_dw(&mlxsw_sp_port
->ptp
.shaper_dw
, 0);
2182 netdev_info(mlxsw_sp_port
->dev
, "link down\n");
2183 netif_carrier_off(mlxsw_sp_port
->dev
);
2184 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port
);
2188 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp
*mlxsw_sp
,
2189 char *mtpptr_pl
, bool ingress
)
2195 local_port
= mlxsw_reg_mtpptr_local_port_get(mtpptr_pl
);
2196 num_rec
= mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl
);
2197 for (i
= 0; i
< num_rec
; i
++) {
2203 mlxsw_reg_mtpptr_unpack(mtpptr_pl
, i
, &message_type
,
2204 &domain_number
, &sequence_id
,
2206 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp
, ingress
, local_port
,
2207 message_type
, domain_number
,
2208 sequence_id
, timestamp
);
2212 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info
*reg
,
2213 char *mtpptr_pl
, void *priv
)
2215 struct mlxsw_sp
*mlxsw_sp
= priv
;
2217 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp
, mtpptr_pl
, true);
2220 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info
*reg
,
2221 char *mtpptr_pl
, void *priv
)
2223 struct mlxsw_sp
*mlxsw_sp
= priv
;
2225 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp
, mtpptr_pl
, false);
2228 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff
*skb
,
2229 u8 local_port
, void *priv
)
2231 struct mlxsw_sp
*mlxsw_sp
= priv
;
2232 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
2233 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
2235 if (unlikely(!mlxsw_sp_port
)) {
2236 dev_warn_ratelimited(mlxsw_sp
->bus_info
->dev
, "Port %d: skb received for non-existent port\n",
2241 skb
->dev
= mlxsw_sp_port
->dev
;
2243 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
2244 u64_stats_update_begin(&pcpu_stats
->syncp
);
2245 pcpu_stats
->rx_packets
++;
2246 pcpu_stats
->rx_bytes
+= skb
->len
;
2247 u64_stats_update_end(&pcpu_stats
->syncp
);
2249 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
2250 netif_receive_skb(skb
);
2253 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff
*skb
, u8 local_port
,
2256 skb
->offload_fwd_mark
= 1;
2257 return mlxsw_sp_rx_listener_no_mark_func(skb
, local_port
, priv
);
2260 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff
*skb
,
2261 u8 local_port
, void *priv
)
2263 skb
->offload_l3_fwd_mark
= 1;
2264 skb
->offload_fwd_mark
= 1;
2265 return mlxsw_sp_rx_listener_no_mark_func(skb
, local_port
, priv
);
2268 void mlxsw_sp_ptp_receive(struct mlxsw_sp
*mlxsw_sp
, struct sk_buff
*skb
,
2271 mlxsw_sp
->ptp_ops
->receive(mlxsw_sp
, skb
, local_port
);
2274 void mlxsw_sp_sample_receive(struct mlxsw_sp
*mlxsw_sp
, struct sk_buff
*skb
,
2277 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
2278 struct mlxsw_sp_port_sample
*sample
;
2281 if (unlikely(!mlxsw_sp_port
)) {
2282 dev_warn_ratelimited(mlxsw_sp
->bus_info
->dev
, "Port %d: sample skb received for non-existent port\n",
2288 sample
= rcu_dereference(mlxsw_sp_port
->sample
);
2291 size
= sample
->truncate
? sample
->trunc_size
: skb
->len
;
2292 psample_sample_packet(sample
->psample_group
, skb
, size
,
2293 mlxsw_sp_port
->dev
->ifindex
, 0, sample
->rate
);
2300 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2301 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
2302 _is_ctrl, SP_##_trap_group, DISCARD)
2304 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2305 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
2306 _is_ctrl, SP_##_trap_group, DISCARD)
2308 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2309 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
2310 _is_ctrl, SP_##_trap_group, DISCARD)
2312 #define MLXSW_SP_EVENTL(_func, _trap_id) \
2313 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2315 static const struct mlxsw_listener mlxsw_sp_listener
[] = {
2317 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func
, PUDE
),
2319 MLXSW_SP_RXL_NO_MARK(FID_MISS
, TRAP_TO_CPU
, FID_MISS
, false),
2321 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS
, TRAP_TO_CPU
, ROUTER_EXP
,
2323 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC
, TRAP_TO_CPU
, ROUTER_EXP
, false),
2324 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST
, TRAP_TO_CPU
, ROUTER_EXP
,
2326 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E
, FORWARD
,
2328 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC
, FORWARD
,
2330 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP
, FORWARD
,
2332 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL
, FORWARD
,
2334 /* Multicast Router Traps */
2335 MLXSW_SP_RXL_MARK(ACL1
, TRAP_TO_CPU
, MULTICAST
, false),
2336 MLXSW_SP_RXL_L3_MARK(ACL2
, TRAP_TO_CPU
, MULTICAST
, false),
2338 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP
, TRAP_TO_CPU
, NEIGH_DISCOVERY
, false),
2341 static const struct mlxsw_listener mlxsw_sp1_listener
[] = {
2343 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func
, PTP_EGR_FIFO
, SP_PTP0
),
2344 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func
, PTP_ING_FIFO
, SP_PTP0
),
2347 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core
*mlxsw_core
)
2349 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2350 char qpcr_pl
[MLXSW_REG_QPCR_LEN
];
2351 enum mlxsw_reg_qpcr_ir_units ir_units
;
2352 int max_cpu_policers
;
2358 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_CPU_POLICERS
))
2361 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
2363 ir_units
= MLXSW_REG_QPCR_IR_UNITS_M
;
2364 for (i
= 0; i
< max_cpu_policers
; i
++) {
2367 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
2368 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST
:
2369 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS
:
2377 __set_bit(i
, mlxsw_sp
->trap
->policers_usage
);
2378 mlxsw_reg_qpcr_pack(qpcr_pl
, i
, ir_units
, is_bytes
, rate
,
2380 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(qpcr
), qpcr_pl
);
2388 static int mlxsw_sp_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
2390 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
2391 enum mlxsw_reg_htgt_trap_group i
;
2392 int max_cpu_policers
;
2393 int max_trap_groups
;
2398 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_TRAP_GROUPS
))
2401 max_trap_groups
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_TRAP_GROUPS
);
2402 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
2404 for (i
= 0; i
< max_trap_groups
; i
++) {
2407 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
2408 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST
:
2409 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS
:
2413 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT
:
2414 priority
= MLXSW_REG_HTGT_DEFAULT_PRIORITY
;
2415 tc
= MLXSW_REG_HTGT_DEFAULT_TC
;
2416 policer_id
= MLXSW_REG_HTGT_INVALID_POLICER
;
2422 if (max_cpu_policers
<= policer_id
&&
2423 policer_id
!= MLXSW_REG_HTGT_INVALID_POLICER
)
2426 mlxsw_reg_htgt_pack(htgt_pl
, i
, policer_id
, priority
, tc
);
2427 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
2435 static int mlxsw_sp_traps_register(struct mlxsw_sp
*mlxsw_sp
,
2436 const struct mlxsw_listener listeners
[],
2437 size_t listeners_count
)
2442 for (i
= 0; i
< listeners_count
; i
++) {
2443 err
= mlxsw_core_trap_register(mlxsw_sp
->core
,
2447 goto err_listener_register
;
2452 err_listener_register
:
2453 for (i
--; i
>= 0; i
--) {
2454 mlxsw_core_trap_unregister(mlxsw_sp
->core
,
2461 static void mlxsw_sp_traps_unregister(struct mlxsw_sp
*mlxsw_sp
,
2462 const struct mlxsw_listener listeners
[],
2463 size_t listeners_count
)
2467 for (i
= 0; i
< listeners_count
; i
++) {
2468 mlxsw_core_trap_unregister(mlxsw_sp
->core
,
2474 static int mlxsw_sp_traps_init(struct mlxsw_sp
*mlxsw_sp
)
2476 struct mlxsw_sp_trap
*trap
;
2480 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_CPU_POLICERS
))
2482 max_policers
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_CPU_POLICERS
);
2483 trap
= kzalloc(struct_size(trap
, policers_usage
,
2484 BITS_TO_LONGS(max_policers
)), GFP_KERNEL
);
2487 trap
->max_policers
= max_policers
;
2488 mlxsw_sp
->trap
= trap
;
2490 err
= mlxsw_sp_cpu_policers_set(mlxsw_sp
->core
);
2492 goto err_cpu_policers_set
;
2494 err
= mlxsw_sp_trap_groups_set(mlxsw_sp
->core
);
2496 goto err_trap_groups_set
;
2498 err
= mlxsw_sp_traps_register(mlxsw_sp
, mlxsw_sp_listener
,
2499 ARRAY_SIZE(mlxsw_sp_listener
));
2501 goto err_traps_register
;
2503 err
= mlxsw_sp_traps_register(mlxsw_sp
, mlxsw_sp
->listeners
,
2504 mlxsw_sp
->listeners_count
);
2506 goto err_extra_traps_init
;
2510 err_extra_traps_init
:
2511 mlxsw_sp_traps_unregister(mlxsw_sp
, mlxsw_sp_listener
,
2512 ARRAY_SIZE(mlxsw_sp_listener
));
2514 err_trap_groups_set
:
2515 err_cpu_policers_set
:
2520 static void mlxsw_sp_traps_fini(struct mlxsw_sp
*mlxsw_sp
)
2522 mlxsw_sp_traps_unregister(mlxsw_sp
, mlxsw_sp
->listeners
,
2523 mlxsw_sp
->listeners_count
);
2524 mlxsw_sp_traps_unregister(mlxsw_sp
, mlxsw_sp_listener
,
2525 ARRAY_SIZE(mlxsw_sp_listener
));
2526 kfree(mlxsw_sp
->trap
);
2529 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2531 static int mlxsw_sp_lag_init(struct mlxsw_sp
*mlxsw_sp
)
2533 char slcr_pl
[MLXSW_REG_SLCR_LEN
];
2537 seed
= jhash(mlxsw_sp
->base_mac
, sizeof(mlxsw_sp
->base_mac
),
2538 MLXSW_SP_LAG_SEED_INIT
);
2539 mlxsw_reg_slcr_pack(slcr_pl
, MLXSW_REG_SLCR_LAG_HASH_SMAC
|
2540 MLXSW_REG_SLCR_LAG_HASH_DMAC
|
2541 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE
|
2542 MLXSW_REG_SLCR_LAG_HASH_VLANID
|
2543 MLXSW_REG_SLCR_LAG_HASH_SIP
|
2544 MLXSW_REG_SLCR_LAG_HASH_DIP
|
2545 MLXSW_REG_SLCR_LAG_HASH_SPORT
|
2546 MLXSW_REG_SLCR_LAG_HASH_DPORT
|
2547 MLXSW_REG_SLCR_LAG_HASH_IPPROTO
, seed
);
2548 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcr
), slcr_pl
);
2552 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG
) ||
2553 !MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG_MEMBERS
))
2556 mlxsw_sp
->lags
= kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LAG
),
2557 sizeof(struct mlxsw_sp_upper
),
2559 if (!mlxsw_sp
->lags
)
2565 static void mlxsw_sp_lag_fini(struct mlxsw_sp
*mlxsw_sp
)
2567 kfree(mlxsw_sp
->lags
);
2570 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
2572 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
2575 mlxsw_reg_htgt_pack(htgt_pl
, MLXSW_REG_HTGT_TRAP_GROUP_EMAD
,
2576 MLXSW_REG_HTGT_INVALID_POLICER
,
2577 MLXSW_REG_HTGT_DEFAULT_PRIORITY
,
2578 MLXSW_REG_HTGT_DEFAULT_TC
);
2579 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
2583 mlxsw_reg_htgt_pack(htgt_pl
, MLXSW_REG_HTGT_TRAP_GROUP_MFDE
,
2584 MLXSW_REG_HTGT_INVALID_POLICER
,
2585 MLXSW_REG_HTGT_DEFAULT_PRIORITY
,
2586 MLXSW_REG_HTGT_DEFAULT_TC
);
2587 return mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
2590 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops
= {
2591 .clock_init
= mlxsw_sp1_ptp_clock_init
,
2592 .clock_fini
= mlxsw_sp1_ptp_clock_fini
,
2593 .init
= mlxsw_sp1_ptp_init
,
2594 .fini
= mlxsw_sp1_ptp_fini
,
2595 .receive
= mlxsw_sp1_ptp_receive
,
2596 .transmitted
= mlxsw_sp1_ptp_transmitted
,
2597 .hwtstamp_get
= mlxsw_sp1_ptp_hwtstamp_get
,
2598 .hwtstamp_set
= mlxsw_sp1_ptp_hwtstamp_set
,
2599 .shaper_work
= mlxsw_sp1_ptp_shaper_work
,
2600 .get_ts_info
= mlxsw_sp1_ptp_get_ts_info
,
2601 .get_stats_count
= mlxsw_sp1_get_stats_count
,
2602 .get_stats_strings
= mlxsw_sp1_get_stats_strings
,
2603 .get_stats
= mlxsw_sp1_get_stats
,
2606 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops
= {
2607 .clock_init
= mlxsw_sp2_ptp_clock_init
,
2608 .clock_fini
= mlxsw_sp2_ptp_clock_fini
,
2609 .init
= mlxsw_sp2_ptp_init
,
2610 .fini
= mlxsw_sp2_ptp_fini
,
2611 .receive
= mlxsw_sp2_ptp_receive
,
2612 .transmitted
= mlxsw_sp2_ptp_transmitted
,
2613 .hwtstamp_get
= mlxsw_sp2_ptp_hwtstamp_get
,
2614 .hwtstamp_set
= mlxsw_sp2_ptp_hwtstamp_set
,
2615 .shaper_work
= mlxsw_sp2_ptp_shaper_work
,
2616 .get_ts_info
= mlxsw_sp2_ptp_get_ts_info
,
2617 .get_stats_count
= mlxsw_sp2_get_stats_count
,
2618 .get_stats_strings
= mlxsw_sp2_get_stats_strings
,
2619 .get_stats
= mlxsw_sp2_get_stats
,
2622 static int mlxsw_sp_netdevice_event(struct notifier_block
*unused
,
2623 unsigned long event
, void *ptr
);
2625 static int mlxsw_sp_init(struct mlxsw_core
*mlxsw_core
,
2626 const struct mlxsw_bus_info
*mlxsw_bus_info
,
2627 struct netlink_ext_ack
*extack
)
2629 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2632 mlxsw_sp
->core
= mlxsw_core
;
2633 mlxsw_sp
->bus_info
= mlxsw_bus_info
;
2635 mlxsw_core_emad_string_tlv_enable(mlxsw_core
);
2637 err
= mlxsw_sp_base_mac_get(mlxsw_sp
);
2639 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to get base mac\n");
2643 err
= mlxsw_sp_kvdl_init(mlxsw_sp
);
2645 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize KVDL\n");
2649 err
= mlxsw_sp_fids_init(mlxsw_sp
);
2651 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize FIDs\n");
2655 err
= mlxsw_sp_policers_init(mlxsw_sp
);
2657 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize policers\n");
2658 goto err_policers_init
;
2661 err
= mlxsw_sp_traps_init(mlxsw_sp
);
2663 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to set traps\n");
2664 goto err_traps_init
;
2667 err
= mlxsw_sp_devlink_traps_init(mlxsw_sp
);
2669 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize devlink traps\n");
2670 goto err_devlink_traps_init
;
2673 err
= mlxsw_sp_buffers_init(mlxsw_sp
);
2675 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize buffers\n");
2676 goto err_buffers_init
;
2679 err
= mlxsw_sp_lag_init(mlxsw_sp
);
2681 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize LAG\n");
2685 /* Initialize SPAN before router and switchdev, so that those components
2686 * can call mlxsw_sp_span_respin().
2688 err
= mlxsw_sp_span_init(mlxsw_sp
);
2690 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init span system\n");
2694 err
= mlxsw_sp_switchdev_init(mlxsw_sp
);
2696 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize switchdev\n");
2697 goto err_switchdev_init
;
2700 err
= mlxsw_sp_counter_pool_init(mlxsw_sp
);
2702 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init counter pool\n");
2703 goto err_counter_pool_init
;
2706 err
= mlxsw_sp_afa_init(mlxsw_sp
);
2708 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize ACL actions\n");
2712 err
= mlxsw_sp_nve_init(mlxsw_sp
);
2714 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize NVE\n");
2718 err
= mlxsw_sp_acl_init(mlxsw_sp
);
2720 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize ACL\n");
2724 err
= mlxsw_sp_router_init(mlxsw_sp
, extack
);
2726 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize router\n");
2727 goto err_router_init
;
2730 if (mlxsw_sp
->bus_info
->read_frc_capable
) {
2731 /* NULL is a valid return value from clock_init */
2733 mlxsw_sp
->ptp_ops
->clock_init(mlxsw_sp
,
2734 mlxsw_sp
->bus_info
->dev
);
2735 if (IS_ERR(mlxsw_sp
->clock
)) {
2736 err
= PTR_ERR(mlxsw_sp
->clock
);
2737 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init ptp clock\n");
2738 goto err_ptp_clock_init
;
2742 if (mlxsw_sp
->clock
) {
2743 /* NULL is a valid return value from ptp_ops->init */
2744 mlxsw_sp
->ptp_state
= mlxsw_sp
->ptp_ops
->init(mlxsw_sp
);
2745 if (IS_ERR(mlxsw_sp
->ptp_state
)) {
2746 err
= PTR_ERR(mlxsw_sp
->ptp_state
);
2747 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize PTP\n");
2752 /* Initialize netdevice notifier after router and SPAN is initialized,
2753 * so that the event handler can use router structures and call SPAN
2756 mlxsw_sp
->netdevice_nb
.notifier_call
= mlxsw_sp_netdevice_event
;
2757 err
= register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp
),
2758 &mlxsw_sp
->netdevice_nb
);
2760 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to register netdev notifier\n");
2761 goto err_netdev_notifier
;
2764 err
= mlxsw_sp_dpipe_init(mlxsw_sp
);
2766 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init pipeline debug\n");
2767 goto err_dpipe_init
;
2770 err
= mlxsw_sp_port_module_info_init(mlxsw_sp
);
2772 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init port module info\n");
2773 goto err_port_module_info_init
;
2776 err
= mlxsw_sp_ports_create(mlxsw_sp
);
2778 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create ports\n");
2779 goto err_ports_create
;
2785 mlxsw_sp_port_module_info_fini(mlxsw_sp
);
2786 err_port_module_info_init
:
2787 mlxsw_sp_dpipe_fini(mlxsw_sp
);
2789 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp
),
2790 &mlxsw_sp
->netdevice_nb
);
2791 err_netdev_notifier
:
2792 if (mlxsw_sp
->clock
)
2793 mlxsw_sp
->ptp_ops
->fini(mlxsw_sp
->ptp_state
);
2795 if (mlxsw_sp
->clock
)
2796 mlxsw_sp
->ptp_ops
->clock_fini(mlxsw_sp
->clock
);
2798 mlxsw_sp_router_fini(mlxsw_sp
);
2800 mlxsw_sp_acl_fini(mlxsw_sp
);
2802 mlxsw_sp_nve_fini(mlxsw_sp
);
2804 mlxsw_sp_afa_fini(mlxsw_sp
);
2806 mlxsw_sp_counter_pool_fini(mlxsw_sp
);
2807 err_counter_pool_init
:
2808 mlxsw_sp_switchdev_fini(mlxsw_sp
);
2810 mlxsw_sp_span_fini(mlxsw_sp
);
2812 mlxsw_sp_lag_fini(mlxsw_sp
);
2814 mlxsw_sp_buffers_fini(mlxsw_sp
);
2816 mlxsw_sp_devlink_traps_fini(mlxsw_sp
);
2817 err_devlink_traps_init
:
2818 mlxsw_sp_traps_fini(mlxsw_sp
);
2820 mlxsw_sp_policers_fini(mlxsw_sp
);
2822 mlxsw_sp_fids_fini(mlxsw_sp
);
2824 mlxsw_sp_kvdl_fini(mlxsw_sp
);
2828 static int mlxsw_sp1_init(struct mlxsw_core
*mlxsw_core
,
2829 const struct mlxsw_bus_info
*mlxsw_bus_info
,
2830 struct netlink_ext_ack
*extack
)
2832 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2834 mlxsw_sp
->kvdl_ops
= &mlxsw_sp1_kvdl_ops
;
2835 mlxsw_sp
->afa_ops
= &mlxsw_sp1_act_afa_ops
;
2836 mlxsw_sp
->afk_ops
= &mlxsw_sp1_afk_ops
;
2837 mlxsw_sp
->mr_tcam_ops
= &mlxsw_sp1_mr_tcam_ops
;
2838 mlxsw_sp
->acl_rulei_ops
= &mlxsw_sp1_acl_rulei_ops
;
2839 mlxsw_sp
->acl_tcam_ops
= &mlxsw_sp1_acl_tcam_ops
;
2840 mlxsw_sp
->nve_ops_arr
= mlxsw_sp1_nve_ops_arr
;
2841 mlxsw_sp
->mac_mask
= mlxsw_sp1_mac_mask
;
2842 mlxsw_sp
->rif_ops_arr
= mlxsw_sp1_rif_ops_arr
;
2843 mlxsw_sp
->sb_vals
= &mlxsw_sp1_sb_vals
;
2844 mlxsw_sp
->port_type_speed_ops
= &mlxsw_sp1_port_type_speed_ops
;
2845 mlxsw_sp
->ptp_ops
= &mlxsw_sp1_ptp_ops
;
2846 mlxsw_sp
->span_ops
= &mlxsw_sp1_span_ops
;
2847 mlxsw_sp
->policer_core_ops
= &mlxsw_sp1_policer_core_ops
;
2848 mlxsw_sp
->trap_ops
= &mlxsw_sp1_trap_ops
;
2849 mlxsw_sp
->listeners
= mlxsw_sp1_listener
;
2850 mlxsw_sp
->listeners_count
= ARRAY_SIZE(mlxsw_sp1_listener
);
2851 mlxsw_sp
->lowest_shaper_bs
= MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1
;
2853 return mlxsw_sp_init(mlxsw_core
, mlxsw_bus_info
, extack
);
2856 static int mlxsw_sp2_init(struct mlxsw_core
*mlxsw_core
,
2857 const struct mlxsw_bus_info
*mlxsw_bus_info
,
2858 struct netlink_ext_ack
*extack
)
2860 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2862 mlxsw_sp
->kvdl_ops
= &mlxsw_sp2_kvdl_ops
;
2863 mlxsw_sp
->afa_ops
= &mlxsw_sp2_act_afa_ops
;
2864 mlxsw_sp
->afk_ops
= &mlxsw_sp2_afk_ops
;
2865 mlxsw_sp
->mr_tcam_ops
= &mlxsw_sp2_mr_tcam_ops
;
2866 mlxsw_sp
->acl_rulei_ops
= &mlxsw_sp2_acl_rulei_ops
;
2867 mlxsw_sp
->acl_tcam_ops
= &mlxsw_sp2_acl_tcam_ops
;
2868 mlxsw_sp
->nve_ops_arr
= mlxsw_sp2_nve_ops_arr
;
2869 mlxsw_sp
->mac_mask
= mlxsw_sp2_mac_mask
;
2870 mlxsw_sp
->rif_ops_arr
= mlxsw_sp2_rif_ops_arr
;
2871 mlxsw_sp
->sb_vals
= &mlxsw_sp2_sb_vals
;
2872 mlxsw_sp
->port_type_speed_ops
= &mlxsw_sp2_port_type_speed_ops
;
2873 mlxsw_sp
->ptp_ops
= &mlxsw_sp2_ptp_ops
;
2874 mlxsw_sp
->span_ops
= &mlxsw_sp2_span_ops
;
2875 mlxsw_sp
->policer_core_ops
= &mlxsw_sp2_policer_core_ops
;
2876 mlxsw_sp
->trap_ops
= &mlxsw_sp2_trap_ops
;
2877 mlxsw_sp
->lowest_shaper_bs
= MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2
;
2879 return mlxsw_sp_init(mlxsw_core
, mlxsw_bus_info
, extack
);
2882 static int mlxsw_sp3_init(struct mlxsw_core
*mlxsw_core
,
2883 const struct mlxsw_bus_info
*mlxsw_bus_info
,
2884 struct netlink_ext_ack
*extack
)
2886 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2888 mlxsw_sp
->kvdl_ops
= &mlxsw_sp2_kvdl_ops
;
2889 mlxsw_sp
->afa_ops
= &mlxsw_sp2_act_afa_ops
;
2890 mlxsw_sp
->afk_ops
= &mlxsw_sp2_afk_ops
;
2891 mlxsw_sp
->mr_tcam_ops
= &mlxsw_sp2_mr_tcam_ops
;
2892 mlxsw_sp
->acl_rulei_ops
= &mlxsw_sp2_acl_rulei_ops
;
2893 mlxsw_sp
->acl_tcam_ops
= &mlxsw_sp2_acl_tcam_ops
;
2894 mlxsw_sp
->nve_ops_arr
= mlxsw_sp2_nve_ops_arr
;
2895 mlxsw_sp
->mac_mask
= mlxsw_sp2_mac_mask
;
2896 mlxsw_sp
->rif_ops_arr
= mlxsw_sp2_rif_ops_arr
;
2897 mlxsw_sp
->sb_vals
= &mlxsw_sp2_sb_vals
;
2898 mlxsw_sp
->port_type_speed_ops
= &mlxsw_sp2_port_type_speed_ops
;
2899 mlxsw_sp
->ptp_ops
= &mlxsw_sp2_ptp_ops
;
2900 mlxsw_sp
->span_ops
= &mlxsw_sp3_span_ops
;
2901 mlxsw_sp
->policer_core_ops
= &mlxsw_sp2_policer_core_ops
;
2902 mlxsw_sp
->trap_ops
= &mlxsw_sp2_trap_ops
;
2903 mlxsw_sp
->lowest_shaper_bs
= MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3
;
2905 return mlxsw_sp_init(mlxsw_core
, mlxsw_bus_info
, extack
);
2908 static void mlxsw_sp_fini(struct mlxsw_core
*mlxsw_core
)
2910 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2912 mlxsw_sp_ports_remove(mlxsw_sp
);
2913 mlxsw_sp_port_module_info_fini(mlxsw_sp
);
2914 mlxsw_sp_dpipe_fini(mlxsw_sp
);
2915 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp
),
2916 &mlxsw_sp
->netdevice_nb
);
2917 if (mlxsw_sp
->clock
) {
2918 mlxsw_sp
->ptp_ops
->fini(mlxsw_sp
->ptp_state
);
2919 mlxsw_sp
->ptp_ops
->clock_fini(mlxsw_sp
->clock
);
2921 mlxsw_sp_router_fini(mlxsw_sp
);
2922 mlxsw_sp_acl_fini(mlxsw_sp
);
2923 mlxsw_sp_nve_fini(mlxsw_sp
);
2924 mlxsw_sp_afa_fini(mlxsw_sp
);
2925 mlxsw_sp_counter_pool_fini(mlxsw_sp
);
2926 mlxsw_sp_switchdev_fini(mlxsw_sp
);
2927 mlxsw_sp_span_fini(mlxsw_sp
);
2928 mlxsw_sp_lag_fini(mlxsw_sp
);
2929 mlxsw_sp_buffers_fini(mlxsw_sp
);
2930 mlxsw_sp_devlink_traps_fini(mlxsw_sp
);
2931 mlxsw_sp_traps_fini(mlxsw_sp
);
2932 mlxsw_sp_policers_fini(mlxsw_sp
);
2933 mlxsw_sp_fids_fini(mlxsw_sp
);
2934 mlxsw_sp_kvdl_fini(mlxsw_sp
);
2937 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
2940 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
2943 static const struct mlxsw_config_profile mlxsw_sp1_config_profile
= {
2945 .max_mid
= MLXSW_SP_MID_MAX
,
2946 .used_flood_tables
= 1,
2947 .used_flood_mode
= 1,
2949 .max_fid_flood_tables
= 3,
2950 .fid_flood_table_size
= MLXSW_SP_FID_FLOOD_TABLE_SIZE
,
2951 .used_max_ib_mc
= 1,
2955 .used_kvd_sizes
= 1,
2956 .kvd_hash_single_parts
= 59,
2957 .kvd_hash_double_parts
= 41,
2958 .kvd_linear_size
= MLXSW_SP_KVD_LINEAR_SIZE
,
2962 .type
= MLXSW_PORT_SWID_TYPE_ETH
,
2967 static const struct mlxsw_config_profile mlxsw_sp2_config_profile
= {
2969 .max_mid
= MLXSW_SP_MID_MAX
,
2970 .used_flood_tables
= 1,
2971 .used_flood_mode
= 1,
2973 .max_fid_flood_tables
= 3,
2974 .fid_flood_table_size
= MLXSW_SP_FID_FLOOD_TABLE_SIZE
,
2975 .used_max_ib_mc
= 1,
2982 .type
= MLXSW_PORT_SWID_TYPE_ETH
,
2988 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core
*mlxsw_core
,
2989 struct devlink_resource_size_params
*kvd_size_params
,
2990 struct devlink_resource_size_params
*linear_size_params
,
2991 struct devlink_resource_size_params
*hash_double_size_params
,
2992 struct devlink_resource_size_params
*hash_single_size_params
)
2994 u32 single_size_min
= MLXSW_CORE_RES_GET(mlxsw_core
,
2995 KVD_SINGLE_MIN_SIZE
);
2996 u32 double_size_min
= MLXSW_CORE_RES_GET(mlxsw_core
,
2997 KVD_DOUBLE_MIN_SIZE
);
2998 u32 kvd_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
);
2999 u32 linear_size_min
= 0;
3001 devlink_resource_size_params_init(kvd_size_params
, kvd_size
, kvd_size
,
3002 MLXSW_SP_KVD_GRANULARITY
,
3003 DEVLINK_RESOURCE_UNIT_ENTRY
);
3004 devlink_resource_size_params_init(linear_size_params
, linear_size_min
,
3005 kvd_size
- single_size_min
-
3007 MLXSW_SP_KVD_GRANULARITY
,
3008 DEVLINK_RESOURCE_UNIT_ENTRY
);
3009 devlink_resource_size_params_init(hash_double_size_params
,
3011 kvd_size
- single_size_min
-
3013 MLXSW_SP_KVD_GRANULARITY
,
3014 DEVLINK_RESOURCE_UNIT_ENTRY
);
3015 devlink_resource_size_params_init(hash_single_size_params
,
3017 kvd_size
- double_size_min
-
3019 MLXSW_SP_KVD_GRANULARITY
,
3020 DEVLINK_RESOURCE_UNIT_ENTRY
);
3023 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core
*mlxsw_core
)
3025 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
3026 struct devlink_resource_size_params hash_single_size_params
;
3027 struct devlink_resource_size_params hash_double_size_params
;
3028 struct devlink_resource_size_params linear_size_params
;
3029 struct devlink_resource_size_params kvd_size_params
;
3030 u32 kvd_size
, single_size
, double_size
, linear_size
;
3031 const struct mlxsw_config_profile
*profile
;
3034 profile
= &mlxsw_sp1_config_profile
;
3035 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, KVD_SIZE
))
3038 mlxsw_sp_resource_size_params_prepare(mlxsw_core
, &kvd_size_params
,
3039 &linear_size_params
,
3040 &hash_double_size_params
,
3041 &hash_single_size_params
);
3043 kvd_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
);
3044 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD
,
3045 kvd_size
, MLXSW_SP_RESOURCE_KVD
,
3046 DEVLINK_RESOURCE_ID_PARENT_TOP
,
3051 linear_size
= profile
->kvd_linear_size
;
3052 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR
,
3054 MLXSW_SP_RESOURCE_KVD_LINEAR
,
3055 MLXSW_SP_RESOURCE_KVD
,
3056 &linear_size_params
);
3060 err
= mlxsw_sp1_kvdl_resources_register(mlxsw_core
);
3064 double_size
= kvd_size
- linear_size
;
3065 double_size
*= profile
->kvd_hash_double_parts
;
3066 double_size
/= profile
->kvd_hash_double_parts
+
3067 profile
->kvd_hash_single_parts
;
3068 double_size
= rounddown(double_size
, MLXSW_SP_KVD_GRANULARITY
);
3069 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE
,
3071 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE
,
3072 MLXSW_SP_RESOURCE_KVD
,
3073 &hash_double_size_params
);
3077 single_size
= kvd_size
- double_size
- linear_size
;
3078 err
= devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE
,
3080 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE
,
3081 MLXSW_SP_RESOURCE_KVD
,
3082 &hash_single_size_params
);
3089 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core
*mlxsw_core
)
3091 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
3092 struct devlink_resource_size_params kvd_size_params
;
3095 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, KVD_SIZE
))
3098 kvd_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
);
3099 devlink_resource_size_params_init(&kvd_size_params
, kvd_size
, kvd_size
,
3100 MLXSW_SP_KVD_GRANULARITY
,
3101 DEVLINK_RESOURCE_UNIT_ENTRY
);
3103 return devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_KVD
,
3104 kvd_size
, MLXSW_SP_RESOURCE_KVD
,
3105 DEVLINK_RESOURCE_ID_PARENT_TOP
,
3109 static int mlxsw_sp_resources_span_register(struct mlxsw_core
*mlxsw_core
)
3111 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
3112 struct devlink_resource_size_params span_size_params
;
3115 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_SPAN
))
3118 max_span
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_SPAN
);
3119 devlink_resource_size_params_init(&span_size_params
, max_span
, max_span
,
3120 1, DEVLINK_RESOURCE_UNIT_ENTRY
);
3122 return devlink_resource_register(devlink
, MLXSW_SP_RESOURCE_NAME_SPAN
,
3123 max_span
, MLXSW_SP_RESOURCE_SPAN
,
3124 DEVLINK_RESOURCE_ID_PARENT_TOP
,
3128 static int mlxsw_sp1_resources_register(struct mlxsw_core
*mlxsw_core
)
3132 err
= mlxsw_sp1_resources_kvd_register(mlxsw_core
);
3136 err
= mlxsw_sp_resources_span_register(mlxsw_core
);
3138 goto err_resources_span_register
;
3140 err
= mlxsw_sp_counter_resources_register(mlxsw_core
);
3142 goto err_resources_counter_register
;
3144 err
= mlxsw_sp_policer_resources_register(mlxsw_core
);
3146 goto err_resources_counter_register
;
3150 err_resources_counter_register
:
3151 err_resources_span_register
:
3152 devlink_resources_unregister(priv_to_devlink(mlxsw_core
), NULL
);
3156 static int mlxsw_sp2_resources_register(struct mlxsw_core
*mlxsw_core
)
3160 err
= mlxsw_sp2_resources_kvd_register(mlxsw_core
);
3164 err
= mlxsw_sp_resources_span_register(mlxsw_core
);
3166 goto err_resources_span_register
;
3168 err
= mlxsw_sp_counter_resources_register(mlxsw_core
);
3170 goto err_resources_counter_register
;
3172 err
= mlxsw_sp_policer_resources_register(mlxsw_core
);
3174 goto err_resources_counter_register
;
3178 err_resources_counter_register
:
3179 err_resources_span_register
:
3180 devlink_resources_unregister(priv_to_devlink(mlxsw_core
), NULL
);
3184 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core
*mlxsw_core
,
3185 const struct mlxsw_config_profile
*profile
,
3186 u64
*p_single_size
, u64
*p_double_size
,
3189 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
3193 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, KVD_SINGLE_MIN_SIZE
) ||
3194 !MLXSW_CORE_RES_VALID(mlxsw_core
, KVD_DOUBLE_MIN_SIZE
))
3197 /* The hash part is what left of the kvd without the
3198 * linear part. It is split to the single size and
3199 * double size by the parts ratio from the profile.
3200 * Both sizes must be a multiplications of the
3201 * granularity from the profile. In case the user
3202 * provided the sizes they are obtained via devlink.
3204 err
= devlink_resource_size_get(devlink
,
3205 MLXSW_SP_RESOURCE_KVD_LINEAR
,
3208 *p_linear_size
= profile
->kvd_linear_size
;
3210 err
= devlink_resource_size_get(devlink
,
3211 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE
,
3214 double_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
) -
3216 double_size
*= profile
->kvd_hash_double_parts
;
3217 double_size
/= profile
->kvd_hash_double_parts
+
3218 profile
->kvd_hash_single_parts
;
3219 *p_double_size
= rounddown(double_size
,
3220 MLXSW_SP_KVD_GRANULARITY
);
3223 err
= devlink_resource_size_get(devlink
,
3224 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE
,
3227 *p_single_size
= MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
) -
3228 *p_double_size
- *p_linear_size
;
3230 /* Check results are legal. */
3231 if (*p_single_size
< MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SINGLE_MIN_SIZE
) ||
3232 *p_double_size
< MLXSW_CORE_RES_GET(mlxsw_core
, KVD_DOUBLE_MIN_SIZE
) ||
3233 MLXSW_CORE_RES_GET(mlxsw_core
, KVD_SIZE
) < *p_linear_size
)
3240 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink
*devlink
, u32 id
,
3241 struct devlink_param_gset_ctx
*ctx
)
3243 struct mlxsw_core
*mlxsw_core
= devlink_priv(devlink
);
3244 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3246 ctx
->val
.vu32
= mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp
);
3251 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink
*devlink
, u32 id
,
3252 struct devlink_param_gset_ctx
*ctx
)
3254 struct mlxsw_core
*mlxsw_core
= devlink_priv(devlink
);
3255 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3257 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp
, ctx
->val
.vu32
);
3260 static const struct devlink_param mlxsw_sp2_devlink_params
[] = {
3261 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL
,
3262 "acl_region_rehash_interval",
3263 DEVLINK_PARAM_TYPE_U32
,
3264 BIT(DEVLINK_PARAM_CMODE_RUNTIME
),
3265 mlxsw_sp_params_acl_region_rehash_intrvl_get
,
3266 mlxsw_sp_params_acl_region_rehash_intrvl_set
,
3270 static int mlxsw_sp2_params_register(struct mlxsw_core
*mlxsw_core
)
3272 struct devlink
*devlink
= priv_to_devlink(mlxsw_core
);
3273 union devlink_param_value value
;
3276 err
= devlink_params_register(devlink
, mlxsw_sp2_devlink_params
,
3277 ARRAY_SIZE(mlxsw_sp2_devlink_params
));
3282 devlink_param_driverinit_value_set(devlink
,
3283 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL
,
3288 static void mlxsw_sp2_params_unregister(struct mlxsw_core
*mlxsw_core
)
3290 devlink_params_unregister(priv_to_devlink(mlxsw_core
),
3291 mlxsw_sp2_devlink_params
,
3292 ARRAY_SIZE(mlxsw_sp2_devlink_params
));
3295 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core
*mlxsw_core
,
3296 struct sk_buff
*skb
, u8 local_port
)
3298 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3300 skb_pull(skb
, MLXSW_TXHDR_LEN
);
3301 mlxsw_sp
->ptp_ops
->transmitted(mlxsw_sp
, skb
, local_port
);
3304 static struct mlxsw_driver mlxsw_sp1_driver
= {
3305 .kind
= mlxsw_sp1_driver_name
,
3306 .priv_size
= sizeof(struct mlxsw_sp
),
3307 .fw_req_rev
= &mlxsw_sp1_fw_rev
,
3308 .fw_filename
= MLXSW_SP1_FW_FILENAME
,
3309 .init
= mlxsw_sp1_init
,
3310 .fini
= mlxsw_sp_fini
,
3311 .basic_trap_groups_set
= mlxsw_sp_basic_trap_groups_set
,
3312 .port_split
= mlxsw_sp_port_split
,
3313 .port_unsplit
= mlxsw_sp_port_unsplit
,
3314 .sb_pool_get
= mlxsw_sp_sb_pool_get
,
3315 .sb_pool_set
= mlxsw_sp_sb_pool_set
,
3316 .sb_port_pool_get
= mlxsw_sp_sb_port_pool_get
,
3317 .sb_port_pool_set
= mlxsw_sp_sb_port_pool_set
,
3318 .sb_tc_pool_bind_get
= mlxsw_sp_sb_tc_pool_bind_get
,
3319 .sb_tc_pool_bind_set
= mlxsw_sp_sb_tc_pool_bind_set
,
3320 .sb_occ_snapshot
= mlxsw_sp_sb_occ_snapshot
,
3321 .sb_occ_max_clear
= mlxsw_sp_sb_occ_max_clear
,
3322 .sb_occ_port_pool_get
= mlxsw_sp_sb_occ_port_pool_get
,
3323 .sb_occ_tc_port_bind_get
= mlxsw_sp_sb_occ_tc_port_bind_get
,
3324 .trap_init
= mlxsw_sp_trap_init
,
3325 .trap_fini
= mlxsw_sp_trap_fini
,
3326 .trap_action_set
= mlxsw_sp_trap_action_set
,
3327 .trap_group_init
= mlxsw_sp_trap_group_init
,
3328 .trap_group_set
= mlxsw_sp_trap_group_set
,
3329 .trap_policer_init
= mlxsw_sp_trap_policer_init
,
3330 .trap_policer_fini
= mlxsw_sp_trap_policer_fini
,
3331 .trap_policer_set
= mlxsw_sp_trap_policer_set
,
3332 .trap_policer_counter_get
= mlxsw_sp_trap_policer_counter_get
,
3333 .txhdr_construct
= mlxsw_sp_txhdr_construct
,
3334 .resources_register
= mlxsw_sp1_resources_register
,
3335 .kvd_sizes_get
= mlxsw_sp_kvd_sizes_get
,
3336 .ptp_transmitted
= mlxsw_sp_ptp_transmitted
,
3337 .txhdr_len
= MLXSW_TXHDR_LEN
,
3338 .profile
= &mlxsw_sp1_config_profile
,
3339 .res_query_enabled
= true,
3340 .fw_fatal_enabled
= true,
3343 static struct mlxsw_driver mlxsw_sp2_driver
= {
3344 .kind
= mlxsw_sp2_driver_name
,
3345 .priv_size
= sizeof(struct mlxsw_sp
),
3346 .fw_req_rev
= &mlxsw_sp2_fw_rev
,
3347 .fw_filename
= MLXSW_SP2_FW_FILENAME
,
3348 .init
= mlxsw_sp2_init
,
3349 .fini
= mlxsw_sp_fini
,
3350 .basic_trap_groups_set
= mlxsw_sp_basic_trap_groups_set
,
3351 .port_split
= mlxsw_sp_port_split
,
3352 .port_unsplit
= mlxsw_sp_port_unsplit
,
3353 .sb_pool_get
= mlxsw_sp_sb_pool_get
,
3354 .sb_pool_set
= mlxsw_sp_sb_pool_set
,
3355 .sb_port_pool_get
= mlxsw_sp_sb_port_pool_get
,
3356 .sb_port_pool_set
= mlxsw_sp_sb_port_pool_set
,
3357 .sb_tc_pool_bind_get
= mlxsw_sp_sb_tc_pool_bind_get
,
3358 .sb_tc_pool_bind_set
= mlxsw_sp_sb_tc_pool_bind_set
,
3359 .sb_occ_snapshot
= mlxsw_sp_sb_occ_snapshot
,
3360 .sb_occ_max_clear
= mlxsw_sp_sb_occ_max_clear
,
3361 .sb_occ_port_pool_get
= mlxsw_sp_sb_occ_port_pool_get
,
3362 .sb_occ_tc_port_bind_get
= mlxsw_sp_sb_occ_tc_port_bind_get
,
3363 .trap_init
= mlxsw_sp_trap_init
,
3364 .trap_fini
= mlxsw_sp_trap_fini
,
3365 .trap_action_set
= mlxsw_sp_trap_action_set
,
3366 .trap_group_init
= mlxsw_sp_trap_group_init
,
3367 .trap_group_set
= mlxsw_sp_trap_group_set
,
3368 .trap_policer_init
= mlxsw_sp_trap_policer_init
,
3369 .trap_policer_fini
= mlxsw_sp_trap_policer_fini
,
3370 .trap_policer_set
= mlxsw_sp_trap_policer_set
,
3371 .trap_policer_counter_get
= mlxsw_sp_trap_policer_counter_get
,
3372 .txhdr_construct
= mlxsw_sp_txhdr_construct
,
3373 .resources_register
= mlxsw_sp2_resources_register
,
3374 .params_register
= mlxsw_sp2_params_register
,
3375 .params_unregister
= mlxsw_sp2_params_unregister
,
3376 .ptp_transmitted
= mlxsw_sp_ptp_transmitted
,
3377 .txhdr_len
= MLXSW_TXHDR_LEN
,
3378 .profile
= &mlxsw_sp2_config_profile
,
3379 .res_query_enabled
= true,
3380 .fw_fatal_enabled
= true,
/* mlxsw core driver description for Spectrum-3 ASICs. Spectrum-3 reuses
 * the Spectrum-2 resources, devlink params and config profile; only the
 * kind, firmware requirement and init callback differ.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp3_fw_rev,
	.fw_filename			= MLXSW_SP3_FW_FILENAME,
	.init				= mlxsw_sp3_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.params_register		= mlxsw_sp2_params_register,
	.params_unregister		= mlxsw_sp2_params_unregister,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.res_query_enabled		= true,
	.fw_fatal_enabled		= true,
};
3423 bool mlxsw_sp_port_dev_check(const struct net_device
*dev
)
3425 return dev
->netdev_ops
== &mlxsw_sp_port_netdev_ops
;
/* Callback for netdev_walk_all_lower_dev(): records the first lower
 * device that is a mlxsw_sp port in *data and stops the walk by
 * returning non-zero.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_port = data;

	if (!mlxsw_sp_port_dev_check(lower_dev))
		return 0;

	*p_port = netdev_priv(lower_dev);
	return 1;
}
3441 struct mlxsw_sp_port
*mlxsw_sp_port_dev_lower_find(struct net_device
*dev
)
3443 struct mlxsw_sp_port
*mlxsw_sp_port
;
3445 if (mlxsw_sp_port_dev_check(dev
))
3446 return netdev_priv(dev
);
3448 mlxsw_sp_port
= NULL
;
3449 netdev_walk_all_lower_dev(dev
, mlxsw_sp_lower_dev_walk
, &mlxsw_sp_port
);
3451 return mlxsw_sp_port
;
3454 struct mlxsw_sp
*mlxsw_sp_lower_get(struct net_device
*dev
)
3456 struct mlxsw_sp_port
*mlxsw_sp_port
;
3458 mlxsw_sp_port
= mlxsw_sp_port_dev_lower_find(dev
);
3459 return mlxsw_sp_port
? mlxsw_sp_port
->mlxsw_sp
: NULL
;
/* RCU variant of mlxsw_sp_port_dev_lower_find(): same lookup, but walks
 * the lower-device list with the _rcu iterator so it may be called from
 * an RCU read-side critical section instead of under RTNL.
 */
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}
/* Find the mlxsw_sp port under @dev and take a reference on its netdev
 * so it cannot disappear after the RCU section ends. Balanced by
 * mlxsw_sp_port_dev_put().
 *
 * NOTE(review): the rcu_read_lock()/unlock() bracketing and the NULL
 * check around dev_hold() were dropped by the source extraction and are
 * reconstructed here from the _rcu lookup used — confirm against the
 * original file.
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();

	return mlxsw_sp_port;
}
/* Release the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
/* Make @mlxsw_sp_port leave every bridge that @lag_dev, or one of the
 * LAG's direct uppers, is a port of. Used when the port leaves the LAG,
 * since bridge membership was established through the LAG device.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	/* The LAG itself may be directly enslaved to a bridge. */
	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	/* Any upper of the LAG (e.g. a VLAN device) that is a bridge port
	 * must leave its bridge as well.
	 */
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
/* Create LAG @lag_id in the device via the SLDR register. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
/* Destroy LAG @lag_id in the device via the SLDR register. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
/* Add the port to LAG @lag_id's collector at @port_index (SLCOR register).
 * Collector membership lets the port receive traffic for the LAG.
 */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
/* Remove the port from LAG @lag_id's collector (SLCOR register). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
/* Enable collection on the port for LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
/* Disable collection on the port for LAG @lag_id (SLCOR register). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
/* Map @lag_dev to a device LAG index: return the index already bound to
 * it, or the first free one. Fails when all MAX_LAG slots are in use.
 *
 * NOTE(review): the "found" early return and the error code for the
 * exhausted case were dropped by the source extraction and are
 * reconstructed from the surrounding control flow — confirm against the
 * original file.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}
/* Validate that @lag_dev can be offloaded as a LAG master: a LAG index
 * must be available and the LAG must use hash Tx. On failure an extack
 * message is set and false is returned.
 */
static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}
/* Find a free member slot inside LAG @lag_id. Returns -EBUSY when all
 * MAX_LAG_MEMBERS slots are occupied.
 */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}
/* Enslave the port to @lag_dev: allocate (or reuse) the device LAG,
 * add the port to its collector, record the core mapping, and detach
 * the port's default VLAN from the router since a lagged port cannot be
 * a router interface.
 *
 * NOTE(review): the err checks, lag->dev assignment, ref_count++ and
 * final returns were dropped by the source extraction; they are
 * reconstructed from the visible goto label and rollback — confirm
 * against the original file.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		/* First member: create the LAG in hardware. */
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
/* Undo mlxsw_sp_port_lag_join(): remove the port from the LAG collector,
 * flush its VLANs, detach LAG uppers from bridges, destroy the LAG if
 * this was the last member, and restore the default PVID.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are members of.
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
/* Add the port to LAG @lag_id's distributor (SLDR register), so egress
 * traffic on the LAG may be hashed to this port.
 */
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
/* Remove the port from LAG @lag_id's distributor (SLDR register). */
static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
/* Enable both collection and distribution for the port on its LAG.
 * If adding to the distributor fails, collection is rolled back.
 */
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}
/* Disable distribution then collection for the port on its LAG.
 * If disabling collection fails, the port is re-added to the
 * distributor to keep hardware state consistent.
 */
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}
3779 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port
*mlxsw_sp_port
,
3780 struct netdev_lag_lower_state_info
*info
)
3782 if (info
->tx_enabled
)
3783 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port
);
3785 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port
);
/* Set the port's STP state for every VLAN at once: FORWARDING when
 * @enable, DISCARDING otherwise. The SPMS payload is heap-allocated
 * because it carries an entry per VLAN and is too large for the stack.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
/* Prepare the port for Open vSwitch: enter virtual-port (VLAN-aware)
 * mode, force STP forwarding, allow all VLANs, and disable learning per
 * VLAN (OVS manages FDB itself). Unwinds in reverse order on failure.
 *
 * NOTE(review): the err checks, the vid initializer and the unwind
 * labels were partly dropped by the source extraction; reconstructed
 * from the visible goto targets — confirm against the original file.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
/* Undo mlxsw_sp_port_ovs_join() in reverse order: re-enable learning,
 * drop the VLAN membership, disable STP forwarding and leave
 * virtual-port mode.
 */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
3862 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device
*br_dev
)
3864 unsigned int num_vxlans
= 0;
3865 struct net_device
*dev
;
3866 struct list_head
*iter
;
3868 netdev_for_each_lower_dev(br_dev
, dev
, iter
) {
3869 if (netif_is_vxlan(dev
))
3873 return num_vxlans
> 1;
/* Check that no two VxLAN devices under @br_dev map to the same VLAN:
 * each mapped VID may appear at most once, tracked via a VLAN bitmap.
 *
 * NOTE(review): the skip conditions after mlxsw_sp_vxlan_mapped_vid()
 * and the return statements were dropped by the source extraction and
 * are reconstructed — confirm against the original file.
 */
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}
/* Validate that a bridge with VxLAN lowers can be offloaded: multicast
 * must be disabled; a VLAN-unaware bridge may have only one VxLAN; in a
 * VLAN-aware bridge, VxLAN devices must not share a mapped VLAN. Sets an
 * extack message and returns false on violation.
 */
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}
	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}
	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}
/* Handle PRECHANGEUPPER/CHANGEUPPER netdev events for a mlxsw_sp port.
 * PRECHANGEUPPER vetoes unsupported topologies (with extack messages);
 * CHANGEUPPER performs the actual join/leave of bridge, LAG, OVS or
 * macvlan uppers.
 *
 * NOTE(review): several short lines (the info = ptr assignment, the
 * !info->linking early breaks, error codes and the switch scaffolding)
 * were dropped by the source extraction and are reconstructed from the
 * visible checks — confirm against the original file.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
/* Handle CHANGELOWERSTATE for a lagged port: propagate the LAG member
 * Tx state into hardware. Errors are only logged — the notifier chain
 * cannot veto this event, so 0 is always returned.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}
4061 static int mlxsw_sp_netdevice_port_event(struct net_device
*lower_dev
,
4062 struct net_device
*port_dev
,
4063 unsigned long event
, void *ptr
)
4066 case NETDEV_PRECHANGEUPPER
:
4067 case NETDEV_CHANGEUPPER
:
4068 return mlxsw_sp_netdevice_port_upper_event(lower_dev
, port_dev
,
4070 case NETDEV_CHANGELOWERSTATE
:
4071 return mlxsw_sp_netdevice_port_lower_event(port_dev
, event
,
/* Fan a netdev event on a LAG device out to each mlxsw_sp member port;
 * stop at the first error.
 */
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
/* Handle upper-change events for a VLAN device on top of a mlxsw_sp
 * port: only bridge and macvlan uppers are supported. PRECHANGEUPPER
 * vetoes invalid topologies; CHANGEUPPER joins/leaves the bridge or
 * tears down the macvlan RIF.
 *
 * NOTE(review): the vid parameter, !info->linking breaks and error
 * codes were dropped by the source extraction and are reconstructed
 * from the visible checks — confirm against the original file.
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}
/* Fan an event on a VLAN-over-LAG device out to each mlxsw_sp member
 * port of the LAG; stop at the first error.
 */
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}
/* Handle upper-change events for a VLAN device on top of a bridge:
 * only macvlan uppers are supported, and only on top of an existing
 * router interface. Ignored entirely when the bridge is not related to
 * this driver instance.
 *
 * NOTE(review): the !mlxsw_sp guard, !info->linking breaks and return
 * values were dropped by the source extraction and are reconstructed —
 * confirm against the original file.
 */
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
/* Dispatch an event on a VLAN device according to what its real device
 * is: a physical port, a LAG or a bridge. Other real devices are not
 * offloaded and the event is ignored.
 */
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}
/* Handle upper-change events for a bridge device itself: only VLAN and
 * macvlan uppers are allowed; on unlinking, the corresponding router
 * interface state is torn down. Ignored when the bridge is unrelated to
 * this driver instance.
 *
 * NOTE(review): the !mlxsw_sp guard, !info->linking breaks and return
 * values were dropped by the source extraction and are reconstructed —
 * confirm against the original file.
 */
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
/* A macvlan related to this driver may not have uppers of its own:
 * veto any PRECHANGEUPPER on it with an extack message.
 */
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}
4308 static bool mlxsw_sp_is_vrf_event(unsigned long event
, void *ptr
)
4310 struct netdev_notifier_changeupper_info
*info
= ptr
;
4312 if (event
!= NETDEV_PRECHANGEUPPER
&& event
!= NETDEV_CHANGEUPPER
)
4314 return netif_is_l3_master(info
->upper_dev
);
/* Handle events on a VxLAN device: join its bridge's offload domain when
 * linked/brought up under an offloaded VLAN-unaware bridge, leave it on
 * unlink/down. VLAN-aware bridges are handled later through the VLAN
 * configuration path instead.
 *
 * NOTE(review): two case labels and the NULL checks on the master upper
 * were dropped by the source extraction; the NETDEV_PRE_UP/NETDEV_DOWN
 * labels are reconstructed from the "netdev_master_upper_dev_get" arms
 * visible below — confirm against the original file.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
/* Top-level netdevice notifier for a mlxsw_sp instance. Invalidates any
 * SPAN entry mirroring to an unregistering netdev, respins SPAN, and
 * then routes the event to the handler matching the device kind
 * (VxLAN, IPIP, router port, VRF, physical port, LAG, VLAN, bridge,
 * macvlan). The resulting error is translated to notifier semantics.
 *
 * NOTE(review): the span_entry NULL check and err initialization were
 * dropped by the source extraction and are reconstructed — confirm
 * against the original file.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
/* Validator notifiers: allow the driver to veto IPv4/IPv6 address
 * additions it cannot offload before they are committed.
 */
static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

/* PCI ID tables and driver descriptions, one pair per ASIC generation. */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};
/* Module entry point: register the address validator notifiers, then the
 * three core drivers and the three PCI drivers. Uses the kernel's
 * goto-unwind idiom so that a failure at any step undoes everything
 * registered before it, in reverse order.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}
/* Module exit: unregister everything in the exact reverse order of
 * mlxsw_sp_module_init().
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Expose PCI IDs for module autoloading and declare the firmware images
 * request_firmware() may pull in, so initramfs tooling bundles them.
 */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);