// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
#define MLXSW_SP1_FWREV_SUBMINOR 1310
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP1_FWREV_MINOR,
	.subminor = MLXSW_SP1_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP1_FWREV_MINOR) \
	"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
#define MLXSW_SP2_FWREV_SUBMINOR 1310

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP2_FWREV_MINOR,
	.subminor = MLXSW_SP2_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP2_FWREV_MINOR) \
	"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
#define MLXSW_SP3_FWREV_SUBMINOR 1310

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP3_FWREV_MINOR,
	.subminor = MLXSW_SP3_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP3_FWREV_MINOR) \
	"." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
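
/* A rough layout sketch of the 16-byte Tx header the items above describe
 * (MLXSW_TXHDR_LEN is 0x10 in txheader.h). Bit positions follow directly
 * from the MLXSW_ITEM32() definitions, MSB first within each 32-bit word:
 *
 *   word 0 (0x00): version[31:28] ctl[27:26] proto[23:21] rx_is_router[19]
 *                  fid_valid[16] swid[14:12] control_tclass[6] etclass[3:0]
 *   word 1 (0x04): port_mid[31:16]
 *   word 2 (0x08): fid[15:0]
 *   word 3 (0x0C): type[3:0]
 */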

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
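
/* A minimal usage sketch of the flow-counter helpers above (illustrative
 * only, error handling trimmed): allocate an index from the flow sub-pool,
 * read it periodically, then return it to the pool:
 *
 *	unsigned int idx;
 *	u64 packets, bytes;
 *
 *	mlxsw_sp_flow_counter_alloc(mlxsw_sp, &idx);
 *	mlxsw_sp_flow_counter_get(mlxsw_sp, idx, &packets, &bytes);
 *	mlxsw_sp_flow_counter_free(mlxsw_sp, idx);
 *
 * Allocation clears the hardware counter, so the first read starts from
 * zero.
 */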

static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
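
/* Note that every packet the driver itself transmits is built as a control
 * packet here ('ctl' and 'type' set accordingly), so 'port_mid' names the
 * exact egress port and the FID/router fields stay zeroed from the memset;
 * data-path forwarding is done entirely by the ASIC, not through this
 * header.
 */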

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
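
/* The per-port MAC is derived by adding the local port number to the last
 * byte of the switch's base MAC (the addition deliberately wraps within
 * that byte). The mlxsw_sp{1,2}_mac_mask tables near the top of the file
 * give the prefix that all such derived addresses share on each ASIC
 * generation.
 */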

static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
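
/* Worked example for the adjustment above: a netdev MTU of 1500 is
 * programmed into PMTU as 1500 + 16 (MLXSW_TXHDR_LEN) + 14 (ETH_HLEN) =
 * 1530, since the hardware MTU also covers the Tx header and the Ethernet
 * header that the stack does not count in dev->mtu.
 */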

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}
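
/* PVID 0 is used here as "no PVID": instead of programming a port VID, the
 * port is simply told (via SPAFT) to stop admitting untagged frames. Any
 * non-zero PVID is programmed first and untagged admittance is re-enabled
 * afterwards, with a rollback to the previous PVID if that second step
 * fails.
 */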

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	bool separate_rxtx;
	u8 module;
	u8 width;
	int err;
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}

static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
					 const struct mlxsw_sp_hdroom *hdroom)
{
	u16 delay_cells;

	delay_cells = mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->delay_bytes);

	/* In the worst case scenario the delay will be made up of packets that
	 * are all of size CELL_SIZE + 1, which means each packet will require
	 * almost twice its true size when buffered in the switch. We therefore
	 * multiply this value by the "cell factor", which is close to 2.
	 *
	 * Another MTU is added in case the transmitting host already started
	 * transmitting a maximum length frame when the PFC packet was received.
	 */
	return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->mtu);
}
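
/* To make the formula above concrete, with a hypothetical 96-byte cell:
 * delay_bytes = 9600 and mtu = 1500 give delay_cells = 100, so the reserved
 * headroom is 2 * 100 + ceil(1500 / 96) = 216 cells. The doubling covers
 * the near-2x cell-packing waste described above; the extra MTU covers a
 * frame already in flight when the PFC pause was sent.
 */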

static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf)
{
	int prio;

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
		if (hdroom->prios.prio[prio].buf_idx == buf)
			return true;
	}
	return false;
}

void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_hdroom *hdroom)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int i;

	for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
		struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
		u16 thres_cells;
		u16 delay_cells;

		if (!mlxsw_sp_hdroom_buf_is_used(hdroom, i)) {
			thres_cells = 0;
			delay_cells = 0;
		} else if (buf->lossy) {
			thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, hdroom->mtu);
			delay_cells = 0;
		} else {
			thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, hdroom->mtu);
			delay_cells = mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp, hdroom);
		}

		thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
		delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);

		buf->thres_cells = thres_cells;
		buf->size_cells = thres_cells + delay_cells;
	}
}

static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port,
					     const struct mlxsw_sp_hdroom *hdroom, bool force)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	bool dirty;
	int err;
	int i;

	dirty = memcmp(&mlxsw_sp_port->hdroom->bufs, &hdroom->bufs, sizeof(hdroom->bufs));
	if (!dirty && !force)
		return 0;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
		const struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, buf->size_cells, buf->thres_cells, buf->lossy);
	}

	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	mlxsw_sp_port->hdroom->bufs = hdroom->bufs;
	return 0;
}

static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_hdroom *hdroom)
{
	u32 taken_headroom_cells = 0;
	u32 max_headroom_cells;
	int i;

	for (i = 0; i < MLXSW_SP_PB_COUNT; i++)
		taken_headroom_cells += hdroom->bufs.buf[i].size_cells;

	max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);
	return taken_headroom_cells <= max_headroom_cells;
}
static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
				       const struct mlxsw_sp_hdroom *hdroom, bool force)
{
	int err;

	if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
		return -ENOBUFS;

	err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, force);
	if (err)
		return err;

	*mlxsw_sp_port->hdroom = *hdroom;
	return 0;
}

int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
			      const struct mlxsw_sp_hdroom *hdroom)
{
	return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom, false);
}
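
/* The 'force' flag skips the dirty check in
 * mlxsw_sp_hdroom_configure_buffers(), so the PBMC register is rewritten
 * even when the cached buffer state already matches; the public
 * mlxsw_sp_hdroom_configure() entry point is the lazy, non-forcing path.
 */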

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}
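
/* Ordering note: the headroom is resized for the new MTU before the port
 * MTU itself is raised, so the buffers can never be too small for frames
 * the port admits; if the PMTU write fails, the original headroom is
 * restored.
 */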

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_TC,
						  i, ppcnt_pl);
		if (!err)
			xstats->wred_drop[i] =
				mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);

		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}
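
/* The cache trades freshness for safety: querying PPCNT goes through a
 * register transaction that may sleep, which ndo_get_stats64 callers are
 * not always allowed to do, so the delayed work above refreshes the
 * snapshot every MLXSW_HW_STATS_UPDATE_TIME and readers only pay for a
 * memcpy.
 */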

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
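
/* A single SPVM transaction carries at most MLXSW_REG_SPVM_REC_MAX_COUNT
 * VID records, so the loop above walks a large range in chunks; e.g. a
 * request covering VIDs 1-4094 becomes ceil(4094 / REC_MAX_COUNT) register
 * writes. A failure mid-range returns immediately and leaves earlier
 * chunks applied.
 */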

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
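
/* The err |= pattern above deliberately tries every handler even after one
 * fails; the handlers return negative errnos, so OR-ing only preserves
 * "something went wrong", not which error. That is why the failure path
 * restores the saved oper_features and reports a generic -EINVAL rather
 * than the original code.
 */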

static struct devlink_port *
mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
						mlxsw_sp_port->local_port);
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_get_devlink_port	= mlxsw_sp_port_get_devlink_port,
	.ndo_do_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to supported speeds. */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap, mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
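
/* A sketch of the per-port scheduling hierarchy the function above builds;
 * TCs 8-15 shadow TCs 0-7 and carry the multicast traffic, per the 'i + 8'
 * loops:
 *
 *                    port
 *                      |
 *                   group 0
 *                  /   ...   \
 *          subgroup 0  ...  subgroup 7
 *            /    \            /    \
 *          TC 0  TC 8  ...  TC 7  TC 15
 */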

static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 split_base_local_port,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool split = !!split_base_local_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	struct net_device *dev;
	bool splittable;
	int err;

	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
				   port_mapping->module + 1, split,
				   port_mapping->lane / lanes,
				   splittable, lanes,
				   mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->split_base_local_port = split_base_local_port;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_module_map(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_map;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
err_port_module_map:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
1796
1797 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1798 {
1799 struct mlxsw_sp_port *mlxsw_sp_port;
1800 int err;
1801
1802 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1803 if (!mlxsw_sp_port)
1804 return -ENOMEM;
1805
1806 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1807 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1808
1809 err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1810 mlxsw_sp_port,
1811 mlxsw_sp->base_mac,
1812 sizeof(mlxsw_sp->base_mac));
1813 if (err) {
1814 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1815 goto err_core_cpu_port_init;
1816 }
1817
1818 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1819 return 0;
1820
1821 err_core_cpu_port_init:
1822 kfree(mlxsw_sp_port);
1823 return err;
1824 }
1825
1826 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1827 {
1828 struct mlxsw_sp_port *mlxsw_sp_port =
1829 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1830
1831 mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1832 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1833 kfree(mlxsw_sp_port);
1834 }
1835
1836 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1837 {
1838 return mlxsw_sp->ports[local_port] != NULL;
1839 }
1840
1841 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1842 {
1843 int i;
1844
1845 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1846 if (mlxsw_sp_port_created(mlxsw_sp, i))
1847 mlxsw_sp_port_remove(mlxsw_sp, i);
1848 mlxsw_sp_cpu_port_remove(mlxsw_sp);
1849 kfree(mlxsw_sp->ports);
1850 mlxsw_sp->ports = NULL;
1851 }
1852
1853 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1854 {
1855 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1856 struct mlxsw_sp_port_mapping *port_mapping;
1857 size_t alloc_size;
1858 int i;
1859 int err;
1860
1861 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
1862 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1863 if (!mlxsw_sp->ports)
1864 return -ENOMEM;
1865
1866 err = mlxsw_sp_cpu_port_create(mlxsw_sp);
1867 if (err)
1868 goto err_cpu_port_create;
1869
1870 for (i = 1; i < max_ports; i++) {
1871 port_mapping = mlxsw_sp->port_mapping[i];
1872 if (!port_mapping)
1873 continue;
1874 err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
1875 if (err)
1876 goto err_port_create;
1877 }
1878 return 0;
1879
1880 err_port_create:
1881 for (i--; i >= 1; i--)
1882 if (mlxsw_sp_port_created(mlxsw_sp, i))
1883 mlxsw_sp_port_remove(mlxsw_sp, i);
1884 mlxsw_sp_cpu_port_remove(mlxsw_sp);
1885 err_cpu_port_create:
1886 kfree(mlxsw_sp->ports);
1887 mlxsw_sp->ports = NULL;
1888 return err;
1889 }
1890
1891 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
1892 {
1893 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1894 struct mlxsw_sp_port_mapping port_mapping;
1895 int i;
1896 int err;
1897
1898 mlxsw_sp->port_mapping = kcalloc(max_ports,
1899 sizeof(struct mlxsw_sp_port_mapping *),
1900 GFP_KERNEL);
1901 if (!mlxsw_sp->port_mapping)
1902 return -ENOMEM;
1903
1904 for (i = 1; i < max_ports; i++) {
1905 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping);
1906 if (err)
1907 goto err_port_module_info_get;
1908 if (!port_mapping.width)
1909 continue;
1910
1911 mlxsw_sp->port_mapping[i] = kmemdup(&port_mapping,
1912 sizeof(port_mapping),
1913 GFP_KERNEL);
1914 if (!mlxsw_sp->port_mapping[i]) {
1915 err = -ENOMEM;
1916 goto err_port_module_info_dup;
1917 }
1918 }
1919 return 0;
1920
1921 err_port_module_info_get:
1922 err_port_module_info_dup:
1923 for (i--; i >= 1; i--)
1924 kfree(mlxsw_sp->port_mapping[i]);
1925 kfree(mlxsw_sp->port_mapping);
1926 return err;
1927 }
1928
1929 static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
1930 {
1931 int i;
1932
1933 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
1934 kfree(mlxsw_sp->port_mapping[i]);
1935 kfree(mlxsw_sp->port_mapping);
1936 }
1937
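/* Local ports of one module cluster are numbered consecutively from the
 * cluster's base port. E.g. with max_width 4, local port 7 has offset
 * (7 - 1) % 4 = 2, giving base port 5.
 */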
1938 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
1939 {
1940 u8 offset = (local_port - 1) % max_width;
1941
1942 return local_port - offset;
1943 }
1944
1945 static int
1946 mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
1947 struct mlxsw_sp_port_mapping *port_mapping,
1948 unsigned int count, u8 offset)
1949 {
1950 struct mlxsw_sp_port_mapping split_port_mapping;
1951 int err, i;
1952
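/* Each split port keeps the parent's module but gets an equal share of
 * its width; the lane of the n-th split port is advanced by
 * n * (parent width / count).
 */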
1953 split_port_mapping = *port_mapping;
1954 split_port_mapping.width /= count;
1955 for (i = 0; i < count; i++) {
1956 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
1957 base_port, &split_port_mapping);
1958 if (err)
1959 goto err_port_create;
1960 split_port_mapping.lane += split_port_mapping.width;
1961 }
1962
1963 return 0;
1964
1965 err_port_create:
1966 for (i--; i >= 0; i--)
1967 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
1968 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
1969 return err;
1970 }
1971
1972 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1973 u8 base_port,
1974 unsigned int count, u8 offset)
1975 {
1976 struct mlxsw_sp_port_mapping *port_mapping;
1977 int i;
1978
1979 /* Go over original unsplit ports in the gap and recreate them. */
1980 for (i = 0; i < count * offset; i++) {
1981 port_mapping = mlxsw_sp->port_mapping[base_port + i];
1982 if (!port_mapping)
1983 continue;
1984 mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
1985 }
1986 }
1987
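/* Return the stride between local port numbers of sibling split ports.
 * The stride is a firmware resource keyed by the width (1, 2 or 4
 * lanes) each port ends up with after the split.
 */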
1988 static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
1989 unsigned int count,
1990 unsigned int max_width)
1991 {
1992 enum mlxsw_res_id local_ports_in_x_res_id;
1993 int split_width = max_width / count;
1994
1995 if (split_width == 1)
1996 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
1997 else if (split_width == 2)
1998 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
1999 else if (split_width == 4)
2000 local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
2001 else
2002 return -EINVAL;
2003
2004 if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
2005 return -EINVAL;
2006 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
2007 }
2008
2009 static struct mlxsw_sp_port *
2010 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2011 {
2012 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2013 return mlxsw_sp->ports[local_port];
2014 return NULL;
2015 }
2016
2017 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2018 unsigned int count,
2019 struct netlink_ext_ack *extack)
2020 {
2021 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2022 struct mlxsw_sp_port_mapping port_mapping;
2023 struct mlxsw_sp_port *mlxsw_sp_port;
2024 int max_width;
2025 u8 base_port;
2026 int offset;
2027 int i;
2028 int err;
2029
2030 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
2031 if (!mlxsw_sp_port) {
2032 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2033 local_port);
2034 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
2035 return -EINVAL;
2036 }
2037
2038 max_width = mlxsw_core_module_max_width(mlxsw_core,
2039 mlxsw_sp_port->mapping.module);
2040 if (max_width < 0) {
2041 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
2042 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
2043 return max_width;
2044 }
2045
2046 /* A port that is not using the module's maximal width cannot be split. */
2047 if (mlxsw_sp_port->mapping.width != max_width) {
2048 netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
2049 NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
2050 return -EINVAL;
2051 }
2052
2053 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
2054 if (offset < 0) {
2055 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
2056 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
2057 return -EINVAL;
2058 }
2059
2060 /* The local port may differ from the base port only when the
2061 * maximal split is being done.
2062 */
2063 base_port = count == max_width ?
2064 mlxsw_sp_cluster_base_port_get(local_port, max_width) :
2065 local_port;
2066
2067 for (i = 0; i < count * offset; i++) {
2068 /* The base port is expected to exist, and so is the middle one
2069 * in case of a maximal split count; skip both here.
2070 */
2071 if (i == 0 || (count == max_width && i == count / 2))
2072 continue;
2073
2074 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
2075 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2076 NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
2077 return -EINVAL;
2078 }
2079 }
2080
2081 port_mapping = mlxsw_sp_port->mapping;
2082
2083 for (i = 0; i < count; i++)
2084 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
2085 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
2086
2087 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
2088 count, offset);
2089 if (err) {
2090 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2091 goto err_port_split_create;
2092 }
2093
2094 return 0;
2095
2096 err_port_split_create:
2097 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
2098 return err;
2099 }
2100
2101 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
2102 struct netlink_ext_ack *extack)
2103 {
2104 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2105 struct mlxsw_sp_port *mlxsw_sp_port;
2106 unsigned int count;
2107 int max_width;
2108 u8 base_port;
2109 int offset;
2110 int i;
2111
2112 mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
2113 if (!mlxsw_sp_port) {
2114 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2115 local_port);
2116 NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
2117 return -EINVAL;
2118 }
2119
2120 if (!mlxsw_sp_port->split) {
2121 netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
2122 NL_SET_ERR_MSG_MOD(extack, "Port was not split");
2123 return -EINVAL;
2124 }
2125
2126 max_width = mlxsw_core_module_max_width(mlxsw_core,
2127 mlxsw_sp_port->mapping.module);
2128 if (max_width < 0) {
2129 netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
2130 NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
2131 return max_width;
2132 }
2133
2134 count = max_width / mlxsw_sp_port->mapping.width;
2135
2136 offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
2137 if (WARN_ON(offset < 0)) {
2138 netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
2139 NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
2140 return -EINVAL;
2141 }
2142
2143 base_port = mlxsw_sp_port->split_base_local_port;
2144
2145 for (i = 0; i < count; i++)
2146 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
2147 mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
2148
2149 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
2150
2151 return 0;
2152 }
2153
2154 static void
2155 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2156 {
2157 int i;
2158
2159 for (i = 0; i < TC_MAX_QUEUE; i++)
2160 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2161 }
2162
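/* PUDE (Port Up/Down Event) handler: reflect the operational status
 * reported by the device in the netdev carrier state, and kick the PTP
 * shaper work on link up.
 */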
2163 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2164 char *pude_pl, void *priv)
2165 {
2166 struct mlxsw_sp *mlxsw_sp = priv;
2167 struct mlxsw_sp_port *mlxsw_sp_port;
2168 enum mlxsw_reg_pude_oper_status status;
2169 u8 local_port;
2170
2171 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2172 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2173 if (!mlxsw_sp_port)
2174 return;
2175
2176 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2177 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2178 netdev_info(mlxsw_sp_port->dev, "link up\n");
2179 netif_carrier_on(mlxsw_sp_port->dev);
2180 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2181 } else {
2182 netdev_info(mlxsw_sp_port->dev, "link down\n");
2183 netif_carrier_off(mlxsw_sp_port->dev);
2184 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2185 }
2186 }
2187
2188 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2189 char *mtpptr_pl, bool ingress)
2190 {
2191 u8 local_port;
2192 u8 num_rec;
2193 int i;
2194
2195 local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2196 num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2197 for (i = 0; i < num_rec; i++) {
2198 u8 domain_number;
2199 u8 message_type;
2200 u16 sequence_id;
2201 u64 timestamp;
2202
2203 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2204 &domain_number, &sequence_id,
2205 &timestamp);
2206 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2207 message_type, domain_number,
2208 sequence_id, timestamp);
2209 }
2210 }
2211
2212 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2213 char *mtpptr_pl, void *priv)
2214 {
2215 struct mlxsw_sp *mlxsw_sp = priv;
2216
2217 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2218 }
2219
2220 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2221 char *mtpptr_pl, void *priv)
2222 {
2223 struct mlxsw_sp *mlxsw_sp = priv;
2224
2225 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2226 }
2227
2228 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2229 u8 local_port, void *priv)
2230 {
2231 struct mlxsw_sp *mlxsw_sp = priv;
2232 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2233 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2234
2235 if (unlikely(!mlxsw_sp_port)) {
2236 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2237 local_port);
2238 return;
2239 }
2240
2241 skb->dev = mlxsw_sp_port->dev;
2242
2243 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2244 u64_stats_update_begin(&pcpu_stats->syncp);
2245 pcpu_stats->rx_packets++;
2246 pcpu_stats->rx_bytes += skb->len;
2247 u64_stats_update_end(&pcpu_stats->syncp);
2248
2249 skb->protocol = eth_type_trans(skb, skb->dev);
2250 netif_receive_skb(skb);
2251 }
2252
2253 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2254 void *priv)
2255 {
2256 skb->offload_fwd_mark = 1;
2257 mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2258 }
2259
2260 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2261 u8 local_port, void *priv)
2262 {
2263 skb->offload_l3_fwd_mark = 1;
2264 skb->offload_fwd_mark = 1;
2265 mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2266 }
2267
2268 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
2269 u8 local_port)
2270 {
2271 mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
2272 }
2273
2274 void mlxsw_sp_sample_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
2275 u8 local_port)
2276 {
2277 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2278 struct mlxsw_sp_port_sample *sample;
2279 u32 size;
2280
2281 if (unlikely(!mlxsw_sp_port)) {
2282 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
2283 local_port);
2284 goto out;
2285 }
2286
2287 rcu_read_lock();
2288 sample = rcu_dereference(mlxsw_sp_port->sample);
2289 if (!sample)
2290 goto out_unlock;
2291 size = sample->truncate ? sample->trunc_size : skb->len;
2292 psample_sample_packet(sample->psample_group, skb, size,
2293 mlxsw_sp_port->dev->ifindex, 0, sample->rate);
2294 out_unlock:
2295 rcu_read_unlock();
2296 out:
2297 consume_skb(skb);
2298 }
2299
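/* The _MARK variants below set skb->offload_fwd_mark on trapped
 * packets, telling the bridge that the packet was already forwarded in
 * hardware so software does not forward it again.
 */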
2300 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2301 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
2302 _is_ctrl, SP_##_trap_group, DISCARD)
2303
2304 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2305 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
2306 _is_ctrl, SP_##_trap_group, DISCARD)
2307
2308 #define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2309 MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
2310 _is_ctrl, SP_##_trap_group, DISCARD)
2311
2312 #define MLXSW_SP_EVENTL(_func, _trap_id) \
2313 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2314
2315 static const struct mlxsw_listener mlxsw_sp_listener[] = {
2316 /* Events */
2317 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
2318 /* L2 traps */
2319 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
2320 /* L3 traps */
2321 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
2322 false),
2323 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
2324 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
2325 false),
2326 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
2327 ROUTER_EXP, false),
2328 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
2329 ROUTER_EXP, false),
2330 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
2331 ROUTER_EXP, false),
2332 MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
2333 ROUTER_EXP, false),
2334 /* Multicast Router Traps */
2335 MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
2336 MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
2337 /* NVE traps */
2338 MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
2339 };
2340
2341 static const struct mlxsw_listener mlxsw_sp1_listener[] = {
2342 /* Events */
2343 MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
2344 MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
2345 };
2346
2347 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2348 {
2349 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2350 char qpcr_pl[MLXSW_REG_QPCR_LEN];
2351 enum mlxsw_reg_qpcr_ir_units ir_units;
2352 int max_cpu_policers;
2353 bool is_bytes;
2354 u8 burst_size;
2355 u32 rate;
2356 int i, err;
2357
2358 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2359 return -EIO;
2360
2361 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2362
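/* All trap groups handled below are policed to 1024 packets per second
 * (is_bytes is false) with a small burst allowance.
 */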
2363 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2364 for (i = 0; i < max_cpu_policers; i++) {
2365 is_bytes = false;
2366 switch (i) {
2367 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2368 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2369 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2370 rate = 1024;
2371 burst_size = 7;
2372 break;
2373 default:
2374 continue;
2375 }
2376
2377 __set_bit(i, mlxsw_sp->trap->policers_usage);
2378 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
2379 burst_size);
2380 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
2381 if (err)
2382 return err;
2383 }
2384
2385 return 0;
2386 }
2387
2388 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
2389 {
2390 char htgt_pl[MLXSW_REG_HTGT_LEN];
2391 enum mlxsw_reg_htgt_trap_group i;
2392 int max_cpu_policers;
2393 int max_trap_groups;
2394 u8 priority, tc;
2395 u16 policer_id;
2396 int err;
2397
2398 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
2399 return -EIO;
2400
2401 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
2402 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2403
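/* By convention, a trap group is bound to the policer with the same
 * index; event groups are not policed at all.
 */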
2404 for (i = 0; i < max_trap_groups; i++) {
2405 policer_id = i;
2406 switch (i) {
2407 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2408 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2409 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2410 priority = 1;
2411 tc = 1;
2412 break;
2413 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
2414 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
2415 tc = MLXSW_REG_HTGT_DEFAULT_TC;
2416 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
2417 break;
2418 default:
2419 continue;
2420 }
2421
2422 if (max_cpu_policers <= policer_id &&
2423 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
2424 return -EIO;
2425
2426 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
2427 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2428 if (err)
2429 return err;
2430 }
2431
2432 return 0;
2433 }
2434
2435 static int mlxsw_sp_traps_register(struct mlxsw_sp *mlxsw_sp,
2436 const struct mlxsw_listener listeners[],
2437 size_t listeners_count)
2438 {
2439 int i;
2440 int err;
2441
2442 for (i = 0; i < listeners_count; i++) {
2443 err = mlxsw_core_trap_register(mlxsw_sp->core,
2444 &listeners[i],
2445 mlxsw_sp);
2446 if (err)
2447 goto err_listener_register;
2448
2449 }
2450 return 0;
2451
2452 err_listener_register:
2453 for (i--; i >= 0; i--) {
2454 mlxsw_core_trap_unregister(mlxsw_sp->core,
2455 &listeners[i],
2456 mlxsw_sp);
2457 }
2458 return err;
2459 }
2460
2461 static void mlxsw_sp_traps_unregister(struct mlxsw_sp *mlxsw_sp,
2462 const struct mlxsw_listener listeners[],
2463 size_t listeners_count)
2464 {
2465 int i;
2466
2467 for (i = 0; i < listeners_count; i++) {
2468 mlxsw_core_trap_unregister(mlxsw_sp->core,
2469 &listeners[i],
2470 mlxsw_sp);
2471 }
2472 }
2473
2474 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2475 {
2476 struct mlxsw_sp_trap *trap;
2477 u64 max_policers;
2478 int err;
2479
2480 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
2481 return -EIO;
2482 max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
2483 trap = kzalloc(struct_size(trap, policers_usage,
2484 BITS_TO_LONGS(max_policers)), GFP_KERNEL);
2485 if (!trap)
2486 return -ENOMEM;
2487 trap->max_policers = max_policers;
2488 mlxsw_sp->trap = trap;
2489
2490 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
2491 if (err)
2492 goto err_cpu_policers_set;
2493
2494 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
2495 if (err)
2496 goto err_trap_groups_set;
2497
2498 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp_listener,
2499 ARRAY_SIZE(mlxsw_sp_listener));
2500 if (err)
2501 goto err_traps_register;
2502
2503 err = mlxsw_sp_traps_register(mlxsw_sp, mlxsw_sp->listeners,
2504 mlxsw_sp->listeners_count);
2505 if (err)
2506 goto err_extra_traps_init;
2507
2508 return 0;
2509
2510 err_extra_traps_init:
2511 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
2512 ARRAY_SIZE(mlxsw_sp_listener));
2513 err_traps_register:
2514 err_trap_groups_set:
2515 err_cpu_policers_set:
2516 kfree(trap);
2517 return err;
2518 }
2519
2520 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2521 {
2522 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp->listeners,
2523 mlxsw_sp->listeners_count);
2524 mlxsw_sp_traps_unregister(mlxsw_sp, mlxsw_sp_listener,
2525 ARRAY_SIZE(mlxsw_sp_listener));
2526 kfree(mlxsw_sp->trap);
2527 }
2528
2529 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2530
2531 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2532 {
2533 char slcr_pl[MLXSW_REG_SLCR_LEN];
2534 u32 seed;
2535 int err;
2536
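/* Derive the LAG hash seed from the base MAC, so the seed is stable
 * for a given switch but differs between switches.
 */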
2537 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
2538 MLXSW_SP_LAG_SEED_INIT);
2539 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2540 MLXSW_REG_SLCR_LAG_HASH_DMAC |
2541 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2542 MLXSW_REG_SLCR_LAG_HASH_VLANID |
2543 MLXSW_REG_SLCR_LAG_HASH_SIP |
2544 MLXSW_REG_SLCR_LAG_HASH_DIP |
2545 MLXSW_REG_SLCR_LAG_HASH_SPORT |
2546 MLXSW_REG_SLCR_LAG_HASH_DPORT |
2547 MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
2548 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2549 if (err)
2550 return err;
2551
2552 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
2553 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
2554 return -EIO;
2555
2556 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
2557 sizeof(struct mlxsw_sp_upper),
2558 GFP_KERNEL);
2559 if (!mlxsw_sp->lags)
2560 return -ENOMEM;
2561
2562 return 0;
2563 }
2564
2565 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
2566 {
2567 kfree(mlxsw_sp->lags);
2568 }
2569
2570 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
2571 {
2572 char htgt_pl[MLXSW_REG_HTGT_LEN];
2573 int err;
2574
2575 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
2576 MLXSW_REG_HTGT_INVALID_POLICER,
2577 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2578 MLXSW_REG_HTGT_DEFAULT_TC);
2579 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2580 if (err)
2581 return err;
2582
2583 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
2584 MLXSW_REG_HTGT_INVALID_POLICER,
2585 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
2586 MLXSW_REG_HTGT_DEFAULT_TC);
2587 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2588 }
2589
2590 static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
2591 .clock_init = mlxsw_sp1_ptp_clock_init,
2592 .clock_fini = mlxsw_sp1_ptp_clock_fini,
2593 .init = mlxsw_sp1_ptp_init,
2594 .fini = mlxsw_sp1_ptp_fini,
2595 .receive = mlxsw_sp1_ptp_receive,
2596 .transmitted = mlxsw_sp1_ptp_transmitted,
2597 .hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
2598 .hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
2599 .shaper_work = mlxsw_sp1_ptp_shaper_work,
2600 .get_ts_info = mlxsw_sp1_ptp_get_ts_info,
2601 .get_stats_count = mlxsw_sp1_get_stats_count,
2602 .get_stats_strings = mlxsw_sp1_get_stats_strings,
2603 .get_stats = mlxsw_sp1_get_stats,
2604 };
2605
2606 static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
2607 .clock_init = mlxsw_sp2_ptp_clock_init,
2608 .clock_fini = mlxsw_sp2_ptp_clock_fini,
2609 .init = mlxsw_sp2_ptp_init,
2610 .fini = mlxsw_sp2_ptp_fini,
2611 .receive = mlxsw_sp2_ptp_receive,
2612 .transmitted = mlxsw_sp2_ptp_transmitted,
2613 .hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
2614 .hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
2615 .shaper_work = mlxsw_sp2_ptp_shaper_work,
2616 .get_ts_info = mlxsw_sp2_ptp_get_ts_info,
2617 .get_stats_count = mlxsw_sp2_get_stats_count,
2618 .get_stats_strings = mlxsw_sp2_get_stats_strings,
2619 .get_stats = mlxsw_sp2_get_stats,
2620 };
2621
2622 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2623 unsigned long event, void *ptr);
2624
2625 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2626 const struct mlxsw_bus_info *mlxsw_bus_info,
2627 struct netlink_ext_ack *extack)
2628 {
2629 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2630 int err;
2631
2632 mlxsw_sp->core = mlxsw_core;
2633 mlxsw_sp->bus_info = mlxsw_bus_info;
2634
2635 mlxsw_core_emad_string_tlv_enable(mlxsw_core);
2636
2637 err = mlxsw_sp_base_mac_get(mlxsw_sp);
2638 if (err) {
2639 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
2640 return err;
2641 }
2642
2643 err = mlxsw_sp_kvdl_init(mlxsw_sp);
2644 if (err) {
2645 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
2646 return err;
2647 }
2648
2649 err = mlxsw_sp_fids_init(mlxsw_sp);
2650 if (err) {
2651 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
2652 goto err_fids_init;
2653 }
2654
2655 err = mlxsw_sp_policers_init(mlxsw_sp);
2656 if (err) {
2657 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
2658 goto err_policers_init;
2659 }
2660
2661 err = mlxsw_sp_traps_init(mlxsw_sp);
2662 if (err) {
2663 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
2664 goto err_traps_init;
2665 }
2666
2667 err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
2668 if (err) {
2669 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
2670 goto err_devlink_traps_init;
2671 }
2672
2673 err = mlxsw_sp_buffers_init(mlxsw_sp);
2674 if (err) {
2675 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
2676 goto err_buffers_init;
2677 }
2678
2679 err = mlxsw_sp_lag_init(mlxsw_sp);
2680 if (err) {
2681 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
2682 goto err_lag_init;
2683 }
2684
2685 /* Initialize SPAN before router and switchdev, so that those components
2686 * can call mlxsw_sp_span_respin().
2687 */
2688 err = mlxsw_sp_span_init(mlxsw_sp);
2689 if (err) {
2690 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
2691 goto err_span_init;
2692 }
2693
2694 err = mlxsw_sp_switchdev_init(mlxsw_sp);
2695 if (err) {
2696 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2697 goto err_switchdev_init;
2698 }
2699
2700 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
2701 if (err) {
2702 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
2703 goto err_counter_pool_init;
2704 }
2705
2706 err = mlxsw_sp_afa_init(mlxsw_sp);
2707 if (err) {
2708 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
2709 goto err_afa_init;
2710 }
2711
2712 err = mlxsw_sp_nve_init(mlxsw_sp);
2713 if (err) {
2714 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
2715 goto err_nve_init;
2716 }
2717
2718 err = mlxsw_sp_acl_init(mlxsw_sp);
2719 if (err) {
2720 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
2721 goto err_acl_init;
2722 }
2723
2724 err = mlxsw_sp_router_init(mlxsw_sp, extack);
2725 if (err) {
2726 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
2727 goto err_router_init;
2728 }
2729
2730 if (mlxsw_sp->bus_info->read_frc_capable) {
2731 /* NULL is a valid return value from clock_init */
2732 mlxsw_sp->clock =
2733 mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
2734 mlxsw_sp->bus_info->dev);
2735 if (IS_ERR(mlxsw_sp->clock)) {
2736 err = PTR_ERR(mlxsw_sp->clock);
2737 dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
2738 goto err_ptp_clock_init;
2739 }
2740 }
2741
2742 if (mlxsw_sp->clock) {
2743 /* NULL is a valid return value from ptp_ops->init */
2744 mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
2745 if (IS_ERR(mlxsw_sp->ptp_state)) {
2746 err = PTR_ERR(mlxsw_sp->ptp_state);
2747 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
2748 goto err_ptp_init;
2749 }
2750 }
2751
2752 /* Initialize the netdevice notifier after the router and SPAN are
2753 * initialized, so that the event handler can use router structures
2754 * and call SPAN respin.
2755 */
2756 mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
2757 err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
2758 &mlxsw_sp->netdevice_nb);
2759 if (err) {
2760 dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
2761 goto err_netdev_notifier;
2762 }
2763
2764 err = mlxsw_sp_dpipe_init(mlxsw_sp);
2765 if (err) {
2766 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
2767 goto err_dpipe_init;
2768 }
2769
2770 err = mlxsw_sp_port_module_info_init(mlxsw_sp);
2771 if (err) {
2772 dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
2773 goto err_port_module_info_init;
2774 }
2775
2776 err = mlxsw_sp_ports_create(mlxsw_sp);
2777 if (err) {
2778 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
2779 goto err_ports_create;
2780 }
2781
2782 return 0;
2783
2784 err_ports_create:
2785 mlxsw_sp_port_module_info_fini(mlxsw_sp);
2786 err_port_module_info_init:
2787 mlxsw_sp_dpipe_fini(mlxsw_sp);
2788 err_dpipe_init:
2789 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
2790 &mlxsw_sp->netdevice_nb);
2791 err_netdev_notifier:
2792 if (mlxsw_sp->clock)
2793 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
2794 err_ptp_init:
2795 if (mlxsw_sp->clock)
2796 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
2797 err_ptp_clock_init:
2798 mlxsw_sp_router_fini(mlxsw_sp);
2799 err_router_init:
2800 mlxsw_sp_acl_fini(mlxsw_sp);
2801 err_acl_init:
2802 mlxsw_sp_nve_fini(mlxsw_sp);
2803 err_nve_init:
2804 mlxsw_sp_afa_fini(mlxsw_sp);
2805 err_afa_init:
2806 mlxsw_sp_counter_pool_fini(mlxsw_sp);
2807 err_counter_pool_init:
2808 mlxsw_sp_switchdev_fini(mlxsw_sp);
2809 err_switchdev_init:
2810 mlxsw_sp_span_fini(mlxsw_sp);
2811 err_span_init:
2812 mlxsw_sp_lag_fini(mlxsw_sp);
2813 err_lag_init:
2814 mlxsw_sp_buffers_fini(mlxsw_sp);
2815 err_buffers_init:
2816 mlxsw_sp_devlink_traps_fini(mlxsw_sp);
2817 err_devlink_traps_init:
2818 mlxsw_sp_traps_fini(mlxsw_sp);
2819 err_traps_init:
2820 mlxsw_sp_policers_fini(mlxsw_sp);
2821 err_policers_init:
2822 mlxsw_sp_fids_fini(mlxsw_sp);
2823 err_fids_init:
2824 mlxsw_sp_kvdl_fini(mlxsw_sp);
2825 return err;
2826 }
2827
2828 static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
2829 const struct mlxsw_bus_info *mlxsw_bus_info,
2830 struct netlink_ext_ack *extack)
2831 {
2832 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2833
2834 mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
2835 mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
2836 mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
2837 mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
2838 mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
2839 mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
2840 mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
2841 mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
2842 mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
2843 mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
2844 mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
2845 mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
2846 mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
2847 mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
2848 mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
2849 mlxsw_sp->listeners = mlxsw_sp1_listener;
2850 mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
2851 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
2852
2853 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2854 }
2855
2856 static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
2857 const struct mlxsw_bus_info *mlxsw_bus_info,
2858 struct netlink_ext_ack *extack)
2859 {
2860 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2861
2862 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
2863 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
2864 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
2865 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
2866 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
2867 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
2868 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
2869 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
2870 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
2871 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
2872 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
2873 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
2874 mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
2875 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
2876 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
2877 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
2878
2879 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2880 }
2881
2882 static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
2883 const struct mlxsw_bus_info *mlxsw_bus_info,
2884 struct netlink_ext_ack *extack)
2885 {
2886 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2887
2888 mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
2889 mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
2890 mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
2891 mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
2892 mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
2893 mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
2894 mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
2895 mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
2896 mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
2897 mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
2898 mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
2899 mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
2900 mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
2901 mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
2902 mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
2903 mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
2904
2905 return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
2906 }
2907
2908 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
2909 {
2910 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2911
2912 mlxsw_sp_ports_remove(mlxsw_sp);
2913 mlxsw_sp_port_module_info_fini(mlxsw_sp);
2914 mlxsw_sp_dpipe_fini(mlxsw_sp);
2915 unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
2916 &mlxsw_sp->netdevice_nb);
2917 if (mlxsw_sp->clock) {
2918 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
2919 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
2920 }
2921 mlxsw_sp_router_fini(mlxsw_sp);
2922 mlxsw_sp_acl_fini(mlxsw_sp);
2923 mlxsw_sp_nve_fini(mlxsw_sp);
2924 mlxsw_sp_afa_fini(mlxsw_sp);
2925 mlxsw_sp_counter_pool_fini(mlxsw_sp);
2926 mlxsw_sp_switchdev_fini(mlxsw_sp);
2927 mlxsw_sp_span_fini(mlxsw_sp);
2928 mlxsw_sp_lag_fini(mlxsw_sp);
2929 mlxsw_sp_buffers_fini(mlxsw_sp);
2930 mlxsw_sp_devlink_traps_fini(mlxsw_sp);
2931 mlxsw_sp_traps_fini(mlxsw_sp);
2932 mlxsw_sp_policers_fini(mlxsw_sp);
2933 mlxsw_sp_fids_fini(mlxsw_sp);
2934 mlxsw_sp_kvdl_fini(mlxsw_sp);
2935 }
2936
2937 /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
2938 * 802.1Q FIDs.
2939 */
2940 #define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
2941 VLAN_VID_MASK - 1)
2942
2943 static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
2944 .used_max_mid = 1,
2945 .max_mid = MLXSW_SP_MID_MAX,
2946 .used_flood_tables = 1,
2947 .used_flood_mode = 1,
2948 .flood_mode = 3,
2949 .max_fid_flood_tables = 3,
2950 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
2951 .used_max_ib_mc = 1,
2952 .max_ib_mc = 0,
2953 .used_max_pkey = 1,
2954 .max_pkey = 0,
2955 .used_kvd_sizes = 1,
2956 .kvd_hash_single_parts = 59,
2957 .kvd_hash_double_parts = 41,
2958 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
2959 .swid_config = {
2960 {
2961 .used_type = 1,
2962 .type = MLXSW_PORT_SWID_TYPE_ETH,
2963 }
2964 },
2965 };
2966
2967 static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
2968 .used_max_mid = 1,
2969 .max_mid = MLXSW_SP_MID_MAX,
2970 .used_flood_tables = 1,
2971 .used_flood_mode = 1,
2972 .flood_mode = 3,
2973 .max_fid_flood_tables = 3,
2974 .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
2975 .used_max_ib_mc = 1,
2976 .max_ib_mc = 0,
2977 .used_max_pkey = 1,
2978 .max_pkey = 0,
2979 .swid_config = {
2980 {
2981 .used_type = 1,
2982 .type = MLXSW_PORT_SWID_TYPE_ETH,
2983 }
2984 },
2985 };
2986
2987 static void
2988 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
2989 struct devlink_resource_size_params *kvd_size_params,
2990 struct devlink_resource_size_params *linear_size_params,
2991 struct devlink_resource_size_params *hash_double_size_params,
2992 struct devlink_resource_size_params *hash_single_size_params)
2993 {
2994 u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
2995 KVD_SINGLE_MIN_SIZE);
2996 u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
2997 KVD_DOUBLE_MIN_SIZE);
2998 u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
2999 u32 linear_size_min = 0;
3000
3001 devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
3002 MLXSW_SP_KVD_GRANULARITY,
3003 DEVLINK_RESOURCE_UNIT_ENTRY);
3004 devlink_resource_size_params_init(linear_size_params, linear_size_min,
3005 kvd_size - single_size_min -
3006 double_size_min,
3007 MLXSW_SP_KVD_GRANULARITY,
3008 DEVLINK_RESOURCE_UNIT_ENTRY);
3009 devlink_resource_size_params_init(hash_double_size_params,
3010 double_size_min,
3011 kvd_size - single_size_min -
3012 linear_size_min,
3013 MLXSW_SP_KVD_GRANULARITY,
3014 DEVLINK_RESOURCE_UNIT_ENTRY);
3015 devlink_resource_size_params_init(hash_single_size_params,
3016 single_size_min,
3017 kvd_size - double_size_min -
3018 linear_size_min,
3019 MLXSW_SP_KVD_GRANULARITY,
3020 DEVLINK_RESOURCE_UNIT_ENTRY);
3021 }
3022
3023 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3024 {
3025 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3026 struct devlink_resource_size_params hash_single_size_params;
3027 struct devlink_resource_size_params hash_double_size_params;
3028 struct devlink_resource_size_params linear_size_params;
3029 struct devlink_resource_size_params kvd_size_params;
3030 u32 kvd_size, single_size, double_size, linear_size;
3031 const struct mlxsw_config_profile *profile;
3032 int err;
3033
3034 profile = &mlxsw_sp1_config_profile;
3035 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3036 return -EIO;
3037
3038 mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
3039 &linear_size_params,
3040 &hash_double_size_params,
3041 &hash_single_size_params);
3042
3043 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3044 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3045 kvd_size, MLXSW_SP_RESOURCE_KVD,
3046 DEVLINK_RESOURCE_ID_PARENT_TOP,
3047 &kvd_size_params);
3048 if (err)
3049 return err;
3050
3051 linear_size = profile->kvd_linear_size;
3052 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
3053 linear_size,
3054 MLXSW_SP_RESOURCE_KVD_LINEAR,
3055 MLXSW_SP_RESOURCE_KVD,
3056 &linear_size_params);
3057 if (err)
3058 return err;
3059
3060 err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
3061 if (err)
3062 return err;
3063
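/* Split the hash area (KVD size minus the linear part) between double
 * and single entries by the profile's parts ratio; the SP1 profile
 * uses 41:59, so e.g. a hash area of 200K entries yields roughly 82K
 * double entries, rounded down to the granularity.
 */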
3064 double_size = kvd_size - linear_size;
3065 double_size *= profile->kvd_hash_double_parts;
3066 double_size /= profile->kvd_hash_double_parts +
3067 profile->kvd_hash_single_parts;
3068 double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
3069 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
3070 double_size,
3071 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3072 MLXSW_SP_RESOURCE_KVD,
3073 &hash_double_size_params);
3074 if (err)
3075 return err;
3076
3077 single_size = kvd_size - double_size - linear_size;
3078 err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
3079 single_size,
3080 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3081 MLXSW_SP_RESOURCE_KVD,
3082 &hash_single_size_params);
3083 if (err)
3084 return err;
3085
3086 return 0;
3087 }
3088
3089 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3090 {
3091 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3092 struct devlink_resource_size_params kvd_size_params;
3093 u32 kvd_size;
3094
3095 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3096 return -EIO;
3097
3098 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3099 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3100 MLXSW_SP_KVD_GRANULARITY,
3101 DEVLINK_RESOURCE_UNIT_ENTRY);
3102
3103 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3104 kvd_size, MLXSW_SP_RESOURCE_KVD,
3105 DEVLINK_RESOURCE_ID_PARENT_TOP,
3106 &kvd_size_params);
3107 }
3108
3109 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3110 {
3111 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3112 struct devlink_resource_size_params span_size_params;
3113 u32 max_span;
3114
3115 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3116 return -EIO;
3117
3118 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3119 devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3120 1, DEVLINK_RESOURCE_UNIT_ENTRY);
3121
3122 return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3123 max_span, MLXSW_SP_RESOURCE_SPAN,
3124 DEVLINK_RESOURCE_ID_PARENT_TOP,
3125 &span_size_params);
3126 }
3127
3128 static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
3129 {
3130 int err;
3131
3132 err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
3133 if (err)
3134 return err;
3135
3136 err = mlxsw_sp_resources_span_register(mlxsw_core);
3137 if (err)
3138 goto err_resources_span_register;
3139
3140 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3141 if (err)
3142 goto err_resources_counter_register;
3143
3144 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3145 if (err)
3146 goto err_resources_counter_register;
3147
3148 return 0;
3149
3150 err_resources_counter_register:
3151 err_resources_span_register:
3152 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3153 return err;
3154 }
3155
3156 static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
3157 {
3158 int err;
3159
3160 err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
3161 if (err)
3162 return err;
3163
3164 err = mlxsw_sp_resources_span_register(mlxsw_core);
3165 if (err)
3166 goto err_resources_span_register;
3167
3168 err = mlxsw_sp_counter_resources_register(mlxsw_core);
3169 if (err)
3170 goto err_resources_counter_register;
3171
3172 err = mlxsw_sp_policer_resources_register(mlxsw_core);
3173 if (err)
3174 goto err_resources_counter_register;
3175
3176 return 0;
3177
3178 err_resources_counter_register:
3179 err_resources_span_register:
3180 devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
3181 return err;
3182 }
3183
3184 static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3185 const struct mlxsw_config_profile *profile,
3186 u64 *p_single_size, u64 *p_double_size,
3187 u64 *p_linear_size)
3188 {
3189 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3190 u32 double_size;
3191 int err;
3192
3193 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3194 !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
3195 return -EIO;
3196
3197 /* The hash part is what is left of the KVD after the
3198 * linear part. It is split into the single and double
3199 * sizes according to the parts ratio from the profile.
3200 * Both sizes must be multiples of the granularity from
3201 * the profile. In case the user provided the sizes,
3202 * they are obtained via devlink.
3203 */
3204 err = devlink_resource_size_get(devlink,
3205 MLXSW_SP_RESOURCE_KVD_LINEAR,
3206 p_linear_size);
3207 if (err)
3208 *p_linear_size = profile->kvd_linear_size;
3209
3210 err = devlink_resource_size_get(devlink,
3211 MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3212 p_double_size);
3213 if (err) {
3214 double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3215 *p_linear_size;
3216 double_size *= profile->kvd_hash_double_parts;
3217 double_size /= profile->kvd_hash_double_parts +
3218 profile->kvd_hash_single_parts;
3219 *p_double_size = rounddown(double_size,
3220 MLXSW_SP_KVD_GRANULARITY);
3221 }
3222
3223 err = devlink_resource_size_get(devlink,
3224 MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3225 p_single_size);
3226 if (err)
3227 *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
3228 *p_double_size - *p_linear_size;
3229
3230 /* Check that the results are valid. */
3231 if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
3232 *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
3233 MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
3234 return -EIO;
3235
3236 return 0;
3237 }
3238
3239 static int
3240 mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
3241 struct devlink_param_gset_ctx *ctx)
3242 {
3243 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3244 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3245
3246 ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
3247 return 0;
3248 }
3249
3250 static int
3251 mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
3252 struct devlink_param_gset_ctx *ctx)
3253 {
3254 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
3255 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3256
3257 return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
3258 }
3259
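/* The interval is runtime-tunable through devlink, e.g. (example
 * invocation, "pci/..." standing in for the device's devlink handle):
 * devlink dev param set pci/0000:01:00.0 name acl_region_rehash_interval value 3000 cmode runtime
 */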
3260 static const struct devlink_param mlxsw_sp2_devlink_params[] = {
3261 DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3262 "acl_region_rehash_interval",
3263 DEVLINK_PARAM_TYPE_U32,
3264 BIT(DEVLINK_PARAM_CMODE_RUNTIME),
3265 mlxsw_sp_params_acl_region_rehash_intrvl_get,
3266 mlxsw_sp_params_acl_region_rehash_intrvl_set,
3267 NULL),
3268 };
3269
3270 static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
3271 {
3272 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3273 union devlink_param_value value;
3274 int err;
3275
3276 err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
3277 ARRAY_SIZE(mlxsw_sp2_devlink_params));
3278 if (err)
3279 return err;
3280
3281 value.vu32 = 0;
3282 devlink_param_driverinit_value_set(devlink,
3283 MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
3284 value);
3285 return 0;
3286 }
3287
3288 static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
3289 {
3290 devlink_params_unregister(priv_to_devlink(mlxsw_core),
3291 mlxsw_sp2_devlink_params,
3292 ARRAY_SIZE(mlxsw_sp2_devlink_params));
3293 }
3294
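/* Strip the device Tx header that was prepended for the device before
 * handing a transmitted PTP skb to the PTP code for timestamping.
 */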
3295 static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
3296 struct sk_buff *skb, u8 local_port)
3297 {
3298 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3299
3300 skb_pull(skb, MLXSW_TXHDR_LEN);
3301 mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
3302 }
3303
3304 static struct mlxsw_driver mlxsw_sp1_driver = {
3305 .kind = mlxsw_sp1_driver_name,
3306 .priv_size = sizeof(struct mlxsw_sp),
3307 .fw_req_rev = &mlxsw_sp1_fw_rev,
3308 .fw_filename = MLXSW_SP1_FW_FILENAME,
3309 .init = mlxsw_sp1_init,
3310 .fini = mlxsw_sp_fini,
3311 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
3312 .port_split = mlxsw_sp_port_split,
3313 .port_unsplit = mlxsw_sp_port_unsplit,
3314 .sb_pool_get = mlxsw_sp_sb_pool_get,
3315 .sb_pool_set = mlxsw_sp_sb_pool_set,
3316 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3317 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3318 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3319 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3320 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3321 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3322 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3323 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3324 .trap_init = mlxsw_sp_trap_init,
3325 .trap_fini = mlxsw_sp_trap_fini,
3326 .trap_action_set = mlxsw_sp_trap_action_set,
3327 .trap_group_init = mlxsw_sp_trap_group_init,
3328 .trap_group_set = mlxsw_sp_trap_group_set,
3329 .trap_policer_init = mlxsw_sp_trap_policer_init,
3330 .trap_policer_fini = mlxsw_sp_trap_policer_fini,
3331 .trap_policer_set = mlxsw_sp_trap_policer_set,
3332 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
3333 .txhdr_construct = mlxsw_sp_txhdr_construct,
3334 .resources_register = mlxsw_sp1_resources_register,
3335 .kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
3336 .ptp_transmitted = mlxsw_sp_ptp_transmitted,
3337 .txhdr_len = MLXSW_TXHDR_LEN,
3338 .profile = &mlxsw_sp1_config_profile,
3339 .res_query_enabled = true,
3340 .fw_fatal_enabled = true,
3341 };
3342
3343 static struct mlxsw_driver mlxsw_sp2_driver = {
3344 .kind = mlxsw_sp2_driver_name,
3345 .priv_size = sizeof(struct mlxsw_sp),
3346 .fw_req_rev = &mlxsw_sp2_fw_rev,
3347 .fw_filename = MLXSW_SP2_FW_FILENAME,
3348 .init = mlxsw_sp2_init,
3349 .fini = mlxsw_sp_fini,
3350 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
3351 .port_split = mlxsw_sp_port_split,
3352 .port_unsplit = mlxsw_sp_port_unsplit,
3353 .sb_pool_get = mlxsw_sp_sb_pool_get,
3354 .sb_pool_set = mlxsw_sp_sb_pool_set,
3355 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3356 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3357 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3358 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3359 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3360 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3361 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3362 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3363 .trap_init = mlxsw_sp_trap_init,
3364 .trap_fini = mlxsw_sp_trap_fini,
3365 .trap_action_set = mlxsw_sp_trap_action_set,
3366 .trap_group_init = mlxsw_sp_trap_group_init,
3367 .trap_group_set = mlxsw_sp_trap_group_set,
3368 .trap_policer_init = mlxsw_sp_trap_policer_init,
3369 .trap_policer_fini = mlxsw_sp_trap_policer_fini,
3370 .trap_policer_set = mlxsw_sp_trap_policer_set,
3371 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
3372 .txhdr_construct = mlxsw_sp_txhdr_construct,
3373 .resources_register = mlxsw_sp2_resources_register,
3374 .params_register = mlxsw_sp2_params_register,
3375 .params_unregister = mlxsw_sp2_params_unregister,
3376 .ptp_transmitted = mlxsw_sp_ptp_transmitted,
3377 .txhdr_len = MLXSW_TXHDR_LEN,
3378 .profile = &mlxsw_sp2_config_profile,
3379 .res_query_enabled = true,
3380 .fw_fatal_enabled = true,
3381 };
3382
3383 static struct mlxsw_driver mlxsw_sp3_driver = {
3384 .kind = mlxsw_sp3_driver_name,
3385 .priv_size = sizeof(struct mlxsw_sp),
3386 .fw_req_rev = &mlxsw_sp3_fw_rev,
3387 .fw_filename = MLXSW_SP3_FW_FILENAME,
3388 .init = mlxsw_sp3_init,
3389 .fini = mlxsw_sp_fini,
3390 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
3391 .port_split = mlxsw_sp_port_split,
3392 .port_unsplit = mlxsw_sp_port_unsplit,
3393 .sb_pool_get = mlxsw_sp_sb_pool_get,
3394 .sb_pool_set = mlxsw_sp_sb_pool_set,
3395 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3396 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3397 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3398 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3399 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3400 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3401 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3402 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3403 .trap_init = mlxsw_sp_trap_init,
3404 .trap_fini = mlxsw_sp_trap_fini,
3405 .trap_action_set = mlxsw_sp_trap_action_set,
3406 .trap_group_init = mlxsw_sp_trap_group_init,
3407 .trap_group_set = mlxsw_sp_trap_group_set,
3408 .trap_policer_init = mlxsw_sp_trap_policer_init,
3409 .trap_policer_fini = mlxsw_sp_trap_policer_fini,
3410 .trap_policer_set = mlxsw_sp_trap_policer_set,
3411 .trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
3412 .txhdr_construct = mlxsw_sp_txhdr_construct,
3413 .resources_register = mlxsw_sp2_resources_register,
3414 .params_register = mlxsw_sp2_params_register,
3415 .params_unregister = mlxsw_sp2_params_unregister,
3416 .ptp_transmitted = mlxsw_sp_ptp_transmitted,
3417 .txhdr_len = MLXSW_TXHDR_LEN,
3418 .profile = &mlxsw_sp2_config_profile,
3419 .res_query_enabled = true,
3420 .fw_fatal_enabled = true,
3421 };
3422
3423 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3424 {
3425 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3426 }
3427
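/* Callback for netdev_walk_all_lower_dev(); returning a non-zero value
 * stops the walk once the first mlxsw_sp port is found among the lower
 * devices.
 */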
3428 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
3429 {
3430 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
3431 int ret = 0;
3432
3433 if (mlxsw_sp_port_dev_check(lower_dev)) {
3434 *p_mlxsw_sp_port = netdev_priv(lower_dev);
3435 ret = 1;
3436 }
3437
3438 return ret;
3439 }
3440
3441 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3442 {
3443 struct mlxsw_sp_port *mlxsw_sp_port;
3444
3445 if (mlxsw_sp_port_dev_check(dev))
3446 return netdev_priv(dev);
3447
3448 mlxsw_sp_port = NULL;
3449 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3450
3451 return mlxsw_sp_port;
3452 }
3453
3454 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3455 {
3456 struct mlxsw_sp_port *mlxsw_sp_port;
3457
3458 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3459 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3460 }
3461
3462 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3463 {
3464 struct mlxsw_sp_port *mlxsw_sp_port;
3465
3466 if (mlxsw_sp_port_dev_check(dev))
3467 return netdev_priv(dev);
3468
3469 mlxsw_sp_port = NULL;
3470 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3471 &mlxsw_sp_port);
3472
3473 return mlxsw_sp_port;
3474 }
3475
3476 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3477 {
3478 struct mlxsw_sp_port *mlxsw_sp_port;
3479
3480 rcu_read_lock();
3481 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3482 if (mlxsw_sp_port)
3483 dev_hold(mlxsw_sp_port->dev);
3484 rcu_read_unlock();
3485 return mlxsw_sp_port;
3486 }
3487
3488 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3489 {
3490 dev_put(mlxsw_sp_port->dev);
3491 }
3492
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

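/* Return the LAG ID already assigned to 'lag_dev', or the first free one
 * if the device is not yet mapped. -EBUSY means all MAX_LAG entries are
 * in use.
 */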
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

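/* Find a free member slot (port index) within the given LAG */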
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

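/* Enslave the port to a LAG: lazily create the hardware LAG on first use,
 * add the port as a collector and record the mapping in the core.
 */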
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return 0;

err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave bridges they
	 * are member of.
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

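/* Enable Tx on a LAG member: first enable collection, then add the port
 * to the distributor. Disabling below reverses the order, with rollback
 * on failure in both directions.
 */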
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

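/* Set the port's STP state to forwarding or discarding for all possible
 * VLANs in a single SPMS register write.
 */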
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

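/* Prepare a port for OVS: switch it to virtual port mode, set all VLANs
 * to forwarding, make the port a member of VLANs 1..4094 and disable
 * learning on them. mlxsw_sp_port_ovs_leave() undoes this in reverse.
 */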
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

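/* In a VLAN-aware bridge, each VxLAN device must be mapped to a different
 * VLAN; return false if two VxLAN lowers of the bridge share one.
 */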
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

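/* Validate (PRECHANGEUPPER) and react to (CHANGEUPPER) topology changes
 * above a physical port: bridge, LAG, OVS, macvlan and VLAN uppers.
 */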
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
			return -EINVAL;
		}
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

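/* Dispatch events on a VLAN device according to its real device type */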
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
							    event, ptr, vid);

	return 0;
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) && !netif_is_macvlan(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (netif_is_macvlan(upper_dev) &&
		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	/* VRF enslavement is handled in mlxsw_sp_netdevice_vrf_event() */
	NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");

	return -EOPNOTSUPP;
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

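/* Handle VxLAN device events that affect offload: joining / leaving an
 * offloaded bridge, and the device going up or down underneath one.
 */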
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

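/* Top-level netdevice notifier: invalidate SPAN entries on unregister,
 * re-resolve SPAN, then dispatch the event according to the netdev type.
 */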
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}
	mlxsw_sp_span_respin(mlxsw_sp);

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (event == NETDEV_PRE_CHANGEADDR ||
		 event == NETDEV_CHANGEADDR ||
		 event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_valid_event,
};

static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inet6addr_valid_event,
};

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

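/* Register the address validator notifiers, then the core and PCI drivers
 * for all three Spectrum generations; unwind in reverse order on failure.
 */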
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		goto err_sp1_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	return 0;

err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
	unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);