/* Source: mirror_ubuntu-artful-kernel.git —
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * (commit subject: "mlxsw: spectrum: Implement common FID core")
 */
1 /*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/ethtool.h>
44 #include <linux/slab.h>
45 #include <linux/device.h>
46 #include <linux/skbuff.h>
47 #include <linux/if_vlan.h>
48 #include <linux/if_bridge.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/bitops.h>
52 #include <linux/list.h>
53 #include <linux/notifier.h>
54 #include <linux/dcbnl.h>
55 #include <linux/inetdevice.h>
56 #include <net/switchdev.h>
57 #include <net/pkt_cls.h>
58 #include <net/tc_act/tc_mirred.h>
59 #include <net/netevent.h>
60 #include <net/tc_act/tc_sample.h>
61
62 #include "spectrum.h"
63 #include "pci.h"
64 #include "core.h"
65 #include "reg.h"
66 #include "port.h"
67 #include "trap.h"
68 #include "txheader.h"
69 #include "spectrum_cnt.h"
70 #include "spectrum_dpipe.h"
71 #include "../mlxfw/mlxfw.h"
72
/* Minimum firmware revision supported by this driver.  An older revision
 * detected at init time triggers an automatic flash of MLXSW_SP_FW_FILENAME
 * (see mlxsw_sp_fw_rev_validate()).
 */
#define MLXSW_FWREV_MAJOR 13
#define MLXSW_FWREV_MINOR 1420
#define MLXSW_FWREV_SUBMINOR 122

static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
	.major = MLXSW_FWREV_MAJOR,
	.minor = MLXSW_FWREV_MINOR,
	.subminor = MLXSW_FWREV_SUBMINOR
};

/* Firmware image file name derived from the revision above:
 * "mlxsw_spectrum-13.1420.122.mfa2".
 */
#define MLXSW_SP_FW_FILENAME \
	"mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
	"." __stringify(MLXSW_FWREV_MINOR) \
	"." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
90
/* Tx header field accessors (generated by MLXSW_ITEM32: offset, shift, width).
 * The Tx header is prepended to every packet handed to the device; see
 * mlxsw_sp_txhdr_construct().
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
158
/* Adapter glue between the generic mlxfw flashing core and this driver:
 * embeds the mlxfw_dev so the callbacks below can recover the mlxsw_sp
 * instance via container_of().
 */
struct mlxsw_sp_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_sp *mlxsw_sp;
};
163
/* mlxfw callback: query download parameters of a firmware component via
 * the MCQI register.
 */
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
				    u16 component_index, u32 *p_max_size,
				    u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
			      p_max_write_size);

	/* Clamp: at least 2 alignment bits, and no larger per-write chunk
	 * than a single MCDA transaction can carry.
	 */
	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size,
				  MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}
186
/* mlxfw callback: acquire the firmware-update FSM handle.  First queries
 * MCC for the current state; if the FSM is not idle another update is in
 * flight, so bail out with -EBUSY.  Otherwise lock the handle just read.
 */
static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
			   0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
210
211 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
212 u32 fwhandle, u16 component_index,
213 u32 component_size)
214 {
215 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
216 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
217 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
218 char mcc_pl[MLXSW_REG_MCC_LEN];
219
220 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
221 component_index, fwhandle, component_size);
222 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
223 }
224
/* mlxfw callback: download one data block of the firmware image at the
 * given offset via the MCDA register.
 */
static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
				       u32 fwhandle, u8 *data, u16 size,
				       u32 offset)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcda_pl[MLXSW_REG_MCDA_LEN];

	mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
}
237
/* mlxfw callback: ask the device to verify a fully-downloaded component
 * (MCC VERIFY_COMPONENT instruction).
 */
static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
					 u32 fwhandle, u16 component_index)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
			   component_index, fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
250
/* mlxfw callback: activate the newly flashed firmware image
 * (MCC ACTIVATE instruction).
 */
static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
			   fwhandle, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
262
/* mlxfw callback: read back the current FSM state and error code via MCC.
 * The error code is clamped to MLXFW_FSM_STATE_ERR_MAX so unknown device
 * codes still map to a valid enum value.
 */
static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				    enum mlxfw_fsm_state *fsm_state,
				    enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	u8 error_code;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}
286
/* mlxfw callback: cancel an in-progress firmware update (MCC CANCEL).
 * Best-effort: the write status is intentionally ignored, as this runs
 * on error/cleanup paths.
 */
static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
298
/* mlxfw callback: release the firmware-update handle taken by
 * mlxsw_sp_fsm_lock() (MCC RELEASE_UPDATE_HANDLE).  Best-effort; the
 * write status is ignored on this cleanup path.
 */
static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
		container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
	char mcc_pl[MLXSW_REG_MCC_LEN];

	mlxsw_reg_mcc_pack(mcc_pl,
			   MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			   fwhandle, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
}
311
/* Operations table handed to the generic mlxfw flashing core; each entry
 * wraps one step of the device's firmware-update state machine.
 */
static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
	.component_query = mlxsw_sp_component_query,
	.fsm_lock = mlxsw_sp_fsm_lock,
	.fsm_component_update = mlxsw_sp_fsm_component_update,
	.fsm_block_download = mlxsw_sp_fsm_block_download,
	.fsm_component_verify = mlxsw_sp_fsm_component_verify,
	.fsm_activate = mlxsw_sp_fsm_activate,
	.fsm_query_state = mlxsw_sp_fsm_query_state,
	.fsm_cancel = mlxsw_sp_fsm_cancel,
	.fsm_release = mlxsw_sp_fsm_release
};
323
324 static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
325 const struct mlxsw_fw_rev *b)
326 {
327 if (a->major != b->major)
328 return a->major > b->major;
329 if (a->minor != b->minor)
330 return a->minor > b->minor;
331 return a->subminor >= b->subminor;
332 }
333
334 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
335 {
336 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
337 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
338 .mlxfw_dev = {
339 .ops = &mlxsw_sp_mlxfw_dev_ops,
340 .psid = mlxsw_sp->bus_info->psid,
341 .psid_size = strlen(mlxsw_sp->bus_info->psid),
342 },
343 .mlxsw_sp = mlxsw_sp
344 };
345 const struct firmware *firmware;
346 int err;
347
348 if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
349 return 0;
350
351 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
352 rev->major, rev->minor, rev->subminor);
353 dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
354 MLXSW_SP_FW_FILENAME);
355
356 err = request_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
357 mlxsw_sp->bus_info->dev);
358 if (err) {
359 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
360 MLXSW_SP_FW_FILENAME);
361 return err;
362 }
363
364 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
365 release_firmware(firmware);
366 return err;
367 }
368
/* Read a flow counter's packet and byte values via the MGPC register
 * (NOP opcode: read without clearing).  Returns 0 on success, negative
 * errno on a failed register query.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}
385
/* Zero a flow counter's packet/byte values (MGPC CLEAR opcode). */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}
395
396 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
397 unsigned int *p_counter_index)
398 {
399 int err;
400
401 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
402 p_counter_index);
403 if (err)
404 return err;
405 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
406 if (err)
407 goto err_counter_clear;
408 return 0;
409
410 err_counter_clear:
411 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
412 *p_counter_index);
413 return err;
414 }
415
/* Return a flow counter to the FLOW sub-pool. */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
422
/* Prepend and fill the Tx header on @skb.  The caller must guarantee
 * MLXSW_TXHDR_LEN bytes of headroom.  Packets built here are control
 * packets directed at tx_info->local_port.
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
438
/* Program the spanning-tree state of (@mlxsw_sp_port, @vid) via the SPMS
 * register.  @state is a bridge BR_STATE_* value; listening/disabled/
 * blocking all map to hardware "discarding".  SPMS is too large for the
 * stack, hence the kmalloc.  Returns 0 or a negative errno.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
473
/* Read the switch base MAC address from the SPAD register into
 * mlxsw_sp->base_mac; per-port addresses are derived from it later
 * (see mlxsw_sp_port_dev_addr_init()).
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
485
/* Allocate the SPAN (port mirroring) entry table, sized by the device's
 * MAX_SPAN resource, and initialize each entry's bound-ports list.
 * Returns -EIO if the resource is not exposed, -ENOMEM on allocation
 * failure, 0 on success.
 */
static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}
506
/* Free the SPAN entry table.  All inspected ports should already have
 * been unbound; a non-empty bound-ports list here is a driver bug, hence
 * the WARN.
 */
static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}
518
/* Create a SPAN entry mirroring to @port: find a free slot in the entry
 * table, program it as a port analyzer via the MPAT register, and return
 * it with an initial reference.  Returns NULL when the table is full or
 * the register write fails.
 */
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index;
	int i;
	int err;

	/* find a free entry to use */
	index = -1;
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}
554
/* Disable a SPAN entry in hardware (MPAT with enable=false) and mark its
 * table slot free for reuse.
 */
static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}
566
567 static struct mlxsw_sp_span_entry *
568 mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
569 {
570 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
571 int i;
572
573 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
574 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
575
576 if (curr->used && curr->local_port == port->local_port)
577 return curr;
578 }
579 return NULL;
580 }
581
582 static struct mlxsw_sp_span_entry
583 *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
584 {
585 struct mlxsw_sp_span_entry *span_entry;
586
587 span_entry = mlxsw_sp_span_entry_find(port);
588 if (span_entry) {
589 /* Already exists, just take a reference */
590 span_entry->ref_count++;
591 return span_entry;
592 }
593
594 return mlxsw_sp_span_entry_create(port);
595 }
596
/* Drop one reference on @span_entry; destroys it when the count hits
 * zero.  Always returns 0.
 */
static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}
605
/* True if @port is bound to any SPAN entry as an egress-mirrored port;
 * used to decide whether its mirroring shared buffer must track MTU
 * changes (see mlxsw_sp_span_port_mtu_update()).
 */
static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}
623
/* Convert a port MTU to the mirroring shared-buffer size in cells.
 * The mtu * 5 / 2 + 1 sizing presumably provides headroom for mirrored
 * traffic bursts — factor not derivable from this file; confirm against
 * hardware documentation.
 */
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
629
/* Resize @port's mirroring shared buffer (SBIB) after an MTU change.
 * Only ports that are egress-mirrored have such a buffer; for all others
 * this is a no-op.  Returns 0 or a negative errno.
 */
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}
652
653 static struct mlxsw_sp_span_inspected_port *
654 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
655 struct mlxsw_sp_span_entry *span_entry)
656 {
657 struct mlxsw_sp_span_inspected_port *p;
658
659 list_for_each_entry(p, &span_entry->bound_ports_list, list)
660 if (port->local_port == p->local_port)
661 return p;
662 return NULL;
663 }
664
/* Bind @port to @span_entry as a mirrored (inspected) port.  For egress
 * mirroring a shared buffer (SBIB) sized from the port's MTU is set up
 * first; the MPAR register then attaches the port to the analyzer, and a
 * tracking record is appended to the entry's bound-ports list.  Failure
 * paths tear down the SBIB buffer again.  Returns 0 or a negative errno.
 */
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	/* roll back the egress shared buffer set up above */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}
716
/* Undo mlxsw_sp_span_inspected_port_bind(): detach @port from the
 * analyzer (MPAR), release the egress shared buffer if present, drop the
 * SPAN entry reference, and free the tracking record.  Silently returns
 * if the port is not bound to @span_entry.
 */
static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}
748
/* Start mirroring traffic of @from (direction @type) to analyzer port
 * @to: take a reference on @to's SPAN entry and bind @from to it.  The
 * reference is dropped again if binding fails.  Returns 0 or a negative
 * errno.
 */
static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}
774
/* Stop mirroring @from to analyzer port @to; the unbind drops the entry
 * reference taken at mirror-add time.  Logs and returns if no matching
 * SPAN entry exists.
 */
static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}
791
/* Enable/disable packet sampling on a port at the given rate (MPSC
 * register).
 */
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}
801
/* Set the administrative (up/down) state of a port via the PAOS
 * register.
 */
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
813
/* Program a port's hardware MAC address via the PPAD register. */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
824
/* Derive the port's MAC from the switch base MAC by adding the local
 * port number to the last byte (note: may wrap within that byte), store
 * it on the netdev and program it into hardware.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
834
/* Program the port MTU via the PMTU register.  The wire MTU includes the
 * Tx header and Ethernet header on top of the L3 MTU; the device's
 * maximum is queried first and -EINVAL returned if exceeded.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
855
/* Assign a local port to a switch partition (PSPA register); works off a
 * raw local port number so it can be used before the port struct exists.
 */
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
864
/* Convenience wrapper around __mlxsw_sp_port_swid_set() for an
 * instantiated port.
 */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}
872
/* Enable/disable virtual-port (VLAN-aware) mode on a port via the SVPE
 * register.
 */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
881
/* Enable/disable MAC learning for a single VID on a port (SPVMLR
 * register; packed over the one-VID range vid..vid).  SPVMLR is too
 * large for the stack, hence the kmalloc.  Returns 0 or a negative
 * errno.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
898
/* Program the port's PVID in hardware (SPVID register); does not touch
 * the cached mlxsw_sp_port->pvid — see mlxsw_sp_port_pvid_set().
 */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
908
/* Allow or block untagged frames on a port (SPAFT register). */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
918
919 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
920 {
921 int err;
922
923 if (!vid) {
924 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
925 if (err)
926 return err;
927 } else {
928 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
929 if (err)
930 return err;
931 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
932 if (err)
933 goto err_port_allow_untagged_set;
934 }
935
936 mlxsw_sp_port->pvid = vid;
937 return 0;
938
939 err_port_allow_untagged_set:
940 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
941 return err;
942 }
943
/* Create the system-port to local-port mapping for this port (SSPR
 * register).
 */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
953
/* Query a local port's module mapping via the PMLP register: module
 * number and Tx lane of the first lane (index 0) plus the port width.
 */
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
970
/* Map a local port onto @width consecutive lanes of @module starting at
 * @lane (PMLP register); each lane index is used for both Rx and Tx.
 */
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
986
/* Unmap @local_port from its module by programming a zero-lane width. */
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
995
/* ndo_open: administratively enable the port, then start the TX queue. */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}
1007
/* ndo_stop: stop the TX queue, then administratively disable the port. */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
1015
/* ndo_start_xmit: prepend the mlxsw TX header and hand the skb to the
 * core for transmission. Per-CPU TX stats are updated on success; the
 * skb is dropped (and tx_dropped bumped) on any failure path.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Make sure there is enough headroom for the TX header, cloning
	 * into a larger buffer if necessary.
	 */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	/* Pad short frames to the minimum Ethernet length; on failure the
	 * skb has already been freed by eth_skb_pad().
	 */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
/* Intentionally empty ndo_set_rx_mode stub so the stack can always call
 * it. NOTE(review): presumably no RX-mode programming is needed for the
 * ASIC here — confirm against the switchdev model.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
1076
/* ndo_set_mac_address: validate the new address, program it into the
 * device, and only then update the netdev's copy.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
1092
/* Xon/Xoff threshold for a priority group buffer: two MTUs worth of
 * buffer cells.
 */
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}
1098
1099 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
1100
1101 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1102 u16 delay)
1103 {
1104 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
1105 BITS_PER_BYTE));
1106 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
1107 mtu);
1108 }
1109
1110 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
1111 * Assumes 100m cable and maximum MTU.
1112 */
1113 #define MLXSW_SP_PAUSE_DELAY 58752
1114
1115 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1116 u16 delay, bool pfc, bool pause)
1117 {
1118 if (pfc)
1119 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
1120 else if (pause)
1121 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
1122 else
1123 return 0;
1124 }
1125
1126 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
1127 bool lossy)
1128 {
1129 if (lossy)
1130 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
1131 else
1132 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
1133 thres);
1134 }
1135
/* Configure the port's headroom buffers (PBMC register). For every
 * priority group (PG) that has at least one priority mapped to it,
 * compute the buffer size and threshold based on MTU, PAUSE settings
 * and (optionally) the DCB PFC configuration in @my_pfc.
 */
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	/* Read-modify-write: start from the current PBMC contents. */
	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		/* Only configure PG i if some priority j maps to it;
		 * the PG is lossless if any such priority has PFC enabled.
		 */
		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
1177
1178 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
1179 int mtu, bool pause_en)
1180 {
1181 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
1182 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
1183 struct ieee_pfc *my_pfc;
1184 u8 *prio_tc;
1185
1186 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
1187 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
1188
1189 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
1190 pause_en, my_pfc);
1191 }
1192
/* ndo_change_mtu: update headroom, SPAN and the device MTU in that
 * order, rolling back already-applied steps (using the old dev->mtu)
 * on failure.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
1217
/* Sum the per-CPU software (CPU-path) counters into @stats, using the
 * u64_stats seqcount to read each CPU's counters consistently.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		/* Retry until we observe a consistent snapshot of this
		 * CPU's counters.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
1249
1250 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
1251 {
1252 switch (attr_id) {
1253 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1254 return true;
1255 }
1256
1257 return false;
1258 }
1259
1260 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
1261 void *sp)
1262 {
1263 switch (attr_id) {
1264 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1265 return mlxsw_sp_port_get_sw_stats64(dev, sp);
1266 }
1267
1268 return -EINVAL;
1269 }
1270
/* Fetch one PPCNT counter group (at priority/TC @prio) for the port into
 * the caller-supplied @ppcnt_pl payload buffer.
 */
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				       int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
1280
/* Fill @stats from the IEEE 802.3 PPCNT counter group. rx_length_errors
 * and rx_errors are derived by summing the relevant hardware counters.
 */
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	/* Length errors: too short, out of range, or too long. */
	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}
1319
1320 static void update_stats_cache(struct work_struct *work)
1321 {
1322 struct mlxsw_sp_port *mlxsw_sp_port =
1323 container_of(work, struct mlxsw_sp_port,
1324 hw_stats.update_dw.work);
1325
1326 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1327 goto out;
1328
1329 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1330 mlxsw_sp_port->hw_stats.cache);
1331
1332 out:
1333 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
1334 MLXSW_HW_STATS_UPDATE_TIME);
1335 }
1336
/* ndo_get_stats64: return the stats from a cache that is updated
 * periodically by update_stats_cache(), as this function might get
 * called in an atomic context where a firmware query is not possible.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}
1348
/* Program port membership for the VLAN range [vid_begin, vid_end] via
 * the SPVM register. The payload is heap-allocated because SPVM is too
 * large for the stack.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
1367
1368 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1369 u16 vid_end, bool is_member, bool untagged)
1370 {
1371 u16 vid, vid_e;
1372 int err;
1373
1374 for (vid = vid_begin; vid <= vid_end;
1375 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1376 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1377 vid_end);
1378
1379 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1380 is_member, untagged);
1381 if (err)
1382 return err;
1383 }
1384
1385 return 0;
1386 }
1387
/* Release every VLAN entry still tracked on the port. The _safe iterator
 * is required because mlxsw_sp_port_vlan_put() deletes the entry from
 * the list and frees it.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list)
		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
1396
/* Add @vid to the port's hardware filter and allocate a tracking entry
 * for it. VLAN 1 is configured as untagged. Returns the new entry or an
 * ERR_PTR; on allocation failure the hardware change is rolled back.
 */
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == 1;
	int err;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
1424
/* Tear down a VLAN entry: unlink and free it, then remove the VLAN from
 * the hardware filter. The vid is saved before kfree() so the hardware
 * update does not touch freed memory.
 */
static void
mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
1435
1436 struct mlxsw_sp_port_vlan *
1437 mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1438 {
1439 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1440
1441 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1442 if (mlxsw_sp_port_vlan)
1443 return mlxsw_sp_port_vlan;
1444
1445 return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
1446 }
1447
/* Release a VLAN entry: detach it from its bridge port or router (a set
 * FID with no bridge port indicates router usage) before destroying it.
 */
void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
1459
1460 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1461 __be16 __always_unused proto, u16 vid)
1462 {
1463 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1464
1465 /* VLAN 0 is added to HW filter when device goes up, but it is
1466 * reserved in our case, so simply return.
1467 */
1468 if (!vid)
1469 return 0;
1470
1471 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
1472 }
1473
1474 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1475 __be16 __always_unused proto, u16 vid)
1476 {
1477 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1478 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1479
1480 /* VLAN 0 is removed from HW filter when device goes down, but
1481 * it is reserved in our case, so simply return.
1482 */
1483 if (!vid)
1484 return 0;
1485
1486 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1487 if (!mlxsw_sp_port_vlan)
1488 return 0;
1489 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1490
1491 return 0;
1492 }
1493
/* ndo_get_phys_port_name: "p<module>" for a regular port, or
 * "p<module>s<split>" for a split port, where the split index is derived
 * from lane / width. Fails with -EINVAL if the name does not fit in @len.
 * NOTE(review): assumes width is non-zero for split ports — confirm the
 * mapping is always populated before this can be called.
 */
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
1514
1515 static struct mlxsw_sp_port_mall_tc_entry *
1516 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1517 unsigned long cookie) {
1518 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1519
1520 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1521 if (mall_tc_entry->cookie == cookie)
1522 return mall_tc_entry;
1523
1524 return NULL;
1525 }
1526
/* Set up a matchall mirror action: resolve the mirred target netdev,
 * verify it is a Spectrum port, record the destination in @mirror and
 * install the SPAN session in hardware.
 */
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
{
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;
	int ifindex;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
	if (!to_dev) {
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
		return -EINVAL;
	}

	/* Mirroring is only possible between ports of the same ASIC. */
	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
		return -EOPNOTSUPP;
	}
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
}
1557
1558 static void
1559 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1560 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1561 {
1562 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1563 enum mlxsw_sp_span_type span_type;
1564 struct mlxsw_sp_port *to_port;
1565
1566 to_port = mlxsw_sp->ports[mirror->to_local_port];
1567 span_type = mirror->ingress ?
1568 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1569 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
1570 }
1571
/* Set up a matchall sample action. Only one sampler per port is allowed;
 * the psample group pointer is published with RCU before the hardware is
 * armed, and cleared again if arming fails.
 */
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct tc_cls_matchall_offload *cls,
				      const struct tc_action *a,
				      bool ingress)
{
	int err;

	if (!mlxsw_sp_port->sample)
		return -EOPNOTSUPP;
	if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
		return -EEXIST;
	}
	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
			   tcf_sample_psample_group(a));
	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);

	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
	if (err)
		goto err_port_sample_set;
	return 0;

err_port_sample_set:
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
	return err;
}
1606
/* Disable hardware sampling on the port and clear the published psample
 * group pointer.
 */
static void
mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!mlxsw_sp_port->sample)
		return;

	mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
	RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
}
1616
/* Offload a matchall classifier. Only a single action is supported:
 * either an egress-mirror (mirred) or a sample action, and only for the
 * all-protocols classifier.
 */
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  __be16 protocol,
					  struct tc_cls_matchall_offload *cls,
					  bool ingress)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
		return -EOPNOTSUPP;
	}

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	if (!mall_tc_entry)
		return -ENOMEM;
	mall_tc_entry->cookie = cls->cookie;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
							    a, ingress);
	} else {
		err = -EOPNOTSUPP;
	}

	if (err)
		goto err_add_action;

	/* Track the entry so it can be found again on destroy. */
	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
	return 0;

err_add_action:
	kfree(mall_tc_entry);
	return err;
}
1665
/* Remove an offloaded matchall classifier: look the entry up by its TC
 * cookie, undo the type-specific hardware configuration and free it.
 */
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
{
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
		return;
	}
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);
		break;
	case MLXSW_SP_PORT_MALL_SAMPLE:
		mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
		break;
	default:
		WARN_ON(1);
	}

	kfree(mall_tc_entry);
}
1693
/* ndo_setup_tc: dispatch matchall and flower classifier commands to the
 * respective offload handlers. Direction (ingress/egress) is derived
 * from the qdisc handle.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	switch (tc->type) {
	case TC_SETUP_MATCHALL:
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
							      proto,
							      tc->cls_mall,
							      ingress);
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
						       tc->cls_mall);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
						       proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
						tc->cls_flower);
			return 0;
		case TC_CLSFLOWER_STATS:
			return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
						     tc->cls_flower);
		default:
			return -EOPNOTSUPP;
		}
	}

	return -EOPNOTSUPP;
}
1734
/* Netdev operations for Spectrum ports; FDB and bridge operations are
 * delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
1756
/* ethtool get_drvinfo: report driver name/version, firmware revision and
 * the underlying bus device name.
 */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
1774
/* ethtool get_pauseparam: report the cached PAUSE configuration. */
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}
1783
/* Program the requested PAUSE Rx/Tx enables into the PFCC register. */
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}
1796
/* ethtool set_pauseparam: PAUSE is mutually exclusive with PFC and
 * autoneg of PAUSE is not supported. Headroom must be sized for the new
 * PAUSE setting before the PFCC register is written; on PFCC failure the
 * headroom is restored to match the previous PAUSE state.
 */
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	/* Cache the new settings only after hardware accepted them. */
	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
1836
/* One ethtool statistic: its display name and the PPCNT payload getter.
 * Counters reported by hardware in buffer cells (rather than bytes) set
 * cells_bytes so they can be converted before being reported.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
	bool cells_bytes;
};

/* IEEE 802.3 counter group. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

/* Per-priority counter group; each entry is reported once per priority. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

/* Per-TC counter group; each entry is reported once per traffic class. */
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
		.cells_bytes = true,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

/* Total ethtool stats: base group plus per-priority and per-TC groups
 * replicated for each of the eight priorities/TCs.
 */
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
1979
/* Emit "<name>_<prio>" strings for every per-priority counter, advancing
 * the caller's string cursor by ETH_GSTRING_LEN per entry.
 */
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}
1990
/* Emit "<name>_<tc>" strings for every per-TC counter, advancing the
 * caller's string cursor by ETH_GSTRING_LEN per entry.
 */
static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}
2001
/* ethtool get_strings: base counter names first, then per-priority and
 * per-TC names for each of the eight priorities/TCs. Order must match
 * __mlxsw_sp_port_get_stats().
 */
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}
2025
2026 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2027 enum ethtool_phys_id_state state)
2028 {
2029 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2030 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2031 char mlcr_pl[MLXSW_REG_MLCR_LEN];
2032 bool active;
2033
2034 switch (state) {
2035 case ETHTOOL_ID_ACTIVE:
2036 active = true;
2037 break;
2038 case ETHTOOL_ID_INACTIVE:
2039 active = false;
2040 break;
2041 default:
2042 return -EOPNOTSUPP;
2043 }
2044
2045 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2046 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
2047 }
2048
2049 static int
2050 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2051 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2052 {
2053 switch (grp) {
2054 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2055 *p_hw_stats = mlxsw_sp_port_hw_stats;
2056 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2057 break;
2058 case MLXSW_REG_PPCNT_PRIO_CNT:
2059 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2060 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2061 break;
2062 case MLXSW_REG_PPCNT_TC_CNT:
2063 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2064 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
2065 break;
2066 default:
2067 WARN_ON(1);
2068 return -EOPNOTSUPP;
2069 }
2070 return 0;
2071 }
2072
2073 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2074 enum mlxsw_reg_ppcnt_grp grp, int prio,
2075 u64 *data, int data_index)
2076 {
2077 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2078 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2079 struct mlxsw_sp_port_hw_stats *hw_stats;
2080 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
2081 int i, len;
2082 int err;
2083
2084 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2085 if (err)
2086 return;
2087 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
2088 for (i = 0; i < len; i++) {
2089 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
2090 if (!hw_stats[i].cells_bytes)
2091 continue;
2092 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2093 data[data_index + i]);
2094 }
2095 }
2096
2097 static void mlxsw_sp_port_get_stats(struct net_device *dev,
2098 struct ethtool_stats *stats, u64 *data)
2099 {
2100 int i, data_index = 0;
2101
2102 /* IEEE 802.3 Counters */
2103 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2104 data, data_index);
2105 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2106
2107 /* Per-Priority Counters */
2108 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2109 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2110 data, data_index);
2111 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2112 }
2113
2114 /* Per-TC Counters */
2115 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2116 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2117 data, data_index);
2118 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
2119 }
2120 }
2121
2122 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2123 {
2124 switch (sset) {
2125 case ETH_SS_STATS:
2126 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
2127 default:
2128 return -EOPNOTSUPP;
2129 }
2130 }
2131
/* One row of the PTYS <-> ethtool translation table: a device speed
 * capability bit mask together with the matching ethtool link mode bit
 * and numeric speed.
 */
struct mlxsw_sp_port_link_mode {
	enum ethtool_link_mode_bit_indices mask_ethtool; /* ethtool bit */
	u32 mask;	/* PTYS eth_proto capability bit(s) */
	u32 speed;	/* SPEED_* value in Mb/s */
};
2137
2138 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2139 {
2140 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2141 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2142 .speed = SPEED_100,
2143 },
2144 {
2145 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2146 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2147 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2148 .speed = SPEED_1000,
2149 },
2150 {
2151 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2152 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2153 .speed = SPEED_10000,
2154 },
2155 {
2156 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2157 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2158 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2159 .speed = SPEED_10000,
2160 },
2161 {
2162 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2163 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2164 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2165 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2166 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2167 .speed = SPEED_10000,
2168 },
2169 {
2170 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2171 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2172 .speed = SPEED_20000,
2173 },
2174 {
2175 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2176 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2177 .speed = SPEED_40000,
2178 },
2179 {
2180 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2181 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2182 .speed = SPEED_40000,
2183 },
2184 {
2185 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2186 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2187 .speed = SPEED_40000,
2188 },
2189 {
2190 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2191 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2192 .speed = SPEED_40000,
2193 },
2194 {
2195 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2196 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2197 .speed = SPEED_25000,
2198 },
2199 {
2200 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2201 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2202 .speed = SPEED_25000,
2203 },
2204 {
2205 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2206 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2207 .speed = SPEED_25000,
2208 },
2209 {
2210 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2211 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2212 .speed = SPEED_25000,
2213 },
2214 {
2215 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2216 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2217 .speed = SPEED_50000,
2218 },
2219 {
2220 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2221 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2222 .speed = SPEED_50000,
2223 },
2224 {
2225 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2226 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2227 .speed = SPEED_50000,
2228 },
2229 {
2230 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2231 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2232 .speed = SPEED_56000,
2233 },
2234 {
2235 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2236 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2237 .speed = SPEED_56000,
2238 },
2239 {
2240 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2241 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2242 .speed = SPEED_56000,
2243 },
2244 {
2245 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2246 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2247 .speed = SPEED_56000,
2248 },
2249 {
2250 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2251 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2252 .speed = SPEED_100000,
2253 },
2254 {
2255 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2256 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2257 .speed = SPEED_100000,
2258 },
2259 {
2260 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2261 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2262 .speed = SPEED_100000,
2263 },
2264 {
2265 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2266 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2267 .speed = SPEED_100000,
2268 },
2269 };
2270
2271 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2272
/* Set the ethtool supported port-type bits (FIBRE/Backplane) based on
 * which PTYS capability bits the port reports.
 */
static void
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
				  struct ethtool_link_ksettings *cmd)
{
	/* CR/SR/SGMII capabilities are reported as FIBRE. */
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);

	/* KR/KX capabilities are backplane link modes. */
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}
2292
2293 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2294 {
2295 int i;
2296
2297 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2298 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2299 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2300 mode);
2301 }
2302 }
2303
2304 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2305 struct ethtool_link_ksettings *cmd)
2306 {
2307 u32 speed = SPEED_UNKNOWN;
2308 u8 duplex = DUPLEX_UNKNOWN;
2309 int i;
2310
2311 if (!carrier_ok)
2312 goto out;
2313
2314 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2315 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2316 speed = mlxsw_sp_port_link_mode[i].speed;
2317 duplex = DUPLEX_FULL;
2318 break;
2319 }
2320 }
2321 out:
2322 cmd->base.speed = speed;
2323 cmd->base.duplex = duplex;
2324 }
2325
/* Map the operational PTYS mask to an ethtool connector type (PORT_*).
 * The checks are ordered: SR/SGMII -> fibre, CR -> direct attach,
 * KR/KX -> backplane (PORT_NONE), anything else -> PORT_OTHER.
 */
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
2347
2348 static u32
2349 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2350 {
2351 u32 ptys_proto = 0;
2352 int i;
2353
2354 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2355 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2356 cmd->link_modes.advertising))
2357 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2358 }
2359 return ptys_proto;
2360 }
2361
2362 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2363 {
2364 u32 ptys_proto = 0;
2365 int i;
2366
2367 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2368 if (speed == mlxsw_sp_port_link_mode[i].speed)
2369 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2370 }
2371 return ptys_proto;
2372 }
2373
2374 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2375 {
2376 u32 ptys_proto = 0;
2377 int i;
2378
2379 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2380 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2381 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2382 }
2383 return ptys_proto;
2384 }
2385
/* Fill the "supported" link modes: pause and autoneg capability plus
 * the port types and link modes derived from the device capability
 * mask.
 */
static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}
2396
2397 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2398 struct ethtool_link_ksettings *cmd)
2399 {
2400 if (!autoneg)
2401 return;
2402
2403 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2404 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2405 }
2406
/* Fill the link partner's advertised modes. Only reported when the
 * device says autonegotiation completed successfully and the partner
 * actually advertised something.
 */
static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
				    struct ethtool_link_ksettings *cmd)
{
	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
}
2417
/* ethtool get_link_ksettings handler. Issues a single PTYS query and
 * derives supported/advertised/link-partner modes as well as the
 * operational speed, duplex and connector type from its fields.
 * Returns 0 on success or a negative errno from the register query.
 */
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	/* Autoneg state is driver-cached, not read back from hardware. */
	autoneg = mlxsw_sp_port->link.autoneg;
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	/* Speed/duplex are only meaningful while carrier is up. */
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}
2452
2453 static int
2454 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2455 const struct ethtool_link_ksettings *cmd)
2456 {
2457 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2458 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2459 char ptys_pl[MLXSW_REG_PTYS_LEN];
2460 u32 eth_proto_cap, eth_proto_new;
2461 bool autoneg;
2462 int err;
2463
2464 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2465 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2466 if (err)
2467 return err;
2468 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2469
2470 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2471 eth_proto_new = autoneg ?
2472 mlxsw_sp_to_ptys_advert_link(cmd) :
2473 mlxsw_sp_to_ptys_speed(cmd->base.speed);
2474
2475 eth_proto_new = eth_proto_new & eth_proto_cap;
2476 if (!eth_proto_new) {
2477 netdev_err(dev, "No supported speed requested\n");
2478 return -EINVAL;
2479 }
2480
2481 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2482 eth_proto_new);
2483 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2484 if (err)
2485 return err;
2486
2487 if (!netif_running(dev))
2488 return 0;
2489
2490 mlxsw_sp_port->link.autoneg = autoneg;
2491
2492 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2493 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2494
2495 return 0;
2496 }
2497
/* ethtool operations for Spectrum front-panel port netdevs. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
};
2510
2511 static int
2512 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2513 {
2514 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2515 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2516 char ptys_pl[MLXSW_REG_PTYS_LEN];
2517 u32 eth_proto_admin;
2518
2519 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2520 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2521 eth_proto_admin);
2522 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2523 }
2524
/* Configure one ETS scheduling element via the QEEC register: link
 * element @index at hierarchy level @hr to @next_index one level up and
 * set its arbitration (DWRR vs. strict priority) and DWRR weight.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* 'de' marks the DWRR fields of this request as valid - see the
	 * QEEC register definition.
	 */
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2539
/* Configure the maximum shaper rate of one ETS element via QEEC.
 * @maxrate may be MLXSW_REG_QEEC_MAS_DIS to disable the shaper.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	/* 'mase' marks the max-shaper fields of this request as valid -
	 * see the QEEC register definition.
	 */
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
2553
/* Map switch priority @switch_prio to traffic class @tclass for this
 * port via the QTCT register. Returns 0 or a negative errno.
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
2564
/* Bring the port's ETS scheduling hierarchy to a known default state:
 * one group, one subgroup per TC, one TC element per TC, no DWRR, all
 * max shapers disabled, and every switch priority mapped to TC 0.
 * Returns 0 or the first error encountered.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
2626
2627 static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2628 bool split, u8 module, u8 width, u8 lane)
2629 {
2630 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2631 struct mlxsw_sp_port *mlxsw_sp_port;
2632 struct net_device *dev;
2633 int err;
2634
2635 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2636 if (!dev)
2637 return -ENOMEM;
2638 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2639 mlxsw_sp_port = netdev_priv(dev);
2640 mlxsw_sp_port->dev = dev;
2641 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2642 mlxsw_sp_port->local_port = local_port;
2643 mlxsw_sp_port->pvid = 1;
2644 mlxsw_sp_port->split = split;
2645 mlxsw_sp_port->mapping.module = module;
2646 mlxsw_sp_port->mapping.width = width;
2647 mlxsw_sp_port->mapping.lane = lane;
2648 mlxsw_sp_port->link.autoneg = 1;
2649 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
2650 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2651
2652 mlxsw_sp_port->pcpu_stats =
2653 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2654 if (!mlxsw_sp_port->pcpu_stats) {
2655 err = -ENOMEM;
2656 goto err_alloc_stats;
2657 }
2658
2659 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2660 GFP_KERNEL);
2661 if (!mlxsw_sp_port->sample) {
2662 err = -ENOMEM;
2663 goto err_alloc_sample;
2664 }
2665
2666 mlxsw_sp_port->hw_stats.cache =
2667 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2668
2669 if (!mlxsw_sp_port->hw_stats.cache) {
2670 err = -ENOMEM;
2671 goto err_alloc_hw_stats;
2672 }
2673 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2674 &update_stats_cache);
2675
2676 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2677 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2678
2679 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2680 if (err) {
2681 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2682 mlxsw_sp_port->local_port);
2683 goto err_port_swid_set;
2684 }
2685
2686 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2687 if (err) {
2688 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2689 mlxsw_sp_port->local_port);
2690 goto err_dev_addr_init;
2691 }
2692
2693 netif_carrier_off(dev);
2694
2695 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2696 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2697 dev->hw_features |= NETIF_F_HW_TC;
2698
2699 dev->min_mtu = 0;
2700 dev->max_mtu = ETH_MAX_MTU;
2701
2702 /* Each packet needs to have a Tx header (metadata) on top all other
2703 * headers.
2704 */
2705 dev->needed_headroom = MLXSW_TXHDR_LEN;
2706
2707 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2708 if (err) {
2709 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2710 mlxsw_sp_port->local_port);
2711 goto err_port_system_port_mapping_set;
2712 }
2713
2714 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2715 if (err) {
2716 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2717 mlxsw_sp_port->local_port);
2718 goto err_port_speed_by_width_set;
2719 }
2720
2721 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2722 if (err) {
2723 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2724 mlxsw_sp_port->local_port);
2725 goto err_port_mtu_set;
2726 }
2727
2728 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2729 if (err)
2730 goto err_port_admin_status_set;
2731
2732 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2733 if (err) {
2734 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2735 mlxsw_sp_port->local_port);
2736 goto err_port_buffers_init;
2737 }
2738
2739 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2740 if (err) {
2741 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2742 mlxsw_sp_port->local_port);
2743 goto err_port_ets_init;
2744 }
2745
2746 /* ETS and buffers must be initialized before DCB. */
2747 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2748 if (err) {
2749 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2750 mlxsw_sp_port->local_port);
2751 goto err_port_dcb_init;
2752 }
2753
2754 err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
2755 if (err) {
2756 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
2757 mlxsw_sp_port->local_port);
2758 goto err_port_fids_init;
2759 }
2760
2761 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2762 if (IS_ERR(mlxsw_sp_port_vlan)) {
2763 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
2764 mlxsw_sp_port->local_port);
2765 goto err_port_vlan_get;
2766 }
2767
2768 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
2769 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
2770 err = register_netdev(dev);
2771 if (err) {
2772 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2773 mlxsw_sp_port->local_port);
2774 goto err_register_netdev;
2775 }
2776
2777 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2778 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2779 module);
2780 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
2781 return 0;
2782
2783 err_register_netdev:
2784 mlxsw_sp->ports[local_port] = NULL;
2785 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2786 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
2787 err_port_vlan_get:
2788 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
2789 err_port_fids_init:
2790 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2791 err_port_dcb_init:
2792 err_port_ets_init:
2793 err_port_buffers_init:
2794 err_port_admin_status_set:
2795 err_port_mtu_set:
2796 err_port_speed_by_width_set:
2797 err_port_system_port_mapping_set:
2798 err_dev_addr_init:
2799 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2800 err_port_swid_set:
2801 kfree(mlxsw_sp_port->hw_stats.cache);
2802 err_alloc_hw_stats:
2803 kfree(mlxsw_sp_port->sample);
2804 err_alloc_sample:
2805 free_percpu(mlxsw_sp_port->pcpu_stats);
2806 err_alloc_stats:
2807 free_netdev(dev);
2808 return err;
2809 }
2810
2811 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2812 bool split, u8 module, u8 width, u8 lane)
2813 {
2814 int err;
2815
2816 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2817 if (err) {
2818 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2819 local_port);
2820 return err;
2821 }
2822 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
2823 module, width, lane);
2824 if (err)
2825 goto err_port_create;
2826 return 0;
2827
2828 err_port_create:
2829 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2830 return err;
2831 }
2832
/* Tear down a port created by __mlxsw_sp_port_create() in reverse
 * order: stop the stats work and unregister the netdev (which stops the
 * port) before releasing switch resources and freeing memory.
 */
static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	kfree(mlxsw_sp_port->hw_stats.cache);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* The VLAN flush above should have released every port VLAN. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
}
2853
/* Remove a port and release its core-port bookkeeping - the inverse of
 * mlxsw_sp_port_create().
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	__mlxsw_sp_port_remove(mlxsw_sp, local_port);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}
2859
2860 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2861 {
2862 return mlxsw_sp->ports[local_port] != NULL;
2863 }
2864
2865 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2866 {
2867 int i;
2868
2869 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
2870 if (mlxsw_sp_port_created(mlxsw_sp, i))
2871 mlxsw_sp_port_remove(mlxsw_sp, i);
2872 kfree(mlxsw_sp->port_to_module);
2873 kfree(mlxsw_sp->ports);
2874 }
2875
/* Create netdevs for all mapped front-panel ports. Ports reporting a
 * zero module width are skipped (not attached to a module). On failure,
 * every port created so far is removed and the arrays are freed.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	/* Remember each port's module so split/unsplit can re-create it
	 * later (see mlxsw_sp_port_unsplit_create()).
	 */
	mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	/* Local port 0 is skipped - presumably the CPU port. */
	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	/* Unwind only the ports created before the failing index. */
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}
2920
2921 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
2922 {
2923 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
2924
2925 return local_port - offset;
2926 }
2927
/* Create @count split ports starting at @base_port, all backed by the
 * same front-panel @module, each using an equal share of the module's
 * lanes. The three stages (module map, SWID set, port create) are
 * unwound in reverse on failure; 'i' is reset to @count between stages
 * because the earlier stages completed for every port.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	/* Map each new local port to the module and its lane range. */
	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	/* Move the new ports to SWID 0 (the active switch partition). */
	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
2971
/* Re-create the unsplit (full-width) ports that occupied a cluster
 * before a split. Used both on unsplit and as the restore path when a
 * split fails, so errors are ignored (best effort).
 */
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	/* Unsplit ports sit on every other local port of the cluster. */
	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
3002
/* devlink port-split handler: split @local_port into @count (2 or 4)
 * ports. The port must currently be at full module width, and the
 * sibling local ports the split will occupy must be free. On failure
 * the original unsplit ports are restored.
 * Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	/* Only a full-width port can be split. */
	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	/* Remove the existing ports before re-creating them split. */
	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the original unsplit ports. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
3064
/* devlink port-unsplit handler: undo a previous split of @local_port,
 * removing the split ports and re-creating the original full-width
 * port(s). Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	/* Width 1 implies the port was split by four, otherwise by two. */
	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
3102
3103 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3104 char *pude_pl, void *priv)
3105 {
3106 struct mlxsw_sp *mlxsw_sp = priv;
3107 struct mlxsw_sp_port *mlxsw_sp_port;
3108 enum mlxsw_reg_pude_oper_status status;
3109 u8 local_port;
3110
3111 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3112 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3113 if (!mlxsw_sp_port)
3114 return;
3115
3116 status = mlxsw_reg_pude_oper_status_get(pude_pl);
3117 if (status == MLXSW_PORT_OPER_STATUS_UP) {
3118 netdev_info(mlxsw_sp_port->dev, "link up\n");
3119 netif_carrier_on(mlxsw_sp_port->dev);
3120 } else {
3121 netdev_info(mlxsw_sp_port->dev, "link down\n");
3122 netif_carrier_off(mlxsw_sp_port->dev);
3123 }
3124 }
3125
/* Default RX trap handler: account the packet in the ingress port's
 * per-CPU stats and hand it to the network stack on that port's netdev.
 */
static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		/* NOTE(review): the skb is not freed on this path here —
		 * confirm the caller owns/frees it in that case.
		 */
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* u64 stats need the syncp write section for consistent 64-bit
	 * reads on 32-bit architectures.
	 */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
3150
3151 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3152 void *priv)
3153 {
3154 skb->offload_fwd_mark = 1;
3155 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
3156 }
3157
/* RX handler for sampled packets (packet sampling / sFlow): forward the
 * packet metadata to the psample module and always consume the skb.
 */
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	/* Honour the configured truncation length, if any. */
	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	/* psample_group is RCU-protected; it may be cleared concurrently
	 * when sampling is disabled on the port.
	 */
	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
3192
/* Helpers for building the trap/listener table below:
 * - MLXSW_SP_RXL_NO_MARK: packet trap whose handler does not set
 *   skb->offload_fwd_mark.
 * - MLXSW_SP_RXL_MARK: packet trap whose handler marks the skb as
 *   already forwarded by hardware.
 * - MLXSW_SP_EVENTL: device event (non-packet) listener.
 * Trap groups come from the Spectrum (SP_*) namespace; unregistered
 * listeners fall back to DISCARD.
 */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
3203
/* All traps and events registered by the driver, grouped by layer.
 * Each entry names its trap ID, action and trap group; control traps
 * (_is_ctrl == true) use dedicated control buffers.
 */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	/* L3 traps */
	MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
	MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD)
};
3233
/* Configure a rate policer (QPCR register) for every CPU trap group the
 * driver uses, limiting how fast trapped packets may hit the CPU.
 * Groups not listed in the switch keep their defaults. Returns 0 or a
 * negative errno.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	/* The trap-group index doubles as the policer index below. */
	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		/* Control-plane protocols: low packet rate. */
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
			rate = 1024;
			burst_size = 7;
			break;
		/* Traffic destined to the CPU itself is policed in
		 * bytes rather than packets.
		 */
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			is_bytes = true;
			rate = 4 * 1024;
			burst_size = 4;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
3291
/* Configure every CPU trap group (HTGT register) with its priority,
 * traffic class and policer. Higher priority/TC means more important
 * control traffic. Policer IDs match the trap-group index, as set up in
 * mlxsw_sp_cpu_policers_set(). Returns 0 or a negative errno.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
			priority = 1;
			tc = 1;
			break;
		/* Events are not rate limited. */
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		/* A referenced policer must actually exist on the device. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
3359
3360 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3361 {
3362 int i;
3363 int err;
3364
3365 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3366 if (err)
3367 return err;
3368
3369 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3370 if (err)
3371 return err;
3372
3373 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3374 err = mlxsw_core_trap_register(mlxsw_sp->core,
3375 &mlxsw_sp_listener[i],
3376 mlxsw_sp);
3377 if (err)
3378 goto err_listener_register;
3379
3380 }
3381 return 0;
3382
3383 err_listener_register:
3384 for (i--; i >= 0; i--) {
3385 mlxsw_core_trap_unregister(mlxsw_sp->core,
3386 &mlxsw_sp_listener[i],
3387 mlxsw_sp);
3388 }
3389 return err;
3390 }
3391
3392 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3393 {
3394 int i;
3395
3396 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3397 mlxsw_core_trap_unregister(mlxsw_sp->core,
3398 &mlxsw_sp_listener[i],
3399 mlxsw_sp);
3400 }
3401 }
3402
/* Configure the LAG hash fields (SLCR register) and allocate the
 * per-LAG upper-device tracking array. Returns 0 or a negative errno.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	/* Hash on L2 (MACs, ethertype, VLAN), L3 (IPs, protocol) and L4
	 * (ports) fields.
	 */
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	/* LAG limits must have been exposed by the device. */
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}
3433
/* Free the per-LAG tracking array allocated by mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}
3438
3439 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3440 {
3441 char htgt_pl[MLXSW_REG_HTGT_LEN];
3442
3443 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3444 MLXSW_REG_HTGT_INVALID_POLICER,
3445 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3446 MLXSW_REG_HTGT_DEFAULT_TC);
3447 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3448 }
3449
/* Driver init callback: bring up all Spectrum subsystems in dependency
 * order (firmware check, base MAC, FIDs, traps, buffers, LAG,
 * switchdev, router, SPAN, ACL, counters, dpipe, and finally the
 * ports). On any failure, everything initialized so far is torn down in
 * reverse order. Returns 0 or a negative errno.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
		return err;
	}

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	/* Ports come last so every subsystem they rely on is ready. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Unwind in exact reverse order of initialization. */
err_ports_create:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
	return err;
}
3561
/* Driver fini callback: tear down every subsystem in the exact reverse
 * order of mlxsw_sp_init().
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}
3578
/* Device configuration profile applied at init time. Fields with a
 * matching used_* flag set to 1 are programmed; others keep firmware
 * defaults.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	/* Flooding: per-FID-offset tables sized for VLANs, per-FID
	 * tables sized for 802.1D bridges.
	 */
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 3,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_FID_8021D_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	/* KVD (key-value database) split between linear, single-hash
	 * and double-hash regions.
	 */
	.used_kvd_split_data		= 1,
	.kvd_hash_granularity		= MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts		= 2,
	.kvd_hash_double_parts		= 1,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable		= 1,
};
3610
/* mlxsw core driver ops for Spectrum: lifecycle, port split, shared
 * buffer (sb_*) management and TX header construction.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= mlxsw_sp_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
3633
3634 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3635 {
3636 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3637 }
3638
/* netdev lower-device walk callback: stash the first Spectrum port
 * found into the mlxsw_sp_port pointer passed via @data and stop the
 * walk (non-zero return).
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;

	if (!mlxsw_sp_port_dev_check(lower_dev))
		return 0;

	*p_mlxsw_sp_port = netdev_priv(lower_dev);
	return 1;
}
3651
3652 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3653 {
3654 struct mlxsw_sp_port *mlxsw_sp_port;
3655
3656 if (mlxsw_sp_port_dev_check(dev))
3657 return netdev_priv(dev);
3658
3659 mlxsw_sp_port = NULL;
3660 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3661
3662 return mlxsw_sp_port;
3663 }
3664
3665 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3666 {
3667 struct mlxsw_sp_port *mlxsw_sp_port;
3668
3669 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3670 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3671 }
3672
3673 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3674 {
3675 struct mlxsw_sp_port *mlxsw_sp_port;
3676
3677 if (mlxsw_sp_port_dev_check(dev))
3678 return netdev_priv(dev);
3679
3680 mlxsw_sp_port = NULL;
3681 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3682 &mlxsw_sp_port);
3683
3684 return mlxsw_sp_port;
3685 }
3686
/* Find the Spectrum port underlying @dev and take a reference on its
 * netdev so it cannot disappear after the RCU section ends. Callers
 * must release the reference with mlxsw_sp_port_dev_put().
 */
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}
3698
/* Drop the netdev reference taken by mlxsw_sp_port_lower_dev_hold(). */
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
3703
3704 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3705 {
3706 char sldr_pl[MLXSW_REG_SLDR_LEN];
3707
3708 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3709 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3710 }
3711
3712 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3713 {
3714 char sldr_pl[MLXSW_REG_SLDR_LEN];
3715
3716 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3717 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3718 }
3719
3720 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3721 u16 lag_id, u8 port_index)
3722 {
3723 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3724 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3725
3726 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3727 lag_id, port_index);
3728 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3729 }
3730
3731 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3732 u16 lag_id)
3733 {
3734 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3735 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3736
3737 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3738 lag_id);
3739 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3740 }
3741
3742 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3743 u16 lag_id)
3744 {
3745 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3746 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3747
3748 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3749 lag_id);
3750 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3751 }
3752
3753 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3754 u16 lag_id)
3755 {
3756 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3757 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3758
3759 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3760 lag_id);
3761 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3762 }
3763
/* Map @lag_dev to a device LAG index: return the index already bound to
 * it, or the first free one. Returns -EBUSY when all LAG slots are in
 * use by other devices.
 */
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			/* Remember the first unused slot but keep
			 * scanning for an existing binding.
			 */
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}
3790
3791 static bool
3792 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3793 struct net_device *lag_dev,
3794 struct netdev_lag_upper_info *lag_upper_info)
3795 {
3796 u16 lag_id;
3797
3798 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3799 return false;
3800 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3801 return false;
3802 return true;
3803 }
3804
3805 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3806 u16 lag_id, u8 *p_port_index)
3807 {
3808 u64 max_lag_members;
3809 int i;
3810
3811 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3812 MAX_LAG_MEMBERS);
3813 for (i = 0; i < max_lag_members; i++) {
3814 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3815 *p_port_index = i;
3816 return 0;
3817 }
3818 }
3819 return -EBUSY;
3820 }
3821
/* Enslave the port to LAG netdev @lag_dev: allocate/bind a device LAG
 * index, add the port to the LAG collector, enable collection and
 * record the LAG mapping. Performed in strict hardware order with
 * rollback on failure. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	/* First member creates the LAG in the device. */
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	/* Destroy the LAG only if we created it above. */
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
3873
/* Release the port from its LAG: disable collection, remove it from the
 * collector, flush its VLANs, destroy the LAG if this was the last
 * member, and restore default VLAN/PVID so the port works standalone.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);

	/* Last member destroys the device LAG. */
	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
3904
3905 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3906 u16 lag_id)
3907 {
3908 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3909 char sldr_pl[MLXSW_REG_SLDR_LEN];
3910
3911 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3912 mlxsw_sp_port->local_port);
3913 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3914 }
3915
3916 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3917 u16 lag_id)
3918 {
3919 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3920 char sldr_pl[MLXSW_REG_SLDR_LEN];
3921
3922 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3923 mlxsw_sp_port->local_port);
3924 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3925 }
3926
3927 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3928 bool lag_tx_enabled)
3929 {
3930 if (lag_tx_enabled)
3931 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3932 mlxsw_sp_port->lag_id);
3933 else
3934 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3935 mlxsw_sp_port->lag_id);
3936 }
3937
3938 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3939 struct netdev_lag_lower_state_info *info)
3940 {
3941 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
3942 }
3943
/* Set the STP state of every VLAN on the port to forwarding (@enable)
 * or discarding via a single SPMS register write. Returns 0 or a
 * negative errno.
 */
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	/* SPMS payload is too large for the stack; allocate it. */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
3968
/* Prepare the port for Open vSwitch enslavement: switch it to virtual
 * (per-{port,VLAN}) mode, force all VLANs to forwarding and allow
 * tagged traffic on VLANs 2-4094. Rolls back on failure; returns 0 or
 * a negative errno.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	/* Allow VLANs 2..4094, untagged egress off, PVID unchanged. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;
	return 0;

err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
3991
/* Undo mlxsw_sp_port_ovs_join() in exact reverse order: drop the VLAN
 * membership, put VLANs back to discarding and leave virtual mode.
 */
static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}
3999
/* Handle [PRE]CHANGEUPPER netdev events for a Spectrum port netdev.
 * PRECHANGEUPPER vetoes unsupported topologies (returns -EINVAL);
 * CHANGEUPPER reflects the new bridge/LAG/OVS membership into hardware.
 * @lower_dev is the device whose membership actually changes (the port
 * itself, or a VLAN device on top of it).
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only VLAN, LAG, bridge and OVS uppers are supported. */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* LAG must be offloadable (free index, hash TX policy). */
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		/* A port with VLAN uppers cannot be enslaved to LAG/OVS,
		 * and VLAN uppers of LAG/OVS slaves are restricted.
		 */
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		}
		break;
	}

	return err;
}
4067
/* Handle CHANGELOWERSTATE netdev events for a Spectrum port: when the
 * port is a LAG member, sync its TX-enabled state into the device.
 * Always returns 0 — state-sync failures are only logged, since
 * notifier errors cannot be propagated meaningfully here.
 */
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}
4091
4092 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4093 struct net_device *port_dev,
4094 unsigned long event, void *ptr)
4095 {
4096 switch (event) {
4097 case NETDEV_PRECHANGEUPPER:
4098 case NETDEV_CHANGEUPPER:
4099 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4100 event, ptr);
4101 case NETDEV_CHANGELOWERSTATE:
4102 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4103 ptr);
4104 }
4105
4106 return 0;
4107 }
4108
4109 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4110 unsigned long event, void *ptr)
4111 {
4112 struct net_device *dev;
4113 struct list_head *iter;
4114 int ret;
4115
4116 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4117 if (mlxsw_sp_port_dev_check(dev)) {
4118 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4119 ptr);
4120 if (ret)
4121 return ret;
4122 }
4123 }
4124
4125 return 0;
4126 }
4127
4128 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4129 struct net_device *dev,
4130 unsigned long event, void *ptr,
4131 u16 vid)
4132 {
4133 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4134 struct netdev_notifier_changeupper_info *info = ptr;
4135 struct net_device *upper_dev;
4136 int err = 0;
4137
4138 switch (event) {
4139 case NETDEV_PRECHANGEUPPER:
4140 upper_dev = info->upper_dev;
4141 if (!netif_is_bridge_master(upper_dev))
4142 return -EINVAL;
4143 break;
4144 case NETDEV_CHANGEUPPER:
4145 upper_dev = info->upper_dev;
4146 if (netif_is_bridge_master(upper_dev)) {
4147 if (info->linking)
4148 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4149 vlan_dev,
4150 upper_dev);
4151 else
4152 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4153 vlan_dev,
4154 upper_dev);
4155 } else {
4156 err = -EINVAL;
4157 WARN_ON(1);
4158 }
4159 break;
4160 }
4161
4162 return err;
4163 }
4164
4165 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4166 struct net_device *lag_dev,
4167 unsigned long event,
4168 void *ptr, u16 vid)
4169 {
4170 struct net_device *dev;
4171 struct list_head *iter;
4172 int ret;
4173
4174 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4175 if (mlxsw_sp_port_dev_check(dev)) {
4176 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4177 event, ptr,
4178 vid);
4179 if (ret)
4180 return ret;
4181 }
4182 }
4183
4184 return 0;
4185 }
4186
4187 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4188 unsigned long event, void *ptr)
4189 {
4190 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4191 u16 vid = vlan_dev_vlan_id(vlan_dev);
4192
4193 if (mlxsw_sp_port_dev_check(real_dev))
4194 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4195 event, ptr, vid);
4196 else if (netif_is_lag_master(real_dev))
4197 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4198 real_dev, event,
4199 ptr, vid);
4200
4201 return 0;
4202 }
4203
4204 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4205 {
4206 struct netdev_notifier_changeupper_info *info = ptr;
4207
4208 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4209 return false;
4210 return netif_is_l3_master(info->upper_dev);
4211 }
4212
4213 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
4214 unsigned long event, void *ptr)
4215 {
4216 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4217 int err = 0;
4218
4219 if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
4220 err = mlxsw_sp_netdevice_router_port_event(dev);
4221 else if (mlxsw_sp_is_vrf_event(event, ptr))
4222 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
4223 else if (mlxsw_sp_port_dev_check(dev))
4224 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
4225 else if (netif_is_lag_master(dev))
4226 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
4227 else if (is_vlan_dev(dev))
4228 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
4229
4230 return notifier_from_errno(err);
4231 }
4232
/* Reflects netdev topology events (bridge/LAG/OVS/VLAN uppers) into HW. */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
4236
/* Handles IPv4 address changes on netdevs relevant to the router. */
static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10, /* Must be called before FIB notifier block */
};
4241
/* Forwards netevents (e.g. neighbour updates) to the router code. */
static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};
4245
/* PCI devices handled by this driver: the Mellanox Spectrum ASIC only. */
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};
4250
/* NOTE(review): no .probe/.remove set here — presumably filled in by
 * mlxsw_pci_driver_register(); confirm against the mlxsw PCI core.
 */
static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};
4255
/* Module entry point: register the notifier blocks first, then the core and
 * PCI drivers. On failure, previously acquired registrations are unwound in
 * reverse order via the goto ladder.
 * NOTE(review): the return values of the register_*_notifier() calls are
 * ignored here — confirm this matches the conventions of the rest of the
 * driver/tree.
 */
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}
4282
/* Module exit: tear everything down in strict reverse order of
 * mlxsw_sp_module_init().
 */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
4291
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

/* Module metadata; license matches the dual BSD/GPL header of this file. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME);