/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/ethtool.h>
44 #include <linux/slab.h>
45 #include <linux/device.h>
46 #include <linux/skbuff.h>
47 #include <linux/if_vlan.h>
48 #include <linux/if_bridge.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/bitops.h>
52 #include <linux/list.h>
53 #include <linux/notifier.h>
54 #include <linux/dcbnl.h>
55 #include <linux/inetdevice.h>
56 #include <net/switchdev.h>
57 #include <net/pkt_cls.h>
58 #include <net/tc_act/tc_mirred.h>
59 #include <net/netevent.h>
60 #include <net/tc_act/tc_sample.h>
69 #include "spectrum_cnt.h"
70 #include "spectrum_dpipe.h"
71 #include "../mlxfw/mlxfw.h"
73 #define MLXSW_FWREV_MAJOR 13
74 #define MLXSW_FWREV_MINOR 1420
75 #define MLXSW_FWREV_SUBMINOR 122
77 static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev
= {
78 .major
= MLXSW_FWREV_MAJOR
,
79 .minor
= MLXSW_FWREV_MINOR
,
80 .subminor
= MLXSW_FWREV_SUBMINOR
83 #define MLXSW_SP_FW_FILENAME \
84 "mlxsw_spectrum-" __stringify(MLXSW_FWREV_MAJOR) \
85 "." __stringify(MLXSW_FWREV_MINOR) \
86 "." __stringify(MLXSW_FWREV_SUBMINOR) ".mfa2"
/* Driver identification strings reported to the mlxsw core. */
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
159 struct mlxsw_sp_mlxfw_dev
{
160 struct mlxfw_dev mlxfw_dev
;
161 struct mlxsw_sp
*mlxsw_sp
;
164 static int mlxsw_sp_component_query(struct mlxfw_dev
*mlxfw_dev
,
165 u16 component_index
, u32
*p_max_size
,
166 u8
*p_align_bits
, u16
*p_max_write_size
)
168 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
169 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
170 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
171 char mcqi_pl
[MLXSW_REG_MCQI_LEN
];
174 mlxsw_reg_mcqi_pack(mcqi_pl
, component_index
);
175 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(mcqi
), mcqi_pl
);
178 mlxsw_reg_mcqi_unpack(mcqi_pl
, p_max_size
, p_align_bits
,
181 *p_align_bits
= max_t(u8
, *p_align_bits
, 2);
182 *p_max_write_size
= min_t(u16
, *p_max_write_size
,
183 MLXSW_REG_MCDA_MAX_DATA_LEN
);
187 static int mlxsw_sp_fsm_lock(struct mlxfw_dev
*mlxfw_dev
, u32
*fwhandle
)
189 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
190 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
191 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
192 char mcc_pl
[MLXSW_REG_MCC_LEN
];
196 mlxsw_reg_mcc_pack(mcc_pl
, 0, 0, 0, 0);
197 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
201 mlxsw_reg_mcc_unpack(mcc_pl
, fwhandle
, NULL
, &control_state
);
202 if (control_state
!= MLXFW_FSM_STATE_IDLE
)
205 mlxsw_reg_mcc_pack(mcc_pl
,
206 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE
,
208 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
211 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev
*mlxfw_dev
,
212 u32 fwhandle
, u16 component_index
,
215 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
216 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
217 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
218 char mcc_pl
[MLXSW_REG_MCC_LEN
];
220 mlxsw_reg_mcc_pack(mcc_pl
, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT
,
221 component_index
, fwhandle
, component_size
);
222 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
225 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev
*mlxfw_dev
,
226 u32 fwhandle
, u8
*data
, u16 size
,
229 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
230 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
231 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
232 char mcda_pl
[MLXSW_REG_MCDA_LEN
];
234 mlxsw_reg_mcda_pack(mcda_pl
, fwhandle
, offset
, size
, data
);
235 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcda
), mcda_pl
);
238 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev
*mlxfw_dev
,
239 u32 fwhandle
, u16 component_index
)
241 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
242 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
243 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
244 char mcc_pl
[MLXSW_REG_MCC_LEN
];
246 mlxsw_reg_mcc_pack(mcc_pl
, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT
,
247 component_index
, fwhandle
, 0);
248 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
251 static int mlxsw_sp_fsm_activate(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
)
253 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
254 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
255 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
256 char mcc_pl
[MLXSW_REG_MCC_LEN
];
258 mlxsw_reg_mcc_pack(mcc_pl
, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE
, 0,
260 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
263 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
,
264 enum mlxfw_fsm_state
*fsm_state
,
265 enum mlxfw_fsm_state_err
*fsm_state_err
)
267 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
268 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
269 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
270 char mcc_pl
[MLXSW_REG_MCC_LEN
];
275 mlxsw_reg_mcc_pack(mcc_pl
, 0, 0, fwhandle
, 0);
276 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
280 mlxsw_reg_mcc_unpack(mcc_pl
, NULL
, &error_code
, &control_state
);
281 *fsm_state
= control_state
;
282 *fsm_state_err
= min_t(enum mlxfw_fsm_state_err
, error_code
,
283 MLXFW_FSM_STATE_ERR_MAX
);
287 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
)
289 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
290 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
291 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
292 char mcc_pl
[MLXSW_REG_MCC_LEN
];
294 mlxsw_reg_mcc_pack(mcc_pl
, MLXSW_REG_MCC_INSTRUCTION_CANCEL
, 0,
296 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
299 static void mlxsw_sp_fsm_release(struct mlxfw_dev
*mlxfw_dev
, u32 fwhandle
)
301 struct mlxsw_sp_mlxfw_dev
*mlxsw_sp_mlxfw_dev
=
302 container_of(mlxfw_dev
, struct mlxsw_sp_mlxfw_dev
, mlxfw_dev
);
303 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_mlxfw_dev
->mlxsw_sp
;
304 char mcc_pl
[MLXSW_REG_MCC_LEN
];
306 mlxsw_reg_mcc_pack(mcc_pl
,
307 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE
, 0,
309 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mcc
), mcc_pl
);
312 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops
= {
313 .component_query
= mlxsw_sp_component_query
,
314 .fsm_lock
= mlxsw_sp_fsm_lock
,
315 .fsm_component_update
= mlxsw_sp_fsm_component_update
,
316 .fsm_block_download
= mlxsw_sp_fsm_block_download
,
317 .fsm_component_verify
= mlxsw_sp_fsm_component_verify
,
318 .fsm_activate
= mlxsw_sp_fsm_activate
,
319 .fsm_query_state
= mlxsw_sp_fsm_query_state
,
320 .fsm_cancel
= mlxsw_sp_fsm_cancel
,
321 .fsm_release
= mlxsw_sp_fsm_release
324 static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev
*a
,
325 const struct mlxsw_fw_rev
*b
)
327 if (a
->major
!= b
->major
)
328 return a
->major
> b
->major
;
329 if (a
->minor
!= b
->minor
)
330 return a
->minor
> b
->minor
;
331 return a
->subminor
>= b
->subminor
;
334 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp
*mlxsw_sp
)
336 const struct mlxsw_fw_rev
*rev
= &mlxsw_sp
->bus_info
->fw_rev
;
337 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev
= {
339 .ops
= &mlxsw_sp_mlxfw_dev_ops
,
340 .psid
= mlxsw_sp
->bus_info
->psid
,
341 .psid_size
= strlen(mlxsw_sp
->bus_info
->psid
),
345 const struct firmware
*firmware
;
348 if (mlxsw_sp_fw_rev_ge(rev
, &mlxsw_sp_supported_fw_rev
))
351 dev_info(mlxsw_sp
->bus_info
->dev
, "The firmware version %d.%d.%d out of data\n",
352 rev
->major
, rev
->minor
, rev
->subminor
);
353 dev_info(mlxsw_sp
->bus_info
->dev
, "Upgrading firmware using file %s\n",
354 MLXSW_SP_FW_FILENAME
);
356 err
= request_firmware_direct(&firmware
, MLXSW_SP_FW_FILENAME
,
357 mlxsw_sp
->bus_info
->dev
);
359 dev_err(mlxsw_sp
->bus_info
->dev
, "Could not request firmware file %s\n",
360 MLXSW_SP_FW_FILENAME
);
364 err
= mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev
.mlxfw_dev
, firmware
);
365 release_firmware(firmware
);
369 int mlxsw_sp_flow_counter_get(struct mlxsw_sp
*mlxsw_sp
,
370 unsigned int counter_index
, u64
*packets
,
373 char mgpc_pl
[MLXSW_REG_MGPC_LEN
];
376 mlxsw_reg_mgpc_pack(mgpc_pl
, counter_index
, MLXSW_REG_MGPC_OPCODE_NOP
,
377 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES
);
378 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(mgpc
), mgpc_pl
);
381 *packets
= mlxsw_reg_mgpc_packet_counter_get(mgpc_pl
);
382 *bytes
= mlxsw_reg_mgpc_byte_counter_get(mgpc_pl
);
386 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp
*mlxsw_sp
,
387 unsigned int counter_index
)
389 char mgpc_pl
[MLXSW_REG_MGPC_LEN
];
391 mlxsw_reg_mgpc_pack(mgpc_pl
, counter_index
, MLXSW_REG_MGPC_OPCODE_CLEAR
,
392 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES
);
393 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mgpc
), mgpc_pl
);
396 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp
*mlxsw_sp
,
397 unsigned int *p_counter_index
)
401 err
= mlxsw_sp_counter_alloc(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
405 err
= mlxsw_sp_flow_counter_clear(mlxsw_sp
, *p_counter_index
);
407 goto err_counter_clear
;
411 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
416 void mlxsw_sp_flow_counter_free(struct mlxsw_sp
*mlxsw_sp
,
417 unsigned int counter_index
)
419 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
423 static void mlxsw_sp_txhdr_construct(struct sk_buff
*skb
,
424 const struct mlxsw_tx_info
*tx_info
)
426 char *txhdr
= skb_push(skb
, MLXSW_TXHDR_LEN
);
428 memset(txhdr
, 0, MLXSW_TXHDR_LEN
);
430 mlxsw_tx_hdr_version_set(txhdr
, MLXSW_TXHDR_VERSION_1
);
431 mlxsw_tx_hdr_ctl_set(txhdr
, MLXSW_TXHDR_ETH_CTL
);
432 mlxsw_tx_hdr_proto_set(txhdr
, MLXSW_TXHDR_PROTO_ETH
);
433 mlxsw_tx_hdr_swid_set(txhdr
, 0);
434 mlxsw_tx_hdr_control_tclass_set(txhdr
, 1);
435 mlxsw_tx_hdr_port_mid_set(txhdr
, tx_info
->local_port
);
436 mlxsw_tx_hdr_type_set(txhdr
, MLXSW_TXHDR_TYPE_CONTROL
);
439 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
,
442 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
443 enum mlxsw_reg_spms_state spms_state
;
448 case BR_STATE_FORWARDING
:
449 spms_state
= MLXSW_REG_SPMS_STATE_FORWARDING
;
451 case BR_STATE_LEARNING
:
452 spms_state
= MLXSW_REG_SPMS_STATE_LEARNING
;
454 case BR_STATE_LISTENING
: /* fall-through */
455 case BR_STATE_DISABLED
: /* fall-through */
456 case BR_STATE_BLOCKING
:
457 spms_state
= MLXSW_REG_SPMS_STATE_DISCARDING
;
463 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
466 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sp_port
->local_port
);
467 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, spms_state
);
469 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spms
), spms_pl
);
474 static int mlxsw_sp_base_mac_get(struct mlxsw_sp
*mlxsw_sp
)
476 char spad_pl
[MLXSW_REG_SPAD_LEN
] = {0};
479 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(spad
), spad_pl
);
482 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl
, mlxsw_sp
->base_mac
);
486 static int mlxsw_sp_span_init(struct mlxsw_sp
*mlxsw_sp
)
490 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_SPAN
))
493 mlxsw_sp
->span
.entries_count
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
495 mlxsw_sp
->span
.entries
= kcalloc(mlxsw_sp
->span
.entries_count
,
496 sizeof(struct mlxsw_sp_span_entry
),
498 if (!mlxsw_sp
->span
.entries
)
501 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++)
502 INIT_LIST_HEAD(&mlxsw_sp
->span
.entries
[i
].bound_ports_list
);
507 static void mlxsw_sp_span_fini(struct mlxsw_sp
*mlxsw_sp
)
511 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
512 struct mlxsw_sp_span_entry
*curr
= &mlxsw_sp
->span
.entries
[i
];
514 WARN_ON_ONCE(!list_empty(&curr
->bound_ports_list
));
516 kfree(mlxsw_sp
->span
.entries
);
519 static struct mlxsw_sp_span_entry
*
520 mlxsw_sp_span_entry_create(struct mlxsw_sp_port
*port
)
522 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
523 struct mlxsw_sp_span_entry
*span_entry
;
524 char mpat_pl
[MLXSW_REG_MPAT_LEN
];
525 u8 local_port
= port
->local_port
;
530 /* find a free entry to use */
532 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
533 if (!mlxsw_sp
->span
.entries
[i
].used
) {
535 span_entry
= &mlxsw_sp
->span
.entries
[i
];
542 /* create a new port analayzer entry for local_port */
543 mlxsw_reg_mpat_pack(mpat_pl
, index
, local_port
, true);
544 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpat
), mpat_pl
);
548 span_entry
->used
= true;
549 span_entry
->id
= index
;
550 span_entry
->ref_count
= 1;
551 span_entry
->local_port
= local_port
;
555 static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp
*mlxsw_sp
,
556 struct mlxsw_sp_span_entry
*span_entry
)
558 u8 local_port
= span_entry
->local_port
;
559 char mpat_pl
[MLXSW_REG_MPAT_LEN
];
560 int pa_id
= span_entry
->id
;
562 mlxsw_reg_mpat_pack(mpat_pl
, pa_id
, local_port
, false);
563 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpat
), mpat_pl
);
564 span_entry
->used
= false;
567 static struct mlxsw_sp_span_entry
*
568 mlxsw_sp_span_entry_find(struct mlxsw_sp_port
*port
)
570 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
573 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
574 struct mlxsw_sp_span_entry
*curr
= &mlxsw_sp
->span
.entries
[i
];
576 if (curr
->used
&& curr
->local_port
== port
->local_port
)
582 static struct mlxsw_sp_span_entry
583 *mlxsw_sp_span_entry_get(struct mlxsw_sp_port
*port
)
585 struct mlxsw_sp_span_entry
*span_entry
;
587 span_entry
= mlxsw_sp_span_entry_find(port
);
589 /* Already exists, just take a reference */
590 span_entry
->ref_count
++;
594 return mlxsw_sp_span_entry_create(port
);
597 static int mlxsw_sp_span_entry_put(struct mlxsw_sp
*mlxsw_sp
,
598 struct mlxsw_sp_span_entry
*span_entry
)
600 WARN_ON(!span_entry
->ref_count
);
601 if (--span_entry
->ref_count
== 0)
602 mlxsw_sp_span_entry_destroy(mlxsw_sp
, span_entry
);
606 static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port
*port
)
608 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
609 struct mlxsw_sp_span_inspected_port
*p
;
612 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
613 struct mlxsw_sp_span_entry
*curr
= &mlxsw_sp
->span
.entries
[i
];
615 list_for_each_entry(p
, &curr
->bound_ports_list
, list
)
616 if (p
->local_port
== port
->local_port
&&
617 p
->type
== MLXSW_SP_SPAN_EGRESS
)
/* Convert an MTU to the SPAN shared-buffer size in cells: 2.5x the MTU,
 * rounded up by one cell.
 */
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
630 static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port
*port
, u16 mtu
)
632 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
633 char sbib_pl
[MLXSW_REG_SBIB_LEN
];
636 /* If port is egress mirrored, the shared buffer size should be
637 * updated according to the mtu value
639 if (mlxsw_sp_span_is_egress_mirror(port
)) {
640 u32 buffsize
= mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp
, mtu
);
642 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
, buffsize
);
643 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
645 netdev_err(port
->dev
, "Could not update shared buffer for mirroring\n");
653 static struct mlxsw_sp_span_inspected_port
*
654 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port
*port
,
655 struct mlxsw_sp_span_entry
*span_entry
)
657 struct mlxsw_sp_span_inspected_port
*p
;
659 list_for_each_entry(p
, &span_entry
->bound_ports_list
, list
)
660 if (port
->local_port
== p
->local_port
)
666 mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port
*port
,
667 struct mlxsw_sp_span_entry
*span_entry
,
668 enum mlxsw_sp_span_type type
)
670 struct mlxsw_sp_span_inspected_port
*inspected_port
;
671 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
672 char mpar_pl
[MLXSW_REG_MPAR_LEN
];
673 char sbib_pl
[MLXSW_REG_SBIB_LEN
];
674 int pa_id
= span_entry
->id
;
677 /* if it is an egress SPAN, bind a shared buffer to it */
678 if (type
== MLXSW_SP_SPAN_EGRESS
) {
679 u32 buffsize
= mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp
,
682 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
, buffsize
);
683 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
685 netdev_err(port
->dev
, "Could not create shared buffer for mirroring\n");
690 /* bind the port to the SPAN entry */
691 mlxsw_reg_mpar_pack(mpar_pl
, port
->local_port
,
692 (enum mlxsw_reg_mpar_i_e
) type
, true, pa_id
);
693 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpar
), mpar_pl
);
695 goto err_mpar_reg_write
;
697 inspected_port
= kzalloc(sizeof(*inspected_port
), GFP_KERNEL
);
698 if (!inspected_port
) {
700 goto err_inspected_port_alloc
;
702 inspected_port
->local_port
= port
->local_port
;
703 inspected_port
->type
= type
;
704 list_add_tail(&inspected_port
->list
, &span_entry
->bound_ports_list
);
709 err_inspected_port_alloc
:
710 if (type
== MLXSW_SP_SPAN_EGRESS
) {
711 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
, 0);
712 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
718 mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port
*port
,
719 struct mlxsw_sp_span_entry
*span_entry
,
720 enum mlxsw_sp_span_type type
)
722 struct mlxsw_sp_span_inspected_port
*inspected_port
;
723 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
724 char mpar_pl
[MLXSW_REG_MPAR_LEN
];
725 char sbib_pl
[MLXSW_REG_SBIB_LEN
];
726 int pa_id
= span_entry
->id
;
728 inspected_port
= mlxsw_sp_span_entry_bound_port_find(port
, span_entry
);
732 /* remove the inspected port */
733 mlxsw_reg_mpar_pack(mpar_pl
, port
->local_port
,
734 (enum mlxsw_reg_mpar_i_e
) type
, false, pa_id
);
735 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpar
), mpar_pl
);
737 /* remove the SBIB buffer if it was egress SPAN */
738 if (type
== MLXSW_SP_SPAN_EGRESS
) {
739 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
, 0);
740 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
743 mlxsw_sp_span_entry_put(mlxsw_sp
, span_entry
);
745 list_del(&inspected_port
->list
);
746 kfree(inspected_port
);
749 static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port
*from
,
750 struct mlxsw_sp_port
*to
,
751 enum mlxsw_sp_span_type type
)
753 struct mlxsw_sp
*mlxsw_sp
= from
->mlxsw_sp
;
754 struct mlxsw_sp_span_entry
*span_entry
;
757 span_entry
= mlxsw_sp_span_entry_get(to
);
761 netdev_dbg(from
->dev
, "Adding inspected port to SPAN entry %d\n",
764 err
= mlxsw_sp_span_inspected_port_bind(from
, span_entry
, type
);
771 mlxsw_sp_span_entry_put(mlxsw_sp
, span_entry
);
775 static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port
*from
,
776 struct mlxsw_sp_port
*to
,
777 enum mlxsw_sp_span_type type
)
779 struct mlxsw_sp_span_entry
*span_entry
;
781 span_entry
= mlxsw_sp_span_entry_find(to
);
783 netdev_err(from
->dev
, "no span entry found\n");
787 netdev_dbg(from
->dev
, "removing inspected port from SPAN entry %d\n",
789 mlxsw_sp_span_inspected_port_unbind(from
, span_entry
, type
);
792 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
793 bool enable
, u32 rate
)
795 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
796 char mpsc_pl
[MLXSW_REG_MPSC_LEN
];
798 mlxsw_reg_mpsc_pack(mpsc_pl
, mlxsw_sp_port
->local_port
, enable
, rate
);
799 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpsc
), mpsc_pl
);
802 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
805 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
806 char paos_pl
[MLXSW_REG_PAOS_LEN
];
808 mlxsw_reg_paos_pack(paos_pl
, mlxsw_sp_port
->local_port
,
809 is_up
? MLXSW_PORT_ADMIN_STATUS_UP
:
810 MLXSW_PORT_ADMIN_STATUS_DOWN
);
811 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(paos
), paos_pl
);
814 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
817 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
818 char ppad_pl
[MLXSW_REG_PPAD_LEN
];
820 mlxsw_reg_ppad_pack(ppad_pl
, true, mlxsw_sp_port
->local_port
);
821 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl
, addr
);
822 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ppad
), ppad_pl
);
825 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
827 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
828 unsigned char *addr
= mlxsw_sp_port
->dev
->dev_addr
;
830 ether_addr_copy(addr
, mlxsw_sp
->base_mac
);
831 addr
[ETH_ALEN
- 1] += mlxsw_sp_port
->local_port
;
832 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
);
835 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 mtu
)
837 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
838 char pmtu_pl
[MLXSW_REG_PMTU_LEN
];
842 mtu
+= MLXSW_TXHDR_LEN
+ ETH_HLEN
;
843 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, 0);
844 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
847 max_mtu
= mlxsw_reg_pmtu_max_mtu_get(pmtu_pl
);
852 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, mtu
);
853 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
856 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
859 char pspa_pl
[MLXSW_REG_PSPA_LEN
];
861 mlxsw_reg_pspa_pack(pspa_pl
, swid
, local_port
);
862 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pspa
), pspa_pl
);
865 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u8 swid
)
867 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
869 return __mlxsw_sp_port_swid_set(mlxsw_sp
, mlxsw_sp_port
->local_port
,
873 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port
*mlxsw_sp_port
, bool enable
)
875 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
876 char svpe_pl
[MLXSW_REG_SVPE_LEN
];
878 mlxsw_reg_svpe_pack(svpe_pl
, mlxsw_sp_port
->local_port
, enable
);
879 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(svpe
), svpe_pl
);
882 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
,
885 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
889 spvmlr_pl
= kmalloc(MLXSW_REG_SPVMLR_LEN
, GFP_KERNEL
);
892 mlxsw_reg_spvmlr_pack(spvmlr_pl
, mlxsw_sp_port
->local_port
, vid
, vid
,
894 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvmlr
), spvmlr_pl
);
899 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
902 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
903 char spvid_pl
[MLXSW_REG_SPVID_LEN
];
905 mlxsw_reg_spvid_pack(spvid_pl
, mlxsw_sp_port
->local_port
, vid
);
906 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvid
), spvid_pl
);
909 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
912 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
913 char spaft_pl
[MLXSW_REG_SPAFT_LEN
];
915 mlxsw_reg_spaft_pack(spaft_pl
, mlxsw_sp_port
->local_port
, allow
);
916 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spaft
), spaft_pl
);
919 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
924 err
= mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port
, false);
928 err
= __mlxsw_sp_port_pvid_set(mlxsw_sp_port
, vid
);
931 err
= mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port
, true);
933 goto err_port_allow_untagged_set
;
936 mlxsw_sp_port
->pvid
= vid
;
939 err_port_allow_untagged_set
:
940 __mlxsw_sp_port_pvid_set(mlxsw_sp_port
, mlxsw_sp_port
->pvid
);
945 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port
*mlxsw_sp_port
)
947 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
948 char sspr_pl
[MLXSW_REG_SSPR_LEN
];
950 mlxsw_reg_sspr_pack(sspr_pl
, mlxsw_sp_port
->local_port
);
951 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sspr
), sspr_pl
);
954 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp
*mlxsw_sp
,
955 u8 local_port
, u8
*p_module
,
956 u8
*p_width
, u8
*p_lane
)
958 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
961 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
962 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
965 *p_module
= mlxsw_reg_pmlp_module_get(pmlp_pl
, 0);
966 *p_width
= mlxsw_reg_pmlp_width_get(pmlp_pl
);
967 *p_lane
= mlxsw_reg_pmlp_tx_lane_get(pmlp_pl
, 0);
971 static int mlxsw_sp_port_module_map(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
972 u8 module
, u8 width
, u8 lane
)
974 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
977 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
978 mlxsw_reg_pmlp_width_set(pmlp_pl
, width
);
979 for (i
= 0; i
< width
; i
++) {
980 mlxsw_reg_pmlp_module_set(pmlp_pl
, i
, module
);
981 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl
, i
, lane
+ i
); /* Rx & Tx */
984 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
987 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
989 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
991 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
992 mlxsw_reg_pmlp_width_set(pmlp_pl
, 0);
993 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
996 static int mlxsw_sp_port_open(struct net_device
*dev
)
998 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1001 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
1004 netif_start_queue(dev
);
1008 static int mlxsw_sp_port_stop(struct net_device
*dev
)
1010 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1012 netif_stop_queue(dev
);
1013 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
1016 static netdev_tx_t
mlxsw_sp_port_xmit(struct sk_buff
*skb
,
1017 struct net_device
*dev
)
1019 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1020 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1021 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
1022 const struct mlxsw_tx_info tx_info
= {
1023 .local_port
= mlxsw_sp_port
->local_port
,
1029 if (mlxsw_core_skb_transmit_busy(mlxsw_sp
->core
, &tx_info
))
1030 return NETDEV_TX_BUSY
;
1032 if (unlikely(skb_headroom(skb
) < MLXSW_TXHDR_LEN
)) {
1033 struct sk_buff
*skb_orig
= skb
;
1035 skb
= skb_realloc_headroom(skb
, MLXSW_TXHDR_LEN
);
1037 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
1038 dev_kfree_skb_any(skb_orig
);
1039 return NETDEV_TX_OK
;
1041 dev_consume_skb_any(skb_orig
);
1044 if (eth_skb_pad(skb
)) {
1045 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
1046 return NETDEV_TX_OK
;
1049 mlxsw_sp_txhdr_construct(skb
, &tx_info
);
1050 /* TX header is consumed by HW on the way so we shouldn't count its
1051 * bytes as being sent.
1053 len
= skb
->len
- MLXSW_TXHDR_LEN
;
1055 /* Due to a race we might fail here because of a full queue. In that
1056 * unlikely case we simply drop the packet.
1058 err
= mlxsw_core_skb_transmit(mlxsw_sp
->core
, skb
, &tx_info
);
1061 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
1062 u64_stats_update_begin(&pcpu_stats
->syncp
);
1063 pcpu_stats
->tx_packets
++;
1064 pcpu_stats
->tx_bytes
+= len
;
1065 u64_stats_update_end(&pcpu_stats
->syncp
);
1067 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
1068 dev_kfree_skb_any(skb
);
1070 return NETDEV_TX_OK
;
/* ndo_set_rx_mode: intentionally empty — Rx filtering is handled in HW. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
1077 static int mlxsw_sp_port_set_mac_address(struct net_device
*dev
, void *p
)
1079 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1080 struct sockaddr
*addr
= p
;
1083 if (!is_valid_ether_addr(addr
->sa_data
))
1084 return -EADDRNOTAVAIL
;
1086 err
= mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
->sa_data
);
1089 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
1093 static u16
mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp
*mlxsw_sp
,
1096 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp
, mtu
);
1099 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
1101 static u16
mlxsw_sp_pfc_delay_get(const struct mlxsw_sp
*mlxsw_sp
, int mtu
,
1104 delay
= mlxsw_sp_bytes_cells(mlxsw_sp
, DIV_ROUND_UP(delay
,
1106 return MLXSW_SP_CELL_FACTOR
* delay
+ mlxsw_sp_bytes_cells(mlxsw_sp
,
1110 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
1111 * Assumes 100m cable and maximum MTU.
1113 #define MLXSW_SP_PAUSE_DELAY 58752
1115 static u16
mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp
*mlxsw_sp
, int mtu
,
1116 u16 delay
, bool pfc
, bool pause
)
1119 return mlxsw_sp_pfc_delay_get(mlxsw_sp
, mtu
, delay
);
1121 return mlxsw_sp_bytes_cells(mlxsw_sp
, MLXSW_SP_PAUSE_DELAY
);
1126 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl
, int index
, u16 size
, u16 thres
,
1130 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl
, index
, size
);
1132 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl
, index
, size
,
1136 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port
*mlxsw_sp_port
, int mtu
,
1137 u8
*prio_tc
, bool pause_en
,
1138 struct ieee_pfc
*my_pfc
)
1140 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1141 u8 pfc_en
= !!my_pfc
? my_pfc
->pfc_en
: 0;
1142 u16 delay
= !!my_pfc
? my_pfc
->delay
: 0;
1143 char pbmc_pl
[MLXSW_REG_PBMC_LEN
];
1146 mlxsw_reg_pbmc_pack(pbmc_pl
, mlxsw_sp_port
->local_port
, 0, 0);
1147 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pbmc
), pbmc_pl
);
1151 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1152 bool configure
= false;
1157 for (j
= 0; j
< IEEE_8021QAZ_MAX_TCS
; j
++) {
1158 if (prio_tc
[j
] == i
) {
1159 pfc
= pfc_en
& BIT(j
);
1168 lossy
= !(pfc
|| pause_en
);
1169 thres
= mlxsw_sp_pg_buf_threshold_get(mlxsw_sp
, mtu
);
1170 delay
= mlxsw_sp_pg_buf_delay_get(mlxsw_sp
, mtu
, delay
, pfc
,
1172 mlxsw_sp_pg_buf_pack(pbmc_pl
, i
, thres
+ delay
, thres
, lossy
);
1175 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pbmc
), pbmc_pl
);
/* Convenience wrapper around __mlxsw_sp_port_headroom_set(): when DCB/ETS
 * is configured on the port, use its priority-to-TC mapping and PFC state;
 * otherwise fall back to an all-zero mapping and no PFC.
 */
1178 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1179 int mtu
, bool pause_en
)
/* Default prio->TC map: everything on TC 0 */
1181 u8 def_prio_tc
[IEEE_8021QAZ_MAX_TCS
] = {0};
/* DCB is considered enabled iff an ETS configuration is present */
1182 bool dcb_en
= !!mlxsw_sp_port
->dcb
.ets
;
1183 struct ieee_pfc
*my_pfc
;
1186 prio_tc
= dcb_en
? mlxsw_sp_port
->dcb
.ets
->prio_tc
: def_prio_tc
;
1187 my_pfc
= dcb_en
? mlxsw_sp_port
->dcb
.pfc
: NULL
;
1189 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port
, mtu
, prio_tc
,
/* ndo_change_mtu handler. Applies the new MTU in three ordered steps -
 * headroom (buffer) sizing, SPAN buffer update, then the port MTU itself -
 * and unwinds with goto labels that restore the old dev->mtu-derived
 * settings if a later step fails.
 */
1193 static int mlxsw_sp_port_change_mtu(struct net_device
*dev
, int mtu
)
1195 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
/* Preserve the port's current PAUSE setting across the resize */
1196 bool pause_en
= mlxsw_sp_port_is_pause_en(mlxsw_sp_port
);
/* Step 1: resize port headroom buffers for the new MTU */
1199 err
= mlxsw_sp_port_headroom_set(mlxsw_sp_port
, mtu
, pause_en
);
/* Step 2: update SPAN (mirroring) buffer for the new MTU */
1202 err
= mlxsw_sp_span_port_mtu_update(mlxsw_sp_port
, mtu
);
1204 goto err_span_port_mtu_update
;
/* Step 3: program the MTU itself */
1205 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, mtu
);
1207 goto err_port_mtu_set
;
/* Error unwind: re-apply configuration based on the unchanged dev->mtu */
1212 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port
, dev
->mtu
);
1213 err_span_port_mtu_update
:
1214 mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
/* Sum the port's per-CPU software counters into *stats. Each CPU's
 * snapshot is taken under a u64_stats seqcount retry loop so 64-bit
 * counters are read consistently on 32-bit hosts.
 */
1219 mlxsw_sp_port_get_sw_stats64(const struct net_device
*dev
,
1220 struct rtnl_link_stats64
*stats
)
1222 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1223 struct mlxsw_sp_port_pcpu_stats
*p
;
1224 u64 rx_packets
, rx_bytes
, tx_packets
, tx_bytes
;
1229 for_each_possible_cpu(i
) {
1230 p
= per_cpu_ptr(mlxsw_sp_port
->pcpu_stats
, i
);
/* Retry the snapshot if a writer raced with us (seqcount changed) */
1232 start
= u64_stats_fetch_begin_irq(&p
->syncp
);
1233 rx_packets
= p
->rx_packets
;
1234 rx_bytes
= p
->rx_bytes
;
1235 tx_packets
= p
->tx_packets
;
1236 tx_bytes
= p
->tx_bytes
;
1237 } while (u64_stats_fetch_retry_irq(&p
->syncp
, start
));
/* Accumulate this CPU's consistent snapshot into the totals */
1239 stats
->rx_packets
+= rx_packets
;
1240 stats
->rx_bytes
+= rx_bytes
;
1241 stats
->tx_packets
+= tx_packets
;
1242 stats
->tx_bytes
+= tx_bytes
;
1243 /* tx_dropped is u32, updated without syncp protection. */
1244 tx_dropped
+= p
->tx_dropped
;
1246 stats
->tx_dropped
= tx_dropped
;
1250 static bool mlxsw_sp_port_has_offload_stats(const struct net_device
*dev
, int attr_id
)
1253 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
1260 static int mlxsw_sp_port_get_offload_stats(int attr_id
, const struct net_device
*dev
,
1264 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
1265 return mlxsw_sp_port_get_sw_stats64(dev
, sp
);
/* Query one PPCNT counter group/priority for this port; the raw register
 * payload is returned in the caller-supplied ppcnt_pl buffer.
 */
1271 static int mlxsw_sp_port_get_stats_raw(struct net_device
*dev
, int grp
,
1272 int prio
, char *ppcnt_pl
)
1274 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1275 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1277 mlxsw_reg_ppcnt_pack(ppcnt_pl
, mlxsw_sp_port
->local_port
, grp
, prio
);
1278 return mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ppcnt
), ppcnt_pl
);
1281 static int mlxsw_sp_port_get_hw_stats(struct net_device
*dev
,
1282 struct rtnl_link_stats64
*stats
)
1284 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
1287 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_IEEE_8023_CNT
,
1293 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl
);
1295 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl
);
1297 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl
);
1299 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl
);
1301 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl
);
1303 stats
->rx_crc_errors
=
1304 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl
);
1305 stats
->rx_frame_errors
=
1306 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl
);
1308 stats
->rx_length_errors
= (
1309 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl
) +
1310 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl
) +
1311 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl
));
1313 stats
->rx_errors
= (stats
->rx_crc_errors
+
1314 stats
->rx_frame_errors
+ stats
->rx_length_errors
);
/* Delayed-work callback that refreshes the cached HW stats for a port
 * (read by mlxsw_sp_port_get_stats64, which may run in atomic context),
 * then re-arms itself for the next MLXSW_HW_STATS_UPDATE_TIME interval.
 * Ports without carrier skip the HW query but still reschedule.
 */
1320 static void update_stats_cache(struct work_struct
*work
)
1322 struct mlxsw_sp_port
*mlxsw_sp_port
=
1323 container_of(work
, struct mlxsw_sp_port
,
1324 hw_stats
.update_dw
.work
);
/* No link - nothing new to read from hardware */
1326 if (!netif_carrier_ok(mlxsw_sp_port
->dev
))
1329 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port
->dev
,
1330 mlxsw_sp_port
->hw_stats
.cache
);
/* Self-rearm: periodic refresh */
1333 mlxsw_core_schedule_dw(&mlxsw_sp_port
->hw_stats
.update_dw
,
1334 MLXSW_HW_STATS_UPDATE_TIME
);
1337 /* Return the stats from a cache that is updated periodically,
1338 * as this function might get called in an atomic context.
/* ndo_get_stats64: copy the snapshot maintained by update_stats_cache();
 * no register access here, so this is safe without sleeping. */
1341 mlxsw_sp_port_get_stats64(struct net_device
*dev
,
1342 struct rtnl_link_stats64
*stats
)
1344 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1346 memcpy(stats
, mlxsw_sp_port
->hw_stats
.cache
, sizeof(*stats
));
1349 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1350 u16 vid_begin
, u16 vid_end
,
1351 bool is_member
, bool untagged
)
1353 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1357 spvm_pl
= kmalloc(MLXSW_REG_SPVM_LEN
, GFP_KERNEL
);
1361 mlxsw_reg_spvm_pack(spvm_pl
, mlxsw_sp_port
->local_port
, vid_begin
,
1362 vid_end
, is_member
, untagged
);
1363 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvm
), spvm_pl
);
1368 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid_begin
,
1369 u16 vid_end
, bool is_member
, bool untagged
)
1374 for (vid
= vid_begin
; vid
<= vid_end
;
1375 vid
+= MLXSW_REG_SPVM_REC_MAX_COUNT
) {
1376 vid_e
= min((u16
) (vid
+ MLXSW_REG_SPVM_REC_MAX_COUNT
- 1),
1379 err
= __mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid_e
,
1380 is_member
, untagged
);
/* Release every VLAN entry still attached to the port. Uses the _safe
 * list iterator because mlxsw_sp_port_vlan_put() ultimately deletes the
 * entry from vlans_list.
 */
1388 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port
*mlxsw_sp_port
)
1390 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
, *tmp
;
1392 list_for_each_entry_safe(mlxsw_sp_port_vlan
, tmp
,
1393 &mlxsw_sp_port
->vlans_list
, list
)
1394 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan
);
/* Create a {port, vid} entry: enable the VLAN in HW first, then allocate
 * and link the tracking structure. On allocation failure the HW VLAN
 * membership is rolled back. Returns the new entry or ERR_PTR(err).
 */
1397 static struct mlxsw_sp_port_vlan
*
1398 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
1400 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
/* Only the default VLAN (vid 1) egresses untagged */
1401 bool untagged
= vid
== 1;
/* Add the VLAN to the port's HW filter (member=true) */
1404 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, true, untagged
);
1406 return ERR_PTR(err
);
1408 mlxsw_sp_port_vlan
= kzalloc(sizeof(*mlxsw_sp_port_vlan
), GFP_KERNEL
);
1409 if (!mlxsw_sp_port_vlan
) {
1411 goto err_port_vlan_alloc
;
1414 mlxsw_sp_port_vlan
->mlxsw_sp_port
= mlxsw_sp_port
;
1415 mlxsw_sp_port_vlan
->vid
= vid
;
1416 list_add(&mlxsw_sp_port_vlan
->list
, &mlxsw_sp_port
->vlans_list
);
1418 return mlxsw_sp_port_vlan
;
/* Roll back the HW membership added above */
1420 err_port_vlan_alloc
:
1421 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
1422 return ERR_PTR(err
);
/* Tear down a {port, vid} entry: unlink and free the tracking structure,
 * then remove the VLAN from the port's HW filter (inverse of
 * mlxsw_sp_port_vlan_create()).
 */
1426 mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
)
1428 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp_port_vlan
->mlxsw_sp_port
;
/* Copy vid before kfree() - needed for the HW update below */
1429 u16 vid
= mlxsw_sp_port_vlan
->vid
;
1431 list_del(&mlxsw_sp_port_vlan
->list
);
1432 kfree(mlxsw_sp_port_vlan
);
1433 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
/* Get-or-create lookup for a {port, vid} entry: return the existing entry
 * if present, otherwise create one. May return ERR_PTR() from the create
 * path; pair with mlxsw_sp_port_vlan_put().
 */
1436 struct mlxsw_sp_port_vlan
*
1437 mlxsw_sp_port_vlan_get(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
1439 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1441 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, vid
);
1442 if (mlxsw_sp_port_vlan
)
1443 return mlxsw_sp_port_vlan
;
1445 return mlxsw_sp_port_vlan_create(mlxsw_sp_port
, vid
);
/* Release a {port, vid} entry: detach it from any bridge port and from
 * the router before destroying it.
 * NOTE(review): 'fid' is read here but its use (likely gating the router
 * leave) is on lines elided from this extract - confirm against the tree.
 */
1448 void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
)
1450 struct mlxsw_sp_fid
*fid
= mlxsw_sp_port_vlan
->fid
;
/* Leave the bridge first if this VLAN is bridged */
1452 if (mlxsw_sp_port_vlan
->bridge_port
)
1453 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan
);
1455 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan
);
1457 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan
);
1460 static int mlxsw_sp_port_add_vid(struct net_device
*dev
,
1461 __be16 __always_unused proto
, u16 vid
)
1463 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1465 /* VLAN 0 is added to HW filter when device goes up, but it is
1466 * reserved in our case, so simply return.
1471 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port
, vid
));
1474 static int mlxsw_sp_port_kill_vid(struct net_device
*dev
,
1475 __be16 __always_unused proto
, u16 vid
)
1477 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1478 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1480 /* VLAN 0 is removed from HW filter when device goes down, but
1481 * it is reserved in our case, so simply return.
1486 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, vid
);
1487 if (!mlxsw_sp_port_vlan
)
1489 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan
);
1494 static int mlxsw_sp_port_get_phys_port_name(struct net_device
*dev
, char *name
,
1497 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1498 u8 module
= mlxsw_sp_port
->mapping
.module
;
1499 u8 width
= mlxsw_sp_port
->mapping
.width
;
1500 u8 lane
= mlxsw_sp_port
->mapping
.lane
;
1503 if (!mlxsw_sp_port
->split
)
1504 err
= snprintf(name
, len
, "p%d", module
+ 1);
1506 err
= snprintf(name
, len
, "p%ds%d", module
+ 1,
/* Linear search of the port's matchall offload list for the entry whose
 * TC cookie matches; returns NULL (on an elided line) when not found.
 */
1515 static struct mlxsw_sp_port_mall_tc_entry
*
1516 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port
*port
,
1517 unsigned long cookie
) {
1518 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1520 list_for_each_entry(mall_tc_entry
, &port
->mall_tc_list
, list
)
1521 if (mall_tc_entry
->cookie
== cookie
)
1522 return mall_tc_entry
;
1528 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port
*mlxsw_sp_port
,
1529 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
,
1530 const struct tc_action
*a
,
1533 struct net
*net
= dev_net(mlxsw_sp_port
->dev
);
1534 enum mlxsw_sp_span_type span_type
;
1535 struct mlxsw_sp_port
*to_port
;
1536 struct net_device
*to_dev
;
1539 ifindex
= tcf_mirred_ifindex(a
);
1540 to_dev
= __dev_get_by_index(net
, ifindex
);
1542 netdev_err(mlxsw_sp_port
->dev
, "Could not find requested device\n");
1546 if (!mlxsw_sp_port_dev_check(to_dev
)) {
1547 netdev_err(mlxsw_sp_port
->dev
, "Cannot mirror to a non-spectrum port");
1550 to_port
= netdev_priv(to_dev
);
1552 mirror
->to_local_port
= to_port
->local_port
;
1553 mirror
->ingress
= ingress
;
1554 span_type
= ingress
? MLXSW_SP_SPAN_INGRESS
: MLXSW_SP_SPAN_EGRESS
;
1555 return mlxsw_sp_span_mirror_add(mlxsw_sp_port
, to_port
, span_type
);
1559 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port
*mlxsw_sp_port
,
1560 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
)
1562 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1563 enum mlxsw_sp_span_type span_type
;
1564 struct mlxsw_sp_port
*to_port
;
1566 to_port
= mlxsw_sp
->ports
[mirror
->to_local_port
];
1567 span_type
= mirror
->ingress
?
1568 MLXSW_SP_SPAN_INGRESS
: MLXSW_SP_SPAN_EGRESS
;
1569 mlxsw_sp_span_mirror_remove(mlxsw_sp_port
, to_port
, span_type
);
1573 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port
*mlxsw_sp_port
,
1574 struct tc_cls_matchall_offload
*cls
,
1575 const struct tc_action
*a
,
1580 if (!mlxsw_sp_port
->sample
)
1582 if (rtnl_dereference(mlxsw_sp_port
->sample
->psample_group
)) {
1583 netdev_err(mlxsw_sp_port
->dev
, "sample already active\n");
1586 if (tcf_sample_rate(a
) > MLXSW_REG_MPSC_RATE_MAX
) {
1587 netdev_err(mlxsw_sp_port
->dev
, "sample rate not supported\n");
1591 rcu_assign_pointer(mlxsw_sp_port
->sample
->psample_group
,
1592 tcf_sample_psample_group(a
));
1593 mlxsw_sp_port
->sample
->truncate
= tcf_sample_truncate(a
);
1594 mlxsw_sp_port
->sample
->trunc_size
= tcf_sample_trunc_size(a
);
1595 mlxsw_sp_port
->sample
->rate
= tcf_sample_rate(a
);
1597 err
= mlxsw_sp_port_sample_set(mlxsw_sp_port
, true, tcf_sample_rate(a
));
1599 goto err_port_sample_set
;
1602 err_port_sample_set
:
1603 RCU_INIT_POINTER(mlxsw_sp_port
->sample
->psample_group
, NULL
);
1608 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port
*mlxsw_sp_port
)
1610 if (!mlxsw_sp_port
->sample
)
1613 mlxsw_sp_port_sample_set(mlxsw_sp_port
, false, 1);
1614 RCU_INIT_POINTER(mlxsw_sp_port
->sample
->psample_group
, NULL
);
1617 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port
*mlxsw_sp_port
,
1619 struct tc_cls_matchall_offload
*cls
,
1622 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1623 const struct tc_action
*a
;
1627 if (!tc_single_action(cls
->exts
)) {
1628 netdev_err(mlxsw_sp_port
->dev
, "only singular actions are supported\n");
1632 mall_tc_entry
= kzalloc(sizeof(*mall_tc_entry
), GFP_KERNEL
);
1635 mall_tc_entry
->cookie
= cls
->cookie
;
1637 tcf_exts_to_list(cls
->exts
, &actions
);
1638 a
= list_first_entry(&actions
, struct tc_action
, list
);
1640 if (is_tcf_mirred_egress_mirror(a
) && protocol
== htons(ETH_P_ALL
)) {
1641 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
;
1643 mall_tc_entry
->type
= MLXSW_SP_PORT_MALL_MIRROR
;
1644 mirror
= &mall_tc_entry
->mirror
;
1645 err
= mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port
,
1646 mirror
, a
, ingress
);
1647 } else if (is_tcf_sample(a
) && protocol
== htons(ETH_P_ALL
)) {
1648 mall_tc_entry
->type
= MLXSW_SP_PORT_MALL_SAMPLE
;
1649 err
= mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port
, cls
,
1656 goto err_add_action
;
1658 list_add_tail(&mall_tc_entry
->list
, &mlxsw_sp_port
->mall_tc_list
);
1662 kfree(mall_tc_entry
);
1666 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port
*mlxsw_sp_port
,
1667 struct tc_cls_matchall_offload
*cls
)
1669 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1671 mall_tc_entry
= mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port
,
1673 if (!mall_tc_entry
) {
1674 netdev_dbg(mlxsw_sp_port
->dev
, "tc entry not found on port\n");
1677 list_del(&mall_tc_entry
->list
);
1679 switch (mall_tc_entry
->type
) {
1680 case MLXSW_SP_PORT_MALL_MIRROR
:
1681 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port
,
1682 &mall_tc_entry
->mirror
);
1684 case MLXSW_SP_PORT_MALL_SAMPLE
:
1685 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port
);
1691 kfree(mall_tc_entry
);
1694 static int mlxsw_sp_setup_tc(struct net_device
*dev
, u32 handle
,
1695 __be16 proto
, struct tc_to_netdev
*tc
)
1697 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1698 bool ingress
= TC_H_MAJ(handle
) == TC_H_MAJ(TC_H_INGRESS
);
1701 case TC_SETUP_MATCHALL
:
1702 switch (tc
->cls_mall
->command
) {
1703 case TC_CLSMATCHALL_REPLACE
:
1704 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port
,
1708 case TC_CLSMATCHALL_DESTROY
:
1709 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port
,
1715 case TC_SETUP_CLSFLOWER
:
1716 switch (tc
->cls_flower
->command
) {
1717 case TC_CLSFLOWER_REPLACE
:
1718 return mlxsw_sp_flower_replace(mlxsw_sp_port
, ingress
,
1719 proto
, tc
->cls_flower
);
1720 case TC_CLSFLOWER_DESTROY
:
1721 mlxsw_sp_flower_destroy(mlxsw_sp_port
, ingress
,
1724 case TC_CLSFLOWER_STATS
:
1725 return mlxsw_sp_flower_stats(mlxsw_sp_port
, ingress
,
1735 static const struct net_device_ops mlxsw_sp_port_netdev_ops
= {
1736 .ndo_open
= mlxsw_sp_port_open
,
1737 .ndo_stop
= mlxsw_sp_port_stop
,
1738 .ndo_start_xmit
= mlxsw_sp_port_xmit
,
1739 .ndo_setup_tc
= mlxsw_sp_setup_tc
,
1740 .ndo_set_rx_mode
= mlxsw_sp_set_rx_mode
,
1741 .ndo_set_mac_address
= mlxsw_sp_port_set_mac_address
,
1742 .ndo_change_mtu
= mlxsw_sp_port_change_mtu
,
1743 .ndo_get_stats64
= mlxsw_sp_port_get_stats64
,
1744 .ndo_has_offload_stats
= mlxsw_sp_port_has_offload_stats
,
1745 .ndo_get_offload_stats
= mlxsw_sp_port_get_offload_stats
,
1746 .ndo_vlan_rx_add_vid
= mlxsw_sp_port_add_vid
,
1747 .ndo_vlan_rx_kill_vid
= mlxsw_sp_port_kill_vid
,
1748 .ndo_fdb_add
= switchdev_port_fdb_add
,
1749 .ndo_fdb_del
= switchdev_port_fdb_del
,
1750 .ndo_fdb_dump
= switchdev_port_fdb_dump
,
1751 .ndo_bridge_setlink
= switchdev_port_bridge_setlink
,
1752 .ndo_bridge_getlink
= switchdev_port_bridge_getlink
,
1753 .ndo_bridge_dellink
= switchdev_port_bridge_dellink
,
1754 .ndo_get_phys_port_name
= mlxsw_sp_port_get_phys_port_name
,
1757 static void mlxsw_sp_port_get_drvinfo(struct net_device
*dev
,
1758 struct ethtool_drvinfo
*drvinfo
)
1760 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1761 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1763 strlcpy(drvinfo
->driver
, mlxsw_sp_driver_name
, sizeof(drvinfo
->driver
));
1764 strlcpy(drvinfo
->version
, mlxsw_sp_driver_version
,
1765 sizeof(drvinfo
->version
));
1766 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
1768 mlxsw_sp
->bus_info
->fw_rev
.major
,
1769 mlxsw_sp
->bus_info
->fw_rev
.minor
,
1770 mlxsw_sp
->bus_info
->fw_rev
.subminor
);
1771 strlcpy(drvinfo
->bus_info
, mlxsw_sp
->bus_info
->device_name
,
1772 sizeof(drvinfo
->bus_info
));
/* ethtool get_pauseparam: report the PAUSE state cached in the port's
 * link structure; no hardware access needed.
 */
1775 static void mlxsw_sp_port_get_pauseparam(struct net_device
*dev
,
1776 struct ethtool_pauseparam
*pause
)
1778 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1780 pause
->rx_pause
= mlxsw_sp_port
->link
.rx_pause
;
1781 pause
->tx_pause
= mlxsw_sp_port
->link
.tx_pause
;
/* Program the requested RX/TX PAUSE enables into the PFCC register for
 * this port. Pure register write; caller handles headroom sizing and
 * updates the cached link state on success.
 */
1784 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1785 struct ethtool_pauseparam
*pause
)
1787 char pfcc_pl
[MLXSW_REG_PFCC_LEN
];
1789 mlxsw_reg_pfcc_pack(pfcc_pl
, mlxsw_sp_port
->local_port
);
/* pprx/pptx: per-port global PAUSE receive/transmit enables */
1790 mlxsw_reg_pfcc_pprx_set(pfcc_pl
, pause
->rx_pause
);
1791 mlxsw_reg_pfcc_pptx_set(pfcc_pl
, pause
->tx_pause
);
1793 return mlxsw_reg_write(mlxsw_sp_port
->mlxsw_sp
->core
, MLXSW_REG(pfcc
),
1797 static int mlxsw_sp_port_set_pauseparam(struct net_device
*dev
,
1798 struct ethtool_pauseparam
*pause
)
1800 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1801 bool pause_en
= pause
->tx_pause
|| pause
->rx_pause
;
1804 if (mlxsw_sp_port
->dcb
.pfc
&& mlxsw_sp_port
->dcb
.pfc
->pfc_en
) {
1805 netdev_err(dev
, "PFC already enabled on port\n");
1809 if (pause
->autoneg
) {
1810 netdev_err(dev
, "PAUSE frames autonegotiation isn't supported\n");
1814 err
= mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
1816 netdev_err(dev
, "Failed to configure port's headroom\n");
1820 err
= mlxsw_sp_port_pause_set(mlxsw_sp_port
, pause
);
1822 netdev_err(dev
, "Failed to set PAUSE parameters\n");
1823 goto err_port_pause_configure
;
1826 mlxsw_sp_port
->link
.rx_pause
= pause
->rx_pause
;
1827 mlxsw_sp_port
->link
.tx_pause
= pause
->tx_pause
;
1831 err_port_pause_configure
:
1832 pause_en
= mlxsw_sp_port_is_pause_en(mlxsw_sp_port
);
1833 mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
1837 struct mlxsw_sp_port_hw_stats
{
1838 char str
[ETH_GSTRING_LEN
];
1839 u64 (*getter
)(const char *payload
);
1843 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats
[] = {
1845 .str
= "a_frames_transmitted_ok",
1846 .getter
= mlxsw_reg_ppcnt_a_frames_transmitted_ok_get
,
1849 .str
= "a_frames_received_ok",
1850 .getter
= mlxsw_reg_ppcnt_a_frames_received_ok_get
,
1853 .str
= "a_frame_check_sequence_errors",
1854 .getter
= mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get
,
1857 .str
= "a_alignment_errors",
1858 .getter
= mlxsw_reg_ppcnt_a_alignment_errors_get
,
1861 .str
= "a_octets_transmitted_ok",
1862 .getter
= mlxsw_reg_ppcnt_a_octets_transmitted_ok_get
,
1865 .str
= "a_octets_received_ok",
1866 .getter
= mlxsw_reg_ppcnt_a_octets_received_ok_get
,
1869 .str
= "a_multicast_frames_xmitted_ok",
1870 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get
,
1873 .str
= "a_broadcast_frames_xmitted_ok",
1874 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get
,
1877 .str
= "a_multicast_frames_received_ok",
1878 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get
,
1881 .str
= "a_broadcast_frames_received_ok",
1882 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get
,
1885 .str
= "a_in_range_length_errors",
1886 .getter
= mlxsw_reg_ppcnt_a_in_range_length_errors_get
,
1889 .str
= "a_out_of_range_length_field",
1890 .getter
= mlxsw_reg_ppcnt_a_out_of_range_length_field_get
,
1893 .str
= "a_frame_too_long_errors",
1894 .getter
= mlxsw_reg_ppcnt_a_frame_too_long_errors_get
,
1897 .str
= "a_symbol_error_during_carrier",
1898 .getter
= mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get
,
1901 .str
= "a_mac_control_frames_transmitted",
1902 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get
,
1905 .str
= "a_mac_control_frames_received",
1906 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_received_get
,
1909 .str
= "a_unsupported_opcodes_received",
1910 .getter
= mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get
,
1913 .str
= "a_pause_mac_ctrl_frames_received",
1914 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get
,
1917 .str
= "a_pause_mac_ctrl_frames_xmitted",
1918 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get
,
1922 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1924 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats
[] = {
1926 .str
= "rx_octets_prio",
1927 .getter
= mlxsw_reg_ppcnt_rx_octets_get
,
1930 .str
= "rx_frames_prio",
1931 .getter
= mlxsw_reg_ppcnt_rx_frames_get
,
1934 .str
= "tx_octets_prio",
1935 .getter
= mlxsw_reg_ppcnt_tx_octets_get
,
1938 .str
= "tx_frames_prio",
1939 .getter
= mlxsw_reg_ppcnt_tx_frames_get
,
1942 .str
= "rx_pause_prio",
1943 .getter
= mlxsw_reg_ppcnt_rx_pause_get
,
1946 .str
= "rx_pause_duration_prio",
1947 .getter
= mlxsw_reg_ppcnt_rx_pause_duration_get
,
1950 .str
= "tx_pause_prio",
1951 .getter
= mlxsw_reg_ppcnt_tx_pause_get
,
1954 .str
= "tx_pause_duration_prio",
1955 .getter
= mlxsw_reg_ppcnt_tx_pause_duration_get
,
1959 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1961 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats
[] = {
1963 .str
= "tc_transmit_queue_tc",
1964 .getter
= mlxsw_reg_ppcnt_tc_transmit_queue_get
,
1965 .cells_bytes
= true,
1968 .str
= "tc_no_buffer_discard_uc_tc",
1969 .getter
= mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get
,
1973 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1975 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
1976 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1977 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
1978 IEEE_8021QAZ_MAX_TCS)
1980 static void mlxsw_sp_port_get_prio_strings(u8
**p
, int prio
)
1984 for (i
= 0; i
< MLXSW_SP_PORT_HW_PRIO_STATS_LEN
; i
++) {
1985 snprintf(*p
, ETH_GSTRING_LEN
, "%s_%d",
1986 mlxsw_sp_port_hw_prio_stats
[i
].str
, prio
);
1987 *p
+= ETH_GSTRING_LEN
;
1991 static void mlxsw_sp_port_get_tc_strings(u8
**p
, int tc
)
1995 for (i
= 0; i
< MLXSW_SP_PORT_HW_TC_STATS_LEN
; i
++) {
1996 snprintf(*p
, ETH_GSTRING_LEN
, "%s_%d",
1997 mlxsw_sp_port_hw_tc_stats
[i
].str
, tc
);
1998 *p
+= ETH_GSTRING_LEN
;
2002 static void mlxsw_sp_port_get_strings(struct net_device
*dev
,
2003 u32 stringset
, u8
*data
)
2008 switch (stringset
) {
2010 for (i
= 0; i
< MLXSW_SP_PORT_HW_STATS_LEN
; i
++) {
2011 memcpy(p
, mlxsw_sp_port_hw_stats
[i
].str
,
2013 p
+= ETH_GSTRING_LEN
;
2016 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
2017 mlxsw_sp_port_get_prio_strings(&p
, i
);
2019 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
2020 mlxsw_sp_port_get_tc_strings(&p
, i
);
2026 static int mlxsw_sp_port_set_phys_id(struct net_device
*dev
,
2027 enum ethtool_phys_id_state state
)
2029 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
2030 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2031 char mlcr_pl
[MLXSW_REG_MLCR_LEN
];
2035 case ETHTOOL_ID_ACTIVE
:
2038 case ETHTOOL_ID_INACTIVE
:
2045 mlxsw_reg_mlcr_pack(mlcr_pl
, mlxsw_sp_port
->local_port
, active
);
2046 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mlcr
), mlcr_pl
);
2050 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats
**p_hw_stats
,
2051 int *p_len
, enum mlxsw_reg_ppcnt_grp grp
)
2054 case MLXSW_REG_PPCNT_IEEE_8023_CNT
:
2055 *p_hw_stats
= mlxsw_sp_port_hw_stats
;
2056 *p_len
= MLXSW_SP_PORT_HW_STATS_LEN
;
2058 case MLXSW_REG_PPCNT_PRIO_CNT
:
2059 *p_hw_stats
= mlxsw_sp_port_hw_prio_stats
;
2060 *p_len
= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
2062 case MLXSW_REG_PPCNT_TC_CNT
:
2063 *p_hw_stats
= mlxsw_sp_port_hw_tc_stats
;
2064 *p_len
= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
2073 static void __mlxsw_sp_port_get_stats(struct net_device
*dev
,
2074 enum mlxsw_reg_ppcnt_grp grp
, int prio
,
2075 u64
*data
, int data_index
)
2077 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
2078 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2079 struct mlxsw_sp_port_hw_stats
*hw_stats
;
2080 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
2084 err
= mlxsw_sp_get_hw_stats_by_group(&hw_stats
, &len
, grp
);
2087 mlxsw_sp_port_get_stats_raw(dev
, grp
, prio
, ppcnt_pl
);
2088 for (i
= 0; i
< len
; i
++) {
2089 data
[data_index
+ i
] = hw_stats
[i
].getter(ppcnt_pl
);
2090 if (!hw_stats
[i
].cells_bytes
)
2092 data
[data_index
+ i
] = mlxsw_sp_cells_bytes(mlxsw_sp
,
2093 data
[data_index
+ i
]);
2097 static void mlxsw_sp_port_get_stats(struct net_device
*dev
,
2098 struct ethtool_stats
*stats
, u64
*data
)
2100 int i
, data_index
= 0;
2102 /* IEEE 802.3 Counters */
2103 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_IEEE_8023_CNT
, 0,
2105 data_index
= MLXSW_SP_PORT_HW_STATS_LEN
;
2107 /* Per-Priority Counters */
2108 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2109 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_PRIO_CNT
, i
,
2111 data_index
+= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
2114 /* Per-TC Counters */
2115 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2116 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_TC_CNT
, i
,
2118 data_index
+= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
2122 static int mlxsw_sp_port_get_sset_count(struct net_device
*dev
, int sset
)
2126 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN
;
2132 struct mlxsw_sp_port_link_mode
{
2133 enum ethtool_link_mode_bit_indices mask_ethtool
;
2138 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode
[] = {
2140 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T
,
2141 .mask_ethtool
= ETHTOOL_LINK_MODE_100baseT_Full_BIT
,
2145 .mask
= MLXSW_REG_PTYS_ETH_SPEED_SGMII
|
2146 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
,
2147 .mask_ethtool
= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
,
2148 .speed
= SPEED_1000
,
2151 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T
,
2152 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseT_Full_BIT
,
2153 .speed
= SPEED_10000
,
2156 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4
|
2157 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
,
2158 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT
,
2159 .speed
= SPEED_10000
,
2162 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
2163 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
2164 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
2165 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR
,
2166 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
,
2167 .speed
= SPEED_10000
,
2170 .mask
= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2
,
2171 .mask_ethtool
= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT
,
2172 .speed
= SPEED_20000
,
2175 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
,
2176 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT
,
2177 .speed
= SPEED_40000
,
2180 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
,
2181 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT
,
2182 .speed
= SPEED_40000
,
2185 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
,
2186 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT
,
2187 .speed
= SPEED_40000
,
2190 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4
,
2191 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT
,
2192 .speed
= SPEED_40000
,
2195 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR
,
2196 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT
,
2197 .speed
= SPEED_25000
,
2200 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR
,
2201 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT
,
2202 .speed
= SPEED_25000
,
2205 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
2206 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
,
2207 .speed
= SPEED_25000
,
2210 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
2211 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
,
2212 .speed
= SPEED_25000
,
2215 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2
,
2216 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT
,
2217 .speed
= SPEED_50000
,
2220 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2
,
2221 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT
,
2222 .speed
= SPEED_50000
,
2225 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2
,
2226 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT
,
2227 .speed
= SPEED_50000
,
2230 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2231 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT
,
2232 .speed
= SPEED_56000
,
2235 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2236 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT
,
2237 .speed
= SPEED_56000
,
2240 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2241 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT
,
2242 .speed
= SPEED_56000
,
2245 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2246 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT
,
2247 .speed
= SPEED_56000
,
2250 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
,
2251 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT
,
2252 .speed
= SPEED_100000
,
2255 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
,
2256 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT
,
2257 .speed
= SPEED_100000
,
2260 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
,
2261 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
,
2262 .speed
= SPEED_100000
,
2265 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4
,
2266 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT
,
2267 .speed
= SPEED_100000
,
2271 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2274 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto
,
2275 struct ethtool_link_ksettings
*cmd
)
2277 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
2278 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
2279 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
2280 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
2281 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
2282 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
2283 ethtool_link_ksettings_add_link_mode(cmd
, supported
, FIBRE
);
2285 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
2286 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
2287 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
2288 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
|
2289 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
))
2290 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Backplane
);
2293 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto
, unsigned long *mode
)
2297 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
2298 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
)
2299 __set_bit(mlxsw_sp_port_link_mode
[i
].mask_ethtool
,
2304 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok
, u32 ptys_eth_proto
,
2305 struct ethtool_link_ksettings
*cmd
)
2307 u32 speed
= SPEED_UNKNOWN
;
2308 u8 duplex
= DUPLEX_UNKNOWN
;
2314 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
2315 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
) {
2316 speed
= mlxsw_sp_port_link_mode
[i
].speed
;
2317 duplex
= DUPLEX_FULL
;
2322 cmd
->base
.speed
= speed
;
2323 cmd
->base
.duplex
= duplex
;
2326 static u8
mlxsw_sp_port_connector_port(u32 ptys_eth_proto
)
2328 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
2329 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
2330 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
2331 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
2334 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
2335 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
2336 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
))
2339 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
2340 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
2341 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
2342 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
))
2349 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings
*cmd
)
2354 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
2355 if (test_bit(mlxsw_sp_port_link_mode
[i
].mask_ethtool
,
2356 cmd
->link_modes
.advertising
))
2357 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
2362 static u32
mlxsw_sp_to_ptys_speed(u32 speed
)
2367 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
2368 if (speed
== mlxsw_sp_port_link_mode
[i
].speed
)
2369 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
2374 static u32
mlxsw_sp_to_ptys_upper_speed(u32 upper_speed
)
2379 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
2380 if (mlxsw_sp_port_link_mode
[i
].speed
<= upper_speed
)
2381 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
2386 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap
,
2387 struct ethtool_link_ksettings
*cmd
)
2389 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Asym_Pause
);
2390 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Autoneg
);
2391 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Pause
);
2393 mlxsw_sp_from_ptys_supported_port(eth_proto_cap
, cmd
);
2394 mlxsw_sp_from_ptys_link(eth_proto_cap
, cmd
->link_modes
.supported
);
2397 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin
, bool autoneg
,
2398 struct ethtool_link_ksettings
*cmd
)
2403 ethtool_link_ksettings_add_link_mode(cmd
, advertising
, Autoneg
);
2404 mlxsw_sp_from_ptys_link(eth_proto_admin
, cmd
->link_modes
.advertising
);
2408 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp
, u8 autoneg_status
,
2409 struct ethtool_link_ksettings
*cmd
)
2411 if (autoneg_status
!= MLXSW_REG_PTYS_AN_STATUS_OK
|| !eth_proto_lp
)
2414 ethtool_link_ksettings_add_link_mode(cmd
, lp_advertising
, Autoneg
);
2415 mlxsw_sp_from_ptys_link(eth_proto_lp
, cmd
->link_modes
.lp_advertising
);
2418 static int mlxsw_sp_port_get_link_ksettings(struct net_device
*dev
,
2419 struct ethtool_link_ksettings
*cmd
)
2421 u32 eth_proto_cap
, eth_proto_admin
, eth_proto_oper
, eth_proto_lp
;
2422 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
2423 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2424 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
2429 autoneg
= mlxsw_sp_port
->link
.autoneg
;
2430 mlxsw_reg_ptys_eth_pack(ptys_pl
, mlxsw_sp_port
->local_port
, 0);
2431 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
2434 mlxsw_reg_ptys_eth_unpack(ptys_pl
, ð_proto_cap
, ð_proto_admin
,
2437 mlxsw_sp_port_get_link_supported(eth_proto_cap
, cmd
);
2439 mlxsw_sp_port_get_link_advertise(eth_proto_admin
, autoneg
, cmd
);
2441 eth_proto_lp
= mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl
);
2442 autoneg_status
= mlxsw_reg_ptys_an_status_get(ptys_pl
);
2443 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp
, autoneg_status
, cmd
);
2445 cmd
->base
.autoneg
= autoneg
? AUTONEG_ENABLE
: AUTONEG_DISABLE
;
2446 cmd
->base
.port
= mlxsw_sp_port_connector_port(eth_proto_oper
);
2447 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev
), eth_proto_oper
,
2454 mlxsw_sp_port_set_link_ksettings(struct net_device
*dev
,
2455 const struct ethtool_link_ksettings
*cmd
)
2457 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
2458 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2459 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
2460 u32 eth_proto_cap
, eth_proto_new
;
2464 mlxsw_reg_ptys_eth_pack(ptys_pl
, mlxsw_sp_port
->local_port
, 0);
2465 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
2468 mlxsw_reg_ptys_eth_unpack(ptys_pl
, ð_proto_cap
, NULL
, NULL
);
2470 autoneg
= cmd
->base
.autoneg
== AUTONEG_ENABLE
;
2471 eth_proto_new
= autoneg
?
2472 mlxsw_sp_to_ptys_advert_link(cmd
) :
2473 mlxsw_sp_to_ptys_speed(cmd
->base
.speed
);
2475 eth_proto_new
= eth_proto_new
& eth_proto_cap
;
2476 if (!eth_proto_new
) {
2477 netdev_err(dev
, "No supported speed requested\n");
2481 mlxsw_reg_ptys_eth_pack(ptys_pl
, mlxsw_sp_port
->local_port
,
2483 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
2487 if (!netif_running(dev
))
2490 mlxsw_sp_port
->link
.autoneg
= autoneg
;
2492 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
2493 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
2498 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops
= {
2499 .get_drvinfo
= mlxsw_sp_port_get_drvinfo
,
2500 .get_link
= ethtool_op_get_link
,
2501 .get_pauseparam
= mlxsw_sp_port_get_pauseparam
,
2502 .set_pauseparam
= mlxsw_sp_port_set_pauseparam
,
2503 .get_strings
= mlxsw_sp_port_get_strings
,
2504 .set_phys_id
= mlxsw_sp_port_set_phys_id
,
2505 .get_ethtool_stats
= mlxsw_sp_port_get_stats
,
2506 .get_sset_count
= mlxsw_sp_port_get_sset_count
,
2507 .get_link_ksettings
= mlxsw_sp_port_get_link_ksettings
,
2508 .set_link_ksettings
= mlxsw_sp_port_set_link_ksettings
,
2512 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u8 width
)
2514 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2515 u32 upper_speed
= MLXSW_SP_PORT_BASE_SPEED
* width
;
2516 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
2517 u32 eth_proto_admin
;
2519 eth_proto_admin
= mlxsw_sp_to_ptys_upper_speed(upper_speed
);
2520 mlxsw_reg_ptys_eth_pack(ptys_pl
, mlxsw_sp_port
->local_port
,
2522 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
2525 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
2526 enum mlxsw_reg_qeec_hr hr
, u8 index
, u8 next_index
,
2527 bool dwrr
, u8 dwrr_weight
)
2529 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2530 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
2532 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
2534 mlxsw_reg_qeec_de_set(qeec_pl
, true);
2535 mlxsw_reg_qeec_dwrr_set(qeec_pl
, dwrr
);
2536 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl
, dwrr_weight
);
2537 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
2540 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
2541 enum mlxsw_reg_qeec_hr hr
, u8 index
,
2542 u8 next_index
, u32 maxrate
)
2544 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2545 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
2547 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
2549 mlxsw_reg_qeec_mase_set(qeec_pl
, true);
2550 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl
, maxrate
);
2551 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
2554 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
2555 u8 switch_prio
, u8 tclass
)
2557 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2558 char qtct_pl
[MLXSW_REG_QTCT_LEN
];
2560 mlxsw_reg_qtct_pack(qtct_pl
, mlxsw_sp_port
->local_port
, switch_prio
,
2562 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qtct
), qtct_pl
);
2565 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
2569 /* Setup the elements hierarcy, so that each TC is linked to
2570 * one subgroup, which are all member in the same group.
2572 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
2573 MLXSW_REG_QEEC_HIERARCY_GROUP
, 0, 0, false,
2577 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2578 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
2579 MLXSW_REG_QEEC_HIERARCY_SUBGROUP
, i
,
2584 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2585 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
2586 MLXSW_REG_QEEC_HIERARCY_TC
, i
, i
,
2592 /* Make sure the max shaper is disabled in all hierarcies that
2595 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
2596 MLXSW_REG_QEEC_HIERARCY_PORT
, 0, 0,
2597 MLXSW_REG_QEEC_MAS_DIS
);
2600 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2601 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
2602 MLXSW_REG_QEEC_HIERARCY_SUBGROUP
,
2604 MLXSW_REG_QEEC_MAS_DIS
);
2608 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2609 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
2610 MLXSW_REG_QEEC_HIERARCY_TC
,
2612 MLXSW_REG_QEEC_MAS_DIS
);
2617 /* Map all priorities to traffic class 0. */
2618 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2619 err
= mlxsw_sp_port_prio_tc_set(mlxsw_sp_port
, i
, 0);
2627 static int __mlxsw_sp_port_create(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
2628 bool split
, u8 module
, u8 width
, u8 lane
)
2630 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
2631 struct mlxsw_sp_port
*mlxsw_sp_port
;
2632 struct net_device
*dev
;
2635 dev
= alloc_etherdev(sizeof(struct mlxsw_sp_port
));
2638 SET_NETDEV_DEV(dev
, mlxsw_sp
->bus_info
->dev
);
2639 mlxsw_sp_port
= netdev_priv(dev
);
2640 mlxsw_sp_port
->dev
= dev
;
2641 mlxsw_sp_port
->mlxsw_sp
= mlxsw_sp
;
2642 mlxsw_sp_port
->local_port
= local_port
;
2643 mlxsw_sp_port
->pvid
= 1;
2644 mlxsw_sp_port
->split
= split
;
2645 mlxsw_sp_port
->mapping
.module
= module
;
2646 mlxsw_sp_port
->mapping
.width
= width
;
2647 mlxsw_sp_port
->mapping
.lane
= lane
;
2648 mlxsw_sp_port
->link
.autoneg
= 1;
2649 INIT_LIST_HEAD(&mlxsw_sp_port
->vlans_list
);
2650 INIT_LIST_HEAD(&mlxsw_sp_port
->mall_tc_list
);
2652 mlxsw_sp_port
->pcpu_stats
=
2653 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats
);
2654 if (!mlxsw_sp_port
->pcpu_stats
) {
2656 goto err_alloc_stats
;
2659 mlxsw_sp_port
->sample
= kzalloc(sizeof(*mlxsw_sp_port
->sample
),
2661 if (!mlxsw_sp_port
->sample
) {
2663 goto err_alloc_sample
;
2666 mlxsw_sp_port
->hw_stats
.cache
=
2667 kzalloc(sizeof(*mlxsw_sp_port
->hw_stats
.cache
), GFP_KERNEL
);
2669 if (!mlxsw_sp_port
->hw_stats
.cache
) {
2671 goto err_alloc_hw_stats
;
2673 INIT_DELAYED_WORK(&mlxsw_sp_port
->hw_stats
.update_dw
,
2674 &update_stats_cache
);
2676 dev
->netdev_ops
= &mlxsw_sp_port_netdev_ops
;
2677 dev
->ethtool_ops
= &mlxsw_sp_port_ethtool_ops
;
2679 err
= mlxsw_sp_port_swid_set(mlxsw_sp_port
, 0);
2681 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set SWID\n",
2682 mlxsw_sp_port
->local_port
);
2683 goto err_port_swid_set
;
2686 err
= mlxsw_sp_port_dev_addr_init(mlxsw_sp_port
);
2688 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unable to init port mac address\n",
2689 mlxsw_sp_port
->local_port
);
2690 goto err_dev_addr_init
;
2693 netif_carrier_off(dev
);
2695 dev
->features
|= NETIF_F_NETNS_LOCAL
| NETIF_F_LLTX
| NETIF_F_SG
|
2696 NETIF_F_HW_VLAN_CTAG_FILTER
| NETIF_F_HW_TC
;
2697 dev
->hw_features
|= NETIF_F_HW_TC
;
2700 dev
->max_mtu
= ETH_MAX_MTU
;
2702 /* Each packet needs to have a Tx header (metadata) on top all other
2705 dev
->needed_headroom
= MLXSW_TXHDR_LEN
;
2707 err
= mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port
);
2709 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set system port mapping\n",
2710 mlxsw_sp_port
->local_port
);
2711 goto err_port_system_port_mapping_set
;
2714 err
= mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port
, width
);
2716 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to enable speeds\n",
2717 mlxsw_sp_port
->local_port
);
2718 goto err_port_speed_by_width_set
;
2721 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, ETH_DATA_LEN
);
2723 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set MTU\n",
2724 mlxsw_sp_port
->local_port
);
2725 goto err_port_mtu_set
;
2728 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
2730 goto err_port_admin_status_set
;
2732 err
= mlxsw_sp_port_buffers_init(mlxsw_sp_port
);
2734 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize buffers\n",
2735 mlxsw_sp_port
->local_port
);
2736 goto err_port_buffers_init
;
2739 err
= mlxsw_sp_port_ets_init(mlxsw_sp_port
);
2741 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize ETS\n",
2742 mlxsw_sp_port
->local_port
);
2743 goto err_port_ets_init
;
2746 /* ETS and buffers must be initialized before DCB. */
2747 err
= mlxsw_sp_port_dcb_init(mlxsw_sp_port
);
2749 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize DCB\n",
2750 mlxsw_sp_port
->local_port
);
2751 goto err_port_dcb_init
;
2754 err
= mlxsw_sp_port_fids_init(mlxsw_sp_port
);
2756 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize FIDs\n",
2757 mlxsw_sp_port
->local_port
);
2758 goto err_port_fids_init
;
2761 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_get(mlxsw_sp_port
, 1);
2762 if (IS_ERR(mlxsw_sp_port_vlan
)) {
2763 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to create VID 1\n",
2764 mlxsw_sp_port
->local_port
);
2765 goto err_port_vlan_get
;
2768 mlxsw_sp_port_switchdev_init(mlxsw_sp_port
);
2769 mlxsw_sp
->ports
[local_port
] = mlxsw_sp_port
;
2770 err
= register_netdev(dev
);
2772 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to register netdev\n",
2773 mlxsw_sp_port
->local_port
);
2774 goto err_register_netdev
;
2777 mlxsw_core_port_eth_set(mlxsw_sp
->core
, mlxsw_sp_port
->local_port
,
2778 mlxsw_sp_port
, dev
, mlxsw_sp_port
->split
,
2780 mlxsw_core_schedule_dw(&mlxsw_sp_port
->hw_stats
.update_dw
, 0);
2783 err_register_netdev
:
2784 mlxsw_sp
->ports
[local_port
] = NULL
;
2785 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port
);
2786 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan
);
2788 mlxsw_sp_port_fids_fini(mlxsw_sp_port
);
2790 mlxsw_sp_port_dcb_fini(mlxsw_sp_port
);
2793 err_port_buffers_init
:
2794 err_port_admin_status_set
:
2796 err_port_speed_by_width_set
:
2797 err_port_system_port_mapping_set
:
2799 mlxsw_sp_port_swid_set(mlxsw_sp_port
, MLXSW_PORT_SWID_DISABLED_PORT
);
2801 kfree(mlxsw_sp_port
->hw_stats
.cache
);
2803 kfree(mlxsw_sp_port
->sample
);
2805 free_percpu(mlxsw_sp_port
->pcpu_stats
);
2811 static int mlxsw_sp_port_create(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
2812 bool split
, u8 module
, u8 width
, u8 lane
)
2816 err
= mlxsw_core_port_init(mlxsw_sp
->core
, local_port
);
2818 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to init core port\n",
2822 err
= __mlxsw_sp_port_create(mlxsw_sp
, local_port
, split
,
2823 module
, width
, lane
);
2825 goto err_port_create
;
2829 mlxsw_core_port_fini(mlxsw_sp
->core
, local_port
);
2833 static void __mlxsw_sp_port_remove(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
2835 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
2837 cancel_delayed_work_sync(&mlxsw_sp_port
->hw_stats
.update_dw
);
2838 mlxsw_core_port_clear(mlxsw_sp
->core
, local_port
, mlxsw_sp
);
2839 unregister_netdev(mlxsw_sp_port
->dev
); /* This calls ndo_stop */
2840 mlxsw_sp
->ports
[local_port
] = NULL
;
2841 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port
);
2842 mlxsw_sp_port_vlan_flush(mlxsw_sp_port
);
2843 mlxsw_sp_port_fids_fini(mlxsw_sp_port
);
2844 mlxsw_sp_port_dcb_fini(mlxsw_sp_port
);
2845 mlxsw_sp_port_swid_set(mlxsw_sp_port
, MLXSW_PORT_SWID_DISABLED_PORT
);
2846 mlxsw_sp_port_module_unmap(mlxsw_sp
, mlxsw_sp_port
->local_port
);
2847 kfree(mlxsw_sp_port
->hw_stats
.cache
);
2848 kfree(mlxsw_sp_port
->sample
);
2849 free_percpu(mlxsw_sp_port
->pcpu_stats
);
2850 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port
->vlans_list
));
2851 free_netdev(mlxsw_sp_port
->dev
);
2854 static void mlxsw_sp_port_remove(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
2856 __mlxsw_sp_port_remove(mlxsw_sp
, local_port
);
2857 mlxsw_core_port_fini(mlxsw_sp
->core
, local_port
);
2860 static bool mlxsw_sp_port_created(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
2862 return mlxsw_sp
->ports
[local_port
] != NULL
;
2865 static void mlxsw_sp_ports_remove(struct mlxsw_sp
*mlxsw_sp
)
2869 for (i
= 1; i
< mlxsw_core_max_ports(mlxsw_sp
->core
); i
++)
2870 if (mlxsw_sp_port_created(mlxsw_sp
, i
))
2871 mlxsw_sp_port_remove(mlxsw_sp
, i
);
2872 kfree(mlxsw_sp
->port_to_module
);
2873 kfree(mlxsw_sp
->ports
);
2876 static int mlxsw_sp_ports_create(struct mlxsw_sp
*mlxsw_sp
)
2878 unsigned int max_ports
= mlxsw_core_max_ports(mlxsw_sp
->core
);
2879 u8 module
, width
, lane
;
2884 alloc_size
= sizeof(struct mlxsw_sp_port
*) * max_ports
;
2885 mlxsw_sp
->ports
= kzalloc(alloc_size
, GFP_KERNEL
);
2886 if (!mlxsw_sp
->ports
)
2889 mlxsw_sp
->port_to_module
= kcalloc(max_ports
, sizeof(u8
), GFP_KERNEL
);
2890 if (!mlxsw_sp
->port_to_module
) {
2892 goto err_port_to_module_alloc
;
2895 for (i
= 1; i
< max_ports
; i
++) {
2896 err
= mlxsw_sp_port_module_info_get(mlxsw_sp
, i
, &module
,
2899 goto err_port_module_info_get
;
2902 mlxsw_sp
->port_to_module
[i
] = module
;
2903 err
= mlxsw_sp_port_create(mlxsw_sp
, i
, false,
2904 module
, width
, lane
);
2906 goto err_port_create
;
2911 err_port_module_info_get
:
2912 for (i
--; i
>= 1; i
--)
2913 if (mlxsw_sp_port_created(mlxsw_sp
, i
))
2914 mlxsw_sp_port_remove(mlxsw_sp
, i
);
2915 kfree(mlxsw_sp
->port_to_module
);
2916 err_port_to_module_alloc
:
2917 kfree(mlxsw_sp
->ports
);
2921 static u8
mlxsw_sp_cluster_base_port_get(u8 local_port
)
2923 u8 offset
= (local_port
- 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX
;
2925 return local_port
- offset
;
2928 static int mlxsw_sp_port_split_create(struct mlxsw_sp
*mlxsw_sp
, u8 base_port
,
2929 u8 module
, unsigned int count
)
2931 u8 width
= MLXSW_PORT_MODULE_MAX_WIDTH
/ count
;
2934 for (i
= 0; i
< count
; i
++) {
2935 err
= mlxsw_sp_port_module_map(mlxsw_sp
, base_port
+ i
, module
,
2938 goto err_port_module_map
;
2941 for (i
= 0; i
< count
; i
++) {
2942 err
= __mlxsw_sp_port_swid_set(mlxsw_sp
, base_port
+ i
, 0);
2944 goto err_port_swid_set
;
2947 for (i
= 0; i
< count
; i
++) {
2948 err
= mlxsw_sp_port_create(mlxsw_sp
, base_port
+ i
, true,
2949 module
, width
, i
* width
);
2951 goto err_port_create
;
2957 for (i
--; i
>= 0; i
--)
2958 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
))
2959 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
);
2962 for (i
--; i
>= 0; i
--)
2963 __mlxsw_sp_port_swid_set(mlxsw_sp
, base_port
+ i
,
2964 MLXSW_PORT_SWID_DISABLED_PORT
);
2966 err_port_module_map
:
2967 for (i
--; i
>= 0; i
--)
2968 mlxsw_sp_port_module_unmap(mlxsw_sp
, base_port
+ i
);
2972 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp
*mlxsw_sp
,
2973 u8 base_port
, unsigned int count
)
2975 u8 local_port
, module
, width
= MLXSW_PORT_MODULE_MAX_WIDTH
;
2978 /* Split by four means we need to re-create two ports, otherwise
2983 for (i
= 0; i
< count
; i
++) {
2984 local_port
= base_port
+ i
* 2;
2985 module
= mlxsw_sp
->port_to_module
[local_port
];
2987 mlxsw_sp_port_module_map(mlxsw_sp
, local_port
, module
, width
,
2991 for (i
= 0; i
< count
; i
++)
2992 __mlxsw_sp_port_swid_set(mlxsw_sp
, base_port
+ i
* 2, 0);
2994 for (i
= 0; i
< count
; i
++) {
2995 local_port
= base_port
+ i
* 2;
2996 module
= mlxsw_sp
->port_to_module
[local_port
];
2998 mlxsw_sp_port_create(mlxsw_sp
, local_port
, false, module
,
3003 static int mlxsw_sp_port_split(struct mlxsw_core
*mlxsw_core
, u8 local_port
,
3006 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3007 struct mlxsw_sp_port
*mlxsw_sp_port
;
3008 u8 module
, cur_width
, base_port
;
3012 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
3013 if (!mlxsw_sp_port
) {
3014 dev_err(mlxsw_sp
->bus_info
->dev
, "Port number \"%d\" does not exist\n",
3019 module
= mlxsw_sp_port
->mapping
.module
;
3020 cur_width
= mlxsw_sp_port
->mapping
.width
;
3022 if (count
!= 2 && count
!= 4) {
3023 netdev_err(mlxsw_sp_port
->dev
, "Port can only be split into 2 or 4 ports\n");
3027 if (cur_width
!= MLXSW_PORT_MODULE_MAX_WIDTH
) {
3028 netdev_err(mlxsw_sp_port
->dev
, "Port cannot be split further\n");
3032 /* Make sure we have enough slave (even) ports for the split. */
3034 base_port
= local_port
;
3035 if (mlxsw_sp
->ports
[base_port
+ 1]) {
3036 netdev_err(mlxsw_sp_port
->dev
, "Invalid split configuration\n");
3040 base_port
= mlxsw_sp_cluster_base_port_get(local_port
);
3041 if (mlxsw_sp
->ports
[base_port
+ 1] ||
3042 mlxsw_sp
->ports
[base_port
+ 3]) {
3043 netdev_err(mlxsw_sp_port
->dev
, "Invalid split configuration\n");
3048 for (i
= 0; i
< count
; i
++)
3049 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
))
3050 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
);
3052 err
= mlxsw_sp_port_split_create(mlxsw_sp
, base_port
, module
, count
);
3054 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create split ports\n");
3055 goto err_port_split_create
;
3060 err_port_split_create
:
3061 mlxsw_sp_port_unsplit_create(mlxsw_sp
, base_port
, count
);
3065 static int mlxsw_sp_port_unsplit(struct mlxsw_core
*mlxsw_core
, u8 local_port
)
3067 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3068 struct mlxsw_sp_port
*mlxsw_sp_port
;
3069 u8 cur_width
, base_port
;
3073 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
3074 if (!mlxsw_sp_port
) {
3075 dev_err(mlxsw_sp
->bus_info
->dev
, "Port number \"%d\" does not exist\n",
3080 if (!mlxsw_sp_port
->split
) {
3081 netdev_err(mlxsw_sp_port
->dev
, "Port wasn't split\n");
3085 cur_width
= mlxsw_sp_port
->mapping
.width
;
3086 count
= cur_width
== 1 ? 4 : 2;
3088 base_port
= mlxsw_sp_cluster_base_port_get(local_port
);
3090 /* Determine which ports to remove. */
3091 if (count
== 2 && local_port
>= base_port
+ 2)
3092 base_port
= base_port
+ 2;
3094 for (i
= 0; i
< count
; i
++)
3095 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
))
3096 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
);
3098 mlxsw_sp_port_unsplit_create(mlxsw_sp
, base_port
, count
);
3103 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info
*reg
,
3104 char *pude_pl
, void *priv
)
3106 struct mlxsw_sp
*mlxsw_sp
= priv
;
3107 struct mlxsw_sp_port
*mlxsw_sp_port
;
3108 enum mlxsw_reg_pude_oper_status status
;
3111 local_port
= mlxsw_reg_pude_local_port_get(pude_pl
);
3112 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
3116 status
= mlxsw_reg_pude_oper_status_get(pude_pl
);
3117 if (status
== MLXSW_PORT_OPER_STATUS_UP
) {
3118 netdev_info(mlxsw_sp_port
->dev
, "link up\n");
3119 netif_carrier_on(mlxsw_sp_port
->dev
);
3121 netdev_info(mlxsw_sp_port
->dev
, "link down\n");
3122 netif_carrier_off(mlxsw_sp_port
->dev
);
3126 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff
*skb
,
3127 u8 local_port
, void *priv
)
3129 struct mlxsw_sp
*mlxsw_sp
= priv
;
3130 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
3131 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
3133 if (unlikely(!mlxsw_sp_port
)) {
3134 dev_warn_ratelimited(mlxsw_sp
->bus_info
->dev
, "Port %d: skb received for non-existent port\n",
3139 skb
->dev
= mlxsw_sp_port
->dev
;
3141 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
3142 u64_stats_update_begin(&pcpu_stats
->syncp
);
3143 pcpu_stats
->rx_packets
++;
3144 pcpu_stats
->rx_bytes
+= skb
->len
;
3145 u64_stats_update_end(&pcpu_stats
->syncp
);
3147 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
3148 netif_receive_skb(skb
);
3151 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff
*skb
, u8 local_port
,
3154 skb
->offload_fwd_mark
= 1;
3155 return mlxsw_sp_rx_listener_no_mark_func(skb
, local_port
, priv
);
3158 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff
*skb
, u8 local_port
,
3161 struct mlxsw_sp
*mlxsw_sp
= priv
;
3162 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
3163 struct psample_group
*psample_group
;
3166 if (unlikely(!mlxsw_sp_port
)) {
3167 dev_warn_ratelimited(mlxsw_sp
->bus_info
->dev
, "Port %d: sample skb received for non-existent port\n",
3171 if (unlikely(!mlxsw_sp_port
->sample
)) {
3172 dev_warn_ratelimited(mlxsw_sp
->bus_info
->dev
, "Port %d: sample skb received on unsupported port\n",
3177 size
= mlxsw_sp_port
->sample
->truncate
?
3178 mlxsw_sp_port
->sample
->trunc_size
: skb
->len
;
3181 psample_group
= rcu_dereference(mlxsw_sp_port
->sample
->psample_group
);
3184 psample_sample_packet(psample_group
, skb
, size
,
3185 mlxsw_sp_port
->dev
->ifindex
, 0,
3186 mlxsw_sp_port
->sample
->rate
);
3193 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
3194 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
3195 _is_ctrl, SP_##_trap_group, DISCARD)
3197 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
3198 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
3199 _is_ctrl, SP_##_trap_group, DISCARD)
3201 #define MLXSW_SP_EVENTL(_func, _trap_id) \
3202 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
3204 static const struct mlxsw_listener mlxsw_sp_listener
[] = {
3206 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func
, PUDE
),
3208 MLXSW_SP_RXL_NO_MARK(STP
, TRAP_TO_CPU
, STP
, true),
3209 MLXSW_SP_RXL_NO_MARK(LACP
, TRAP_TO_CPU
, LACP
, true),
3210 MLXSW_SP_RXL_NO_MARK(LLDP
, TRAP_TO_CPU
, LLDP
, true),
3211 MLXSW_SP_RXL_MARK(DHCP
, MIRROR_TO_CPU
, DHCP
, false),
3212 MLXSW_SP_RXL_MARK(IGMP_QUERY
, MIRROR_TO_CPU
, IGMP
, false),
3213 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT
, TRAP_TO_CPU
, IGMP
, false),
3214 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT
, TRAP_TO_CPU
, IGMP
, false),
3215 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE
, TRAP_TO_CPU
, IGMP
, false),
3216 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT
, TRAP_TO_CPU
, IGMP
, false),
3217 MLXSW_SP_RXL_MARK(ARPBC
, MIRROR_TO_CPU
, ARP
, false),
3218 MLXSW_SP_RXL_MARK(ARPUC
, MIRROR_TO_CPU
, ARP
, false),
3219 MLXSW_SP_RXL_NO_MARK(FID_MISS
, TRAP_TO_CPU
, IP2ME
, false),
3221 MLXSW_SP_RXL_NO_MARK(MTUERROR
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3222 MLXSW_SP_RXL_NO_MARK(TTLERROR
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3223 MLXSW_SP_RXL_NO_MARK(LBERROR
, TRAP_TO_CPU
, ROUTER_EXP
, false),
3224 MLXSW_SP_RXL_MARK(OSPF
, TRAP_TO_CPU
, OSPF
, false),
3225 MLXSW_SP_RXL_NO_MARK(IP2ME
, TRAP_TO_CPU
, IP2ME
, false),
3226 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0
, TRAP_TO_CPU
, REMOTE_ROUTE
, false),
3227 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4
, TRAP_TO_CPU
, ARP_MISS
, false),
3228 MLXSW_SP_RXL_NO_MARK(BGP_IPV4
, TRAP_TO_CPU
, BGP_IPV4
, false),
3229 /* PKT Sample trap */
3230 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func
, PKT_SAMPLE
, MIRROR_TO_CPU
,
3231 false, SP_IP2ME
, DISCARD
)
3234 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core
*mlxsw_core
)
3236 char qpcr_pl
[MLXSW_REG_QPCR_LEN
];
3237 enum mlxsw_reg_qpcr_ir_units ir_units
;
3238 int max_cpu_policers
;
3244 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_CPU_POLICERS
))
3247 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
3249 ir_units
= MLXSW_REG_QPCR_IR_UNITS_M
;
3250 for (i
= 0; i
< max_cpu_policers
; i
++) {
3253 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP
:
3254 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP
:
3255 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP
:
3256 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF
:
3260 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP
:
3264 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4
:
3265 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP
:
3266 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP
:
3267 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS
:
3268 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
3269 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE
:
3273 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME
:
3282 mlxsw_reg_qpcr_pack(qpcr_pl
, i
, ir_units
, is_bytes
, rate
,
3284 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(qpcr
), qpcr_pl
);
3292 static int mlxsw_sp_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
3294 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
3295 enum mlxsw_reg_htgt_trap_group i
;
3296 int max_cpu_policers
;
3297 int max_trap_groups
;
3302 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_TRAP_GROUPS
))
3305 max_trap_groups
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_TRAP_GROUPS
);
3306 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
3308 for (i
= 0; i
< max_trap_groups
; i
++) {
3311 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP
:
3312 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP
:
3313 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP
:
3314 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF
:
3318 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4
:
3319 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP
:
3323 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP
:
3324 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME
:
3328 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP
:
3332 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS
:
3333 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
3334 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE
:
3338 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT
:
3339 priority
= MLXSW_REG_HTGT_DEFAULT_PRIORITY
;
3340 tc
= MLXSW_REG_HTGT_DEFAULT_TC
;
3341 policer_id
= MLXSW_REG_HTGT_INVALID_POLICER
;
3347 if (max_cpu_policers
<= policer_id
&&
3348 policer_id
!= MLXSW_REG_HTGT_INVALID_POLICER
)
3351 mlxsw_reg_htgt_pack(htgt_pl
, i
, policer_id
, priority
, tc
);
3352 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
3360 static int mlxsw_sp_traps_init(struct mlxsw_sp
*mlxsw_sp
)
3365 err
= mlxsw_sp_cpu_policers_set(mlxsw_sp
->core
);
3369 err
= mlxsw_sp_trap_groups_set(mlxsw_sp
->core
);
3373 for (i
= 0; i
< ARRAY_SIZE(mlxsw_sp_listener
); i
++) {
3374 err
= mlxsw_core_trap_register(mlxsw_sp
->core
,
3375 &mlxsw_sp_listener
[i
],
3378 goto err_listener_register
;
3383 err_listener_register
:
3384 for (i
--; i
>= 0; i
--) {
3385 mlxsw_core_trap_unregister(mlxsw_sp
->core
,
3386 &mlxsw_sp_listener
[i
],
3392 static void mlxsw_sp_traps_fini(struct mlxsw_sp
*mlxsw_sp
)
3396 for (i
= 0; i
< ARRAY_SIZE(mlxsw_sp_listener
); i
++) {
3397 mlxsw_core_trap_unregister(mlxsw_sp
->core
,
3398 &mlxsw_sp_listener
[i
],
3403 static int mlxsw_sp_lag_init(struct mlxsw_sp
*mlxsw_sp
)
3405 char slcr_pl
[MLXSW_REG_SLCR_LEN
];
3408 mlxsw_reg_slcr_pack(slcr_pl
, MLXSW_REG_SLCR_LAG_HASH_SMAC
|
3409 MLXSW_REG_SLCR_LAG_HASH_DMAC
|
3410 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE
|
3411 MLXSW_REG_SLCR_LAG_HASH_VLANID
|
3412 MLXSW_REG_SLCR_LAG_HASH_SIP
|
3413 MLXSW_REG_SLCR_LAG_HASH_DIP
|
3414 MLXSW_REG_SLCR_LAG_HASH_SPORT
|
3415 MLXSW_REG_SLCR_LAG_HASH_DPORT
|
3416 MLXSW_REG_SLCR_LAG_HASH_IPPROTO
);
3417 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcr
), slcr_pl
);
3421 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG
) ||
3422 !MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG_MEMBERS
))
3425 mlxsw_sp
->lags
= kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LAG
),
3426 sizeof(struct mlxsw_sp_upper
),
3428 if (!mlxsw_sp
->lags
)
3434 static void mlxsw_sp_lag_fini(struct mlxsw_sp
*mlxsw_sp
)
3436 kfree(mlxsw_sp
->lags
);
3439 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
3441 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
3443 mlxsw_reg_htgt_pack(htgt_pl
, MLXSW_REG_HTGT_TRAP_GROUP_EMAD
,
3444 MLXSW_REG_HTGT_INVALID_POLICER
,
3445 MLXSW_REG_HTGT_DEFAULT_PRIORITY
,
3446 MLXSW_REG_HTGT_DEFAULT_TC
);
3447 return mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
3450 static int mlxsw_sp_init(struct mlxsw_core
*mlxsw_core
,
3451 const struct mlxsw_bus_info
*mlxsw_bus_info
)
3453 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3456 mlxsw_sp
->core
= mlxsw_core
;
3457 mlxsw_sp
->bus_info
= mlxsw_bus_info
;
3459 err
= mlxsw_sp_fw_rev_validate(mlxsw_sp
);
3461 dev_err(mlxsw_sp
->bus_info
->dev
, "Could not upgrade firmware\n");
3465 err
= mlxsw_sp_base_mac_get(mlxsw_sp
);
3467 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to get base mac\n");
3471 err
= mlxsw_sp_fids_init(mlxsw_sp
);
3473 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize FIDs\n");
3477 err
= mlxsw_sp_traps_init(mlxsw_sp
);
3479 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to set traps\n");
3480 goto err_traps_init
;
3483 err
= mlxsw_sp_buffers_init(mlxsw_sp
);
3485 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize buffers\n");
3486 goto err_buffers_init
;
3489 err
= mlxsw_sp_lag_init(mlxsw_sp
);
3491 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize LAG\n");
3495 err
= mlxsw_sp_switchdev_init(mlxsw_sp
);
3497 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize switchdev\n");
3498 goto err_switchdev_init
;
3501 err
= mlxsw_sp_router_init(mlxsw_sp
);
3503 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize router\n");
3504 goto err_router_init
;
3507 err
= mlxsw_sp_span_init(mlxsw_sp
);
3509 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init span system\n");
3513 err
= mlxsw_sp_acl_init(mlxsw_sp
);
3515 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize ACL\n");
3519 err
= mlxsw_sp_counter_pool_init(mlxsw_sp
);
3521 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init counter pool\n");
3522 goto err_counter_pool_init
;
3525 err
= mlxsw_sp_dpipe_init(mlxsw_sp
);
3527 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init pipeline debug\n");
3528 goto err_dpipe_init
;
3531 err
= mlxsw_sp_ports_create(mlxsw_sp
);
3533 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create ports\n");
3534 goto err_ports_create
;
3540 mlxsw_sp_dpipe_fini(mlxsw_sp
);
3542 mlxsw_sp_counter_pool_fini(mlxsw_sp
);
3543 err_counter_pool_init
:
3544 mlxsw_sp_acl_fini(mlxsw_sp
);
3546 mlxsw_sp_span_fini(mlxsw_sp
);
3548 mlxsw_sp_router_fini(mlxsw_sp
);
3550 mlxsw_sp_switchdev_fini(mlxsw_sp
);
3552 mlxsw_sp_lag_fini(mlxsw_sp
);
3554 mlxsw_sp_buffers_fini(mlxsw_sp
);
3556 mlxsw_sp_traps_fini(mlxsw_sp
);
3558 mlxsw_sp_fids_fini(mlxsw_sp
);
/* Driver fini callback: tear down all subsystems in exact reverse of the
 * mlxsw_sp_init() bring-up order.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}
3579 static struct mlxsw_config_profile mlxsw_sp_config_profile
= {
3580 .used_max_vepa_channels
= 1,
3581 .max_vepa_channels
= 0,
3583 .max_mid
= MLXSW_SP_MID_MAX
,
3586 .used_flood_tables
= 1,
3587 .used_flood_mode
= 1,
3589 .max_fid_offset_flood_tables
= 3,
3590 .fid_offset_flood_table_size
= VLAN_N_VID
- 1,
3591 .max_fid_flood_tables
= 3,
3592 .fid_flood_table_size
= MLXSW_SP_FID_8021D_MAX
,
3593 .used_max_ib_mc
= 1,
3597 .used_kvd_split_data
= 1,
3598 .kvd_hash_granularity
= MLXSW_SP_KVD_GRANULARITY
,
3599 .kvd_hash_single_parts
= 2,
3600 .kvd_hash_double_parts
= 1,
3601 .kvd_linear_size
= MLXSW_SP_KVD_LINEAR_SIZE
,
3605 .type
= MLXSW_PORT_SWID_TYPE_ETH
,
3608 .resource_query_enable
= 1,
3611 static struct mlxsw_driver mlxsw_sp_driver
= {
3612 .kind
= mlxsw_sp_driver_name
,
3613 .priv_size
= sizeof(struct mlxsw_sp
),
3614 .init
= mlxsw_sp_init
,
3615 .fini
= mlxsw_sp_fini
,
3616 .basic_trap_groups_set
= mlxsw_sp_basic_trap_groups_set
,
3617 .port_split
= mlxsw_sp_port_split
,
3618 .port_unsplit
= mlxsw_sp_port_unsplit
,
3619 .sb_pool_get
= mlxsw_sp_sb_pool_get
,
3620 .sb_pool_set
= mlxsw_sp_sb_pool_set
,
3621 .sb_port_pool_get
= mlxsw_sp_sb_port_pool_get
,
3622 .sb_port_pool_set
= mlxsw_sp_sb_port_pool_set
,
3623 .sb_tc_pool_bind_get
= mlxsw_sp_sb_tc_pool_bind_get
,
3624 .sb_tc_pool_bind_set
= mlxsw_sp_sb_tc_pool_bind_set
,
3625 .sb_occ_snapshot
= mlxsw_sp_sb_occ_snapshot
,
3626 .sb_occ_max_clear
= mlxsw_sp_sb_occ_max_clear
,
3627 .sb_occ_port_pool_get
= mlxsw_sp_sb_occ_port_pool_get
,
3628 .sb_occ_tc_port_bind_get
= mlxsw_sp_sb_occ_tc_port_bind_get
,
3629 .txhdr_construct
= mlxsw_sp_txhdr_construct
,
3630 .txhdr_len
= MLXSW_TXHDR_LEN
,
3631 .profile
= &mlxsw_sp_config_profile
,
3634 bool mlxsw_sp_port_dev_check(const struct net_device
*dev
)
3636 return dev
->netdev_ops
== &mlxsw_sp_port_netdev_ops
;
/* netdev_walk_all_lower_dev() callback: when @lower_dev is a Spectrum port,
 * store its private data through @data and return non-zero to stop the walk.
 */
static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}
3652 struct mlxsw_sp_port
*mlxsw_sp_port_dev_lower_find(struct net_device
*dev
)
3654 struct mlxsw_sp_port
*mlxsw_sp_port
;
3656 if (mlxsw_sp_port_dev_check(dev
))
3657 return netdev_priv(dev
);
3659 mlxsw_sp_port
= NULL
;
3660 netdev_walk_all_lower_dev(dev
, mlxsw_sp_lower_dev_walk
, &mlxsw_sp_port
);
3662 return mlxsw_sp_port
;
3665 struct mlxsw_sp
*mlxsw_sp_lower_get(struct net_device
*dev
)
3667 struct mlxsw_sp_port
*mlxsw_sp_port
;
3669 mlxsw_sp_port
= mlxsw_sp_port_dev_lower_find(dev
);
3670 return mlxsw_sp_port
? mlxsw_sp_port
->mlxsw_sp
: NULL
;
3673 static struct mlxsw_sp_port
*mlxsw_sp_port_dev_lower_find_rcu(struct net_device
*dev
)
3675 struct mlxsw_sp_port
*mlxsw_sp_port
;
3677 if (mlxsw_sp_port_dev_check(dev
))
3678 return netdev_priv(dev
);
3680 mlxsw_sp_port
= NULL
;
3681 netdev_walk_all_lower_dev_rcu(dev
, mlxsw_sp_lower_dev_walk
,
3684 return mlxsw_sp_port
;
3687 struct mlxsw_sp_port
*mlxsw_sp_port_lower_dev_hold(struct net_device
*dev
)
3689 struct mlxsw_sp_port
*mlxsw_sp_port
;
3692 mlxsw_sp_port
= mlxsw_sp_port_dev_lower_find_rcu(dev
);
3694 dev_hold(mlxsw_sp_port
->dev
);
3696 return mlxsw_sp_port
;
3699 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port
*mlxsw_sp_port
)
3701 dev_put(mlxsw_sp_port
->dev
);
3704 static int mlxsw_sp_lag_create(struct mlxsw_sp
*mlxsw_sp
, u16 lag_id
)
3706 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
3708 mlxsw_reg_sldr_lag_create_pack(sldr_pl
, lag_id
);
3709 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
3712 static int mlxsw_sp_lag_destroy(struct mlxsw_sp
*mlxsw_sp
, u16 lag_id
)
3714 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
3716 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl
, lag_id
);
3717 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
3720 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
3721 u16 lag_id
, u8 port_index
)
3723 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3724 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
3726 mlxsw_reg_slcor_port_add_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
3727 lag_id
, port_index
);
3728 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
3731 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port
*mlxsw_sp_port
,
3734 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3735 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
3737 mlxsw_reg_slcor_port_remove_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
3739 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
3742 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port
*mlxsw_sp_port
,
3745 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3746 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
3748 mlxsw_reg_slcor_col_enable_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
3750 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
3753 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port
*mlxsw_sp_port
,
3756 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3757 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
3759 mlxsw_reg_slcor_col_disable_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
3761 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
3764 static int mlxsw_sp_lag_index_get(struct mlxsw_sp
*mlxsw_sp
,
3765 struct net_device
*lag_dev
,
3768 struct mlxsw_sp_upper
*lag
;
3769 int free_lag_id
= -1;
3773 max_lag
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LAG
);
3774 for (i
= 0; i
< max_lag
; i
++) {
3775 lag
= mlxsw_sp_lag_get(mlxsw_sp
, i
);
3776 if (lag
->ref_count
) {
3777 if (lag
->dev
== lag_dev
) {
3781 } else if (free_lag_id
< 0) {
3785 if (free_lag_id
< 0)
3787 *p_lag_id
= free_lag_id
;
3792 mlxsw_sp_master_lag_check(struct mlxsw_sp
*mlxsw_sp
,
3793 struct net_device
*lag_dev
,
3794 struct netdev_lag_upper_info
*lag_upper_info
)
3798 if (mlxsw_sp_lag_index_get(mlxsw_sp
, lag_dev
, &lag_id
) != 0)
3800 if (lag_upper_info
->tx_type
!= NETDEV_LAG_TX_TYPE_HASH
)
3805 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp
*mlxsw_sp
,
3806 u16 lag_id
, u8
*p_port_index
)
3808 u64 max_lag_members
;
3811 max_lag_members
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
3813 for (i
= 0; i
< max_lag_members
; i
++) {
3814 if (!mlxsw_sp_port_lagged_get(mlxsw_sp
, lag_id
, i
)) {
3822 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port
*mlxsw_sp_port
,
3823 struct net_device
*lag_dev
)
3825 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3826 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
3827 struct mlxsw_sp_upper
*lag
;
3832 err
= mlxsw_sp_lag_index_get(mlxsw_sp
, lag_dev
, &lag_id
);
3835 lag
= mlxsw_sp_lag_get(mlxsw_sp
, lag_id
);
3836 if (!lag
->ref_count
) {
3837 err
= mlxsw_sp_lag_create(mlxsw_sp
, lag_id
);
3843 err
= mlxsw_sp_port_lag_index_get(mlxsw_sp
, lag_id
, &port_index
);
3846 err
= mlxsw_sp_lag_col_port_add(mlxsw_sp_port
, lag_id
, port_index
);
3848 goto err_col_port_add
;
3849 err
= mlxsw_sp_lag_col_port_enable(mlxsw_sp_port
, lag_id
);
3851 goto err_col_port_enable
;
3853 mlxsw_core_lag_mapping_set(mlxsw_sp
->core
, lag_id
, port_index
,
3854 mlxsw_sp_port
->local_port
);
3855 mlxsw_sp_port
->lag_id
= lag_id
;
3856 mlxsw_sp_port
->lagged
= 1;
3859 /* Port is no longer usable as a router interface */
3860 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, 1);
3861 if (mlxsw_sp_port_vlan
->fid
)
3862 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan
);
3866 err_col_port_enable
:
3867 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port
, lag_id
);
3869 if (!lag
->ref_count
)
3870 mlxsw_sp_lag_destroy(mlxsw_sp
, lag_id
);
3874 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port
*mlxsw_sp_port
,
3875 struct net_device
*lag_dev
)
3877 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3878 u16 lag_id
= mlxsw_sp_port
->lag_id
;
3879 struct mlxsw_sp_upper
*lag
;
3881 if (!mlxsw_sp_port
->lagged
)
3883 lag
= mlxsw_sp_lag_get(mlxsw_sp
, lag_id
);
3884 WARN_ON(lag
->ref_count
== 0);
3886 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port
, lag_id
);
3887 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port
, lag_id
);
3889 /* Any VLANs configured on the port are no longer valid */
3890 mlxsw_sp_port_vlan_flush(mlxsw_sp_port
);
3892 if (lag
->ref_count
== 1)
3893 mlxsw_sp_lag_destroy(mlxsw_sp
, lag_id
);
3895 mlxsw_core_lag_mapping_clear(mlxsw_sp
->core
, lag_id
,
3896 mlxsw_sp_port
->local_port
);
3897 mlxsw_sp_port
->lagged
= 0;
3900 mlxsw_sp_port_vlan_get(mlxsw_sp_port
, 1);
3901 /* Make sure untagged frames are allowed to ingress */
3902 mlxsw_sp_port_pvid_set(mlxsw_sp_port
, 1);
3905 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
3908 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3909 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
3911 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl
, lag_id
,
3912 mlxsw_sp_port
->local_port
);
3913 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
3916 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port
*mlxsw_sp_port
,
3919 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3920 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
3922 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl
, lag_id
,
3923 mlxsw_sp_port
->local_port
);
3924 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
3927 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
3928 bool lag_tx_enabled
)
3931 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port
,
3932 mlxsw_sp_port
->lag_id
);
3934 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port
,
3935 mlxsw_sp_port
->lag_id
);
3938 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port
*mlxsw_sp_port
,
3939 struct netdev_lag_lower_state_info
*info
)
3941 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port
, info
->tx_enabled
);
3944 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
3947 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3948 enum mlxsw_reg_spms_state spms_state
;
3953 spms_state
= enable
? MLXSW_REG_SPMS_STATE_FORWARDING
:
3954 MLXSW_REG_SPMS_STATE_DISCARDING
;
3956 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
3959 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sp_port
->local_port
);
3961 for (vid
= 0; vid
< VLAN_N_VID
; vid
++)
3962 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, spms_state
);
3964 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spms
), spms_pl
);
3969 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port
*mlxsw_sp_port
)
3973 err
= mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, true);
3976 err
= mlxsw_sp_port_stp_set(mlxsw_sp_port
, true);
3978 goto err_port_stp_set
;
3979 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, 2, VLAN_N_VID
- 1,
3982 goto err_port_vlan_set
;
3986 mlxsw_sp_port_stp_set(mlxsw_sp_port
, false);
3988 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, false);
3992 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port
*mlxsw_sp_port
)
3994 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, 2, VLAN_N_VID
- 1,
3996 mlxsw_sp_port_stp_set(mlxsw_sp_port
, false);
3997 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, false);
4000 static int mlxsw_sp_netdevice_port_upper_event(struct net_device
*lower_dev
,
4001 struct net_device
*dev
,
4002 unsigned long event
, void *ptr
)
4004 struct netdev_notifier_changeupper_info
*info
;
4005 struct mlxsw_sp_port
*mlxsw_sp_port
;
4006 struct net_device
*upper_dev
;
4007 struct mlxsw_sp
*mlxsw_sp
;
4010 mlxsw_sp_port
= netdev_priv(dev
);
4011 mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
4015 case NETDEV_PRECHANGEUPPER
:
4016 upper_dev
= info
->upper_dev
;
4017 if (!is_vlan_dev(upper_dev
) &&
4018 !netif_is_lag_master(upper_dev
) &&
4019 !netif_is_bridge_master(upper_dev
) &&
4020 !netif_is_ovs_master(upper_dev
))
4024 if (netif_is_lag_master(upper_dev
) &&
4025 !mlxsw_sp_master_lag_check(mlxsw_sp
, upper_dev
,
4028 if (netif_is_lag_master(upper_dev
) && vlan_uses_dev(dev
))
4030 if (netif_is_lag_port(dev
) && is_vlan_dev(upper_dev
) &&
4031 !netif_is_lag_master(vlan_dev_real_dev(upper_dev
)))
4033 if (netif_is_ovs_master(upper_dev
) && vlan_uses_dev(dev
))
4035 if (netif_is_ovs_port(dev
) && is_vlan_dev(upper_dev
))
4038 case NETDEV_CHANGEUPPER
:
4039 upper_dev
= info
->upper_dev
;
4040 if (netif_is_bridge_master(upper_dev
)) {
4042 err
= mlxsw_sp_port_bridge_join(mlxsw_sp_port
,
4046 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
,
4049 } else if (netif_is_lag_master(upper_dev
)) {
4051 err
= mlxsw_sp_port_lag_join(mlxsw_sp_port
,
4054 mlxsw_sp_port_lag_leave(mlxsw_sp_port
,
4056 } else if (netif_is_ovs_master(upper_dev
)) {
4058 err
= mlxsw_sp_port_ovs_join(mlxsw_sp_port
);
4060 mlxsw_sp_port_ovs_leave(mlxsw_sp_port
);
4068 static int mlxsw_sp_netdevice_port_lower_event(struct net_device
*dev
,
4069 unsigned long event
, void *ptr
)
4071 struct netdev_notifier_changelowerstate_info
*info
;
4072 struct mlxsw_sp_port
*mlxsw_sp_port
;
4075 mlxsw_sp_port
= netdev_priv(dev
);
4079 case NETDEV_CHANGELOWERSTATE
:
4080 if (netif_is_lag_port(dev
) && mlxsw_sp_port
->lagged
) {
4081 err
= mlxsw_sp_port_lag_changed(mlxsw_sp_port
,
4082 info
->lower_state_info
);
4084 netdev_err(dev
, "Failed to reflect link aggregation lower state change\n");
4092 static int mlxsw_sp_netdevice_port_event(struct net_device
*lower_dev
,
4093 struct net_device
*port_dev
,
4094 unsigned long event
, void *ptr
)
4097 case NETDEV_PRECHANGEUPPER
:
4098 case NETDEV_CHANGEUPPER
:
4099 return mlxsw_sp_netdevice_port_upper_event(lower_dev
, port_dev
,
4101 case NETDEV_CHANGELOWERSTATE
:
4102 return mlxsw_sp_netdevice_port_lower_event(port_dev
, event
,
4109 static int mlxsw_sp_netdevice_lag_event(struct net_device
*lag_dev
,
4110 unsigned long event
, void *ptr
)
4112 struct net_device
*dev
;
4113 struct list_head
*iter
;
4116 netdev_for_each_lower_dev(lag_dev
, dev
, iter
) {
4117 if (mlxsw_sp_port_dev_check(dev
)) {
4118 ret
= mlxsw_sp_netdevice_port_event(lag_dev
, dev
, event
,
4128 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device
*vlan_dev
,
4129 struct net_device
*dev
,
4130 unsigned long event
, void *ptr
,
4133 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
4134 struct netdev_notifier_changeupper_info
*info
= ptr
;
4135 struct net_device
*upper_dev
;
4139 case NETDEV_PRECHANGEUPPER
:
4140 upper_dev
= info
->upper_dev
;
4141 if (!netif_is_bridge_master(upper_dev
))
4144 case NETDEV_CHANGEUPPER
:
4145 upper_dev
= info
->upper_dev
;
4146 if (netif_is_bridge_master(upper_dev
)) {
4148 err
= mlxsw_sp_port_bridge_join(mlxsw_sp_port
,
4152 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
,
4165 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device
*vlan_dev
,
4166 struct net_device
*lag_dev
,
4167 unsigned long event
,
4170 struct net_device
*dev
;
4171 struct list_head
*iter
;
4174 netdev_for_each_lower_dev(lag_dev
, dev
, iter
) {
4175 if (mlxsw_sp_port_dev_check(dev
)) {
4176 ret
= mlxsw_sp_netdevice_port_vlan_event(vlan_dev
, dev
,
4187 static int mlxsw_sp_netdevice_vlan_event(struct net_device
*vlan_dev
,
4188 unsigned long event
, void *ptr
)
4190 struct net_device
*real_dev
= vlan_dev_real_dev(vlan_dev
);
4191 u16 vid
= vlan_dev_vlan_id(vlan_dev
);
4193 if (mlxsw_sp_port_dev_check(real_dev
))
4194 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev
, real_dev
,
4196 else if (netif_is_lag_master(real_dev
))
4197 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev
,
4204 static bool mlxsw_sp_is_vrf_event(unsigned long event
, void *ptr
)
4206 struct netdev_notifier_changeupper_info
*info
= ptr
;
4208 if (event
!= NETDEV_PRECHANGEUPPER
&& event
!= NETDEV_CHANGEUPPER
)
4210 return netif_is_l3_master(info
->upper_dev
);
4213 static int mlxsw_sp_netdevice_event(struct notifier_block
*unused
,
4214 unsigned long event
, void *ptr
)
4216 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
4219 if (event
== NETDEV_CHANGEADDR
|| event
== NETDEV_CHANGEMTU
)
4220 err
= mlxsw_sp_netdevice_router_port_event(dev
);
4221 else if (mlxsw_sp_is_vrf_event(event
, ptr
))
4222 err
= mlxsw_sp_netdevice_vrf_event(dev
, event
, ptr
);
4223 else if (mlxsw_sp_port_dev_check(dev
))
4224 err
= mlxsw_sp_netdevice_port_event(dev
, dev
, event
, ptr
);
4225 else if (netif_is_lag_master(dev
))
4226 err
= mlxsw_sp_netdevice_lag_event(dev
, event
, ptr
);
4227 else if (is_vlan_dev(dev
))
4228 err
= mlxsw_sp_netdevice_vlan_event(dev
, event
, ptr
);
4230 return notifier_from_errno(err
);
4233 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly
= {
4234 .notifier_call
= mlxsw_sp_netdevice_event
,
4237 static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly
= {
4238 .notifier_call
= mlxsw_sp_inetaddr_event
,
4239 .priority
= 10, /* Must be called before FIB notifier block */
4242 static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly
= {
4243 .notifier_call
= mlxsw_sp_router_netevent_event
,
4246 static const struct pci_device_id mlxsw_sp_pci_id_table
[] = {
4247 {PCI_VDEVICE(MELLANOX
, PCI_DEVICE_ID_MELLANOX_SPECTRUM
), 0},
4251 static struct pci_driver mlxsw_sp_pci_driver
= {
4252 .name
= mlxsw_sp_driver_name
,
4253 .id_table
= mlxsw_sp_pci_id_table
,
4256 static int __init
mlxsw_sp_module_init(void)
4260 register_netdevice_notifier(&mlxsw_sp_netdevice_nb
);
4261 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb
);
4262 register_netevent_notifier(&mlxsw_sp_router_netevent_nb
);
4264 err
= mlxsw_core_driver_register(&mlxsw_sp_driver
);
4266 goto err_core_driver_register
;
4268 err
= mlxsw_pci_driver_register(&mlxsw_sp_pci_driver
);
4270 goto err_pci_driver_register
;
4274 err_pci_driver_register
:
4275 mlxsw_core_driver_unregister(&mlxsw_sp_driver
);
4276 err_core_driver_register
:
4277 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb
);
4278 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb
);
4279 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb
);
4283 static void __exit
mlxsw_sp_module_exit(void)
4285 mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver
);
4286 mlxsw_core_driver_unregister(&mlxsw_sp_driver
);
4287 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb
);
4288 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb
);
4289 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb
);
4292 module_init(mlxsw_sp_module_init
);
4293 module_exit(mlxsw_sp_module_exit
);
4295 MODULE_LICENSE("Dual BSD/GPL");
4296 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4297 MODULE_DESCRIPTION("Mellanox Spectrum driver");
4298 MODULE_DEVICE_TABLE(pci
, mlxsw_sp_pci_id_table
);
4299 MODULE_FIRMWARE(MLXSW_SP_FW_FILENAME
);