2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/ethtool.h>
44 #include <linux/slab.h>
45 #include <linux/device.h>
46 #include <linux/skbuff.h>
47 #include <linux/if_vlan.h>
48 #include <linux/if_bridge.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/bitops.h>
52 #include <linux/list.h>
53 #include <linux/notifier.h>
54 #include <linux/dcbnl.h>
55 #include <linux/inetdevice.h>
56 #include <net/switchdev.h>
57 #include <net/pkt_cls.h>
58 #include <net/tc_act/tc_mirred.h>
59 #include <net/netevent.h>
60 #include <net/tc_act/tc_sample.h>
69 #include "spectrum_cnt.h"
70 #include "spectrum_dpipe.h"
72 static const char mlxsw_sp_driver_name
[] = "mlxsw_spectrum";
73 static const char mlxsw_sp_driver_version
[] = "1.0";
79 MLXSW_ITEM32(tx
, hdr
, version
, 0x00, 28, 4);
82 * Packet control type.
83 * 0 - Ethernet control (e.g. EMADs, LACP)
86 MLXSW_ITEM32(tx
, hdr
, ctl
, 0x00, 26, 2);
89 * Packet protocol type. Must be set to 1 (Ethernet).
91 MLXSW_ITEM32(tx
, hdr
, proto
, 0x00, 21, 3);
93 /* tx_hdr_rx_is_router
94 * Packet is sent from the router. Valid for data packets only.
96 MLXSW_ITEM32(tx
, hdr
, rx_is_router
, 0x00, 19, 1);
99 * Indicates if the 'fid' field is valid and should be used for
100 * forwarding lookup. Valid for data packets only.
102 MLXSW_ITEM32(tx
, hdr
, fid_valid
, 0x00, 16, 1);
105 * Switch partition ID. Must be set to 0.
107 MLXSW_ITEM32(tx
, hdr
, swid
, 0x00, 12, 3);
109 /* tx_hdr_control_tclass
110 * Indicates if the packet should use the control TClass and not one
111 * of the data TClasses.
113 MLXSW_ITEM32(tx
, hdr
, control_tclass
, 0x00, 6, 1);
116 * Egress TClass to be used on the egress device on the egress port.
118 MLXSW_ITEM32(tx
, hdr
, etclass
, 0x00, 0, 4);
121 * Destination local port for unicast packets.
122 * Destination multicast ID for multicast packets.
124 * Control packets are directed to a specific egress port, while data
125 * packets are transmitted through the CPU port (0) into the switch partition,
126 * where forwarding rules are applied.
128 MLXSW_ITEM32(tx
, hdr
, port_mid
, 0x04, 16, 16);
131 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
132 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
133 * Valid for data packets only.
135 MLXSW_ITEM32(tx
, hdr
, fid
, 0x08, 0, 16);
139 * 6 - Control packets
141 MLXSW_ITEM32(tx
, hdr
, type
, 0x0C, 0, 4);
143 int mlxsw_sp_flow_counter_get(struct mlxsw_sp
*mlxsw_sp
,
144 unsigned int counter_index
, u64
*packets
,
147 char mgpc_pl
[MLXSW_REG_MGPC_LEN
];
150 mlxsw_reg_mgpc_pack(mgpc_pl
, counter_index
, MLXSW_REG_MGPC_OPCODE_NOP
,
151 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES
);
152 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(mgpc
), mgpc_pl
);
155 *packets
= mlxsw_reg_mgpc_packet_counter_get(mgpc_pl
);
156 *bytes
= mlxsw_reg_mgpc_byte_counter_get(mgpc_pl
);
160 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp
*mlxsw_sp
,
161 unsigned int counter_index
)
163 char mgpc_pl
[MLXSW_REG_MGPC_LEN
];
165 mlxsw_reg_mgpc_pack(mgpc_pl
, counter_index
, MLXSW_REG_MGPC_OPCODE_CLEAR
,
166 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES
);
167 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mgpc
), mgpc_pl
);
170 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp
*mlxsw_sp
,
171 unsigned int *p_counter_index
)
175 err
= mlxsw_sp_counter_alloc(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
179 err
= mlxsw_sp_flow_counter_clear(mlxsw_sp
, *p_counter_index
);
181 goto err_counter_clear
;
185 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
190 void mlxsw_sp_flow_counter_free(struct mlxsw_sp
*mlxsw_sp
,
191 unsigned int counter_index
)
193 mlxsw_sp_counter_free(mlxsw_sp
, MLXSW_SP_COUNTER_SUB_POOL_FLOW
,
197 static void mlxsw_sp_txhdr_construct(struct sk_buff
*skb
,
198 const struct mlxsw_tx_info
*tx_info
)
200 char *txhdr
= skb_push(skb
, MLXSW_TXHDR_LEN
);
202 memset(txhdr
, 0, MLXSW_TXHDR_LEN
);
204 mlxsw_tx_hdr_version_set(txhdr
, MLXSW_TXHDR_VERSION_1
);
205 mlxsw_tx_hdr_ctl_set(txhdr
, MLXSW_TXHDR_ETH_CTL
);
206 mlxsw_tx_hdr_proto_set(txhdr
, MLXSW_TXHDR_PROTO_ETH
);
207 mlxsw_tx_hdr_swid_set(txhdr
, 0);
208 mlxsw_tx_hdr_control_tclass_set(txhdr
, 1);
209 mlxsw_tx_hdr_port_mid_set(txhdr
, tx_info
->local_port
);
210 mlxsw_tx_hdr_type_set(txhdr
, MLXSW_TXHDR_TYPE_CONTROL
);
213 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
,
216 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
217 enum mlxsw_reg_spms_state spms_state
;
222 case BR_STATE_FORWARDING
:
223 spms_state
= MLXSW_REG_SPMS_STATE_FORWARDING
;
225 case BR_STATE_LEARNING
:
226 spms_state
= MLXSW_REG_SPMS_STATE_LEARNING
;
228 case BR_STATE_LISTENING
: /* fall-through */
229 case BR_STATE_DISABLED
: /* fall-through */
230 case BR_STATE_BLOCKING
:
231 spms_state
= MLXSW_REG_SPMS_STATE_DISCARDING
;
237 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
240 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sp_port
->local_port
);
241 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, spms_state
);
243 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spms
), spms_pl
);
248 static int mlxsw_sp_base_mac_get(struct mlxsw_sp
*mlxsw_sp
)
250 char spad_pl
[MLXSW_REG_SPAD_LEN
] = {0};
253 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(spad
), spad_pl
);
256 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl
, mlxsw_sp
->base_mac
);
260 static int mlxsw_sp_span_init(struct mlxsw_sp
*mlxsw_sp
)
264 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_SPAN
))
267 mlxsw_sp
->span
.entries_count
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
269 mlxsw_sp
->span
.entries
= kcalloc(mlxsw_sp
->span
.entries_count
,
270 sizeof(struct mlxsw_sp_span_entry
),
272 if (!mlxsw_sp
->span
.entries
)
275 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++)
276 INIT_LIST_HEAD(&mlxsw_sp
->span
.entries
[i
].bound_ports_list
);
281 static void mlxsw_sp_span_fini(struct mlxsw_sp
*mlxsw_sp
)
285 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
286 struct mlxsw_sp_span_entry
*curr
= &mlxsw_sp
->span
.entries
[i
];
288 WARN_ON_ONCE(!list_empty(&curr
->bound_ports_list
));
290 kfree(mlxsw_sp
->span
.entries
);
293 static struct mlxsw_sp_span_entry
*
294 mlxsw_sp_span_entry_create(struct mlxsw_sp_port
*port
)
296 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
297 struct mlxsw_sp_span_entry
*span_entry
;
298 char mpat_pl
[MLXSW_REG_MPAT_LEN
];
299 u8 local_port
= port
->local_port
;
304 /* find a free entry to use */
306 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
307 if (!mlxsw_sp
->span
.entries
[i
].used
) {
309 span_entry
= &mlxsw_sp
->span
.entries
[i
];
316 /* create a new port analayzer entry for local_port */
317 mlxsw_reg_mpat_pack(mpat_pl
, index
, local_port
, true);
318 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpat
), mpat_pl
);
322 span_entry
->used
= true;
323 span_entry
->id
= index
;
324 span_entry
->ref_count
= 1;
325 span_entry
->local_port
= local_port
;
329 static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp
*mlxsw_sp
,
330 struct mlxsw_sp_span_entry
*span_entry
)
332 u8 local_port
= span_entry
->local_port
;
333 char mpat_pl
[MLXSW_REG_MPAT_LEN
];
334 int pa_id
= span_entry
->id
;
336 mlxsw_reg_mpat_pack(mpat_pl
, pa_id
, local_port
, false);
337 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpat
), mpat_pl
);
338 span_entry
->used
= false;
341 static struct mlxsw_sp_span_entry
*
342 mlxsw_sp_span_entry_find(struct mlxsw_sp_port
*port
)
344 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
347 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
348 struct mlxsw_sp_span_entry
*curr
= &mlxsw_sp
->span
.entries
[i
];
350 if (curr
->used
&& curr
->local_port
== port
->local_port
)
356 static struct mlxsw_sp_span_entry
357 *mlxsw_sp_span_entry_get(struct mlxsw_sp_port
*port
)
359 struct mlxsw_sp_span_entry
*span_entry
;
361 span_entry
= mlxsw_sp_span_entry_find(port
);
363 /* Already exists, just take a reference */
364 span_entry
->ref_count
++;
368 return mlxsw_sp_span_entry_create(port
);
371 static int mlxsw_sp_span_entry_put(struct mlxsw_sp
*mlxsw_sp
,
372 struct mlxsw_sp_span_entry
*span_entry
)
374 WARN_ON(!span_entry
->ref_count
);
375 if (--span_entry
->ref_count
== 0)
376 mlxsw_sp_span_entry_destroy(mlxsw_sp
, span_entry
);
380 static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port
*port
)
382 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
383 struct mlxsw_sp_span_inspected_port
*p
;
386 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
387 struct mlxsw_sp_span_entry
*curr
= &mlxsw_sp
->span
.entries
[i
];
389 list_for_each_entry(p
, &curr
->bound_ports_list
, list
)
390 if (p
->local_port
== port
->local_port
&&
391 p
->type
== MLXSW_SP_SPAN_EGRESS
)
398 static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp
*mlxsw_sp
,
401 return mlxsw_sp_bytes_cells(mlxsw_sp
, mtu
* 5 / 2) + 1;
404 static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port
*port
, u16 mtu
)
406 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
407 char sbib_pl
[MLXSW_REG_SBIB_LEN
];
410 /* If port is egress mirrored, the shared buffer size should be
411 * updated according to the mtu value
413 if (mlxsw_sp_span_is_egress_mirror(port
)) {
414 u32 buffsize
= mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp
, mtu
);
416 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
, buffsize
);
417 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
419 netdev_err(port
->dev
, "Could not update shared buffer for mirroring\n");
427 static struct mlxsw_sp_span_inspected_port
*
428 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port
*port
,
429 struct mlxsw_sp_span_entry
*span_entry
)
431 struct mlxsw_sp_span_inspected_port
*p
;
433 list_for_each_entry(p
, &span_entry
->bound_ports_list
, list
)
434 if (port
->local_port
== p
->local_port
)
440 mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port
*port
,
441 struct mlxsw_sp_span_entry
*span_entry
,
442 enum mlxsw_sp_span_type type
)
444 struct mlxsw_sp_span_inspected_port
*inspected_port
;
445 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
446 char mpar_pl
[MLXSW_REG_MPAR_LEN
];
447 char sbib_pl
[MLXSW_REG_SBIB_LEN
];
448 int pa_id
= span_entry
->id
;
451 /* if it is an egress SPAN, bind a shared buffer to it */
452 if (type
== MLXSW_SP_SPAN_EGRESS
) {
453 u32 buffsize
= mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp
,
456 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
, buffsize
);
457 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
459 netdev_err(port
->dev
, "Could not create shared buffer for mirroring\n");
464 /* bind the port to the SPAN entry */
465 mlxsw_reg_mpar_pack(mpar_pl
, port
->local_port
,
466 (enum mlxsw_reg_mpar_i_e
) type
, true, pa_id
);
467 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpar
), mpar_pl
);
469 goto err_mpar_reg_write
;
471 inspected_port
= kzalloc(sizeof(*inspected_port
), GFP_KERNEL
);
472 if (!inspected_port
) {
474 goto err_inspected_port_alloc
;
476 inspected_port
->local_port
= port
->local_port
;
477 inspected_port
->type
= type
;
478 list_add_tail(&inspected_port
->list
, &span_entry
->bound_ports_list
);
483 err_inspected_port_alloc
:
484 if (type
== MLXSW_SP_SPAN_EGRESS
) {
485 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
, 0);
486 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
492 mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port
*port
,
493 struct mlxsw_sp_span_entry
*span_entry
,
494 enum mlxsw_sp_span_type type
)
496 struct mlxsw_sp_span_inspected_port
*inspected_port
;
497 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
498 char mpar_pl
[MLXSW_REG_MPAR_LEN
];
499 char sbib_pl
[MLXSW_REG_SBIB_LEN
];
500 int pa_id
= span_entry
->id
;
502 inspected_port
= mlxsw_sp_span_entry_bound_port_find(port
, span_entry
);
506 /* remove the inspected port */
507 mlxsw_reg_mpar_pack(mpar_pl
, port
->local_port
,
508 (enum mlxsw_reg_mpar_i_e
) type
, false, pa_id
);
509 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpar
), mpar_pl
);
511 /* remove the SBIB buffer if it was egress SPAN */
512 if (type
== MLXSW_SP_SPAN_EGRESS
) {
513 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
, 0);
514 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
517 mlxsw_sp_span_entry_put(mlxsw_sp
, span_entry
);
519 list_del(&inspected_port
->list
);
520 kfree(inspected_port
);
523 static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port
*from
,
524 struct mlxsw_sp_port
*to
,
525 enum mlxsw_sp_span_type type
)
527 struct mlxsw_sp
*mlxsw_sp
= from
->mlxsw_sp
;
528 struct mlxsw_sp_span_entry
*span_entry
;
531 span_entry
= mlxsw_sp_span_entry_get(to
);
535 netdev_dbg(from
->dev
, "Adding inspected port to SPAN entry %d\n",
538 err
= mlxsw_sp_span_inspected_port_bind(from
, span_entry
, type
);
545 mlxsw_sp_span_entry_put(mlxsw_sp
, span_entry
);
549 static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port
*from
,
550 struct mlxsw_sp_port
*to
,
551 enum mlxsw_sp_span_type type
)
553 struct mlxsw_sp_span_entry
*span_entry
;
555 span_entry
= mlxsw_sp_span_entry_find(to
);
557 netdev_err(from
->dev
, "no span entry found\n");
561 netdev_dbg(from
->dev
, "removing inspected port from SPAN entry %d\n",
563 mlxsw_sp_span_inspected_port_unbind(from
, span_entry
, type
);
566 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
567 bool enable
, u32 rate
)
569 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
570 char mpsc_pl
[MLXSW_REG_MPSC_LEN
];
572 mlxsw_reg_mpsc_pack(mpsc_pl
, mlxsw_sp_port
->local_port
, enable
, rate
);
573 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpsc
), mpsc_pl
);
576 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
579 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
580 char paos_pl
[MLXSW_REG_PAOS_LEN
];
582 mlxsw_reg_paos_pack(paos_pl
, mlxsw_sp_port
->local_port
,
583 is_up
? MLXSW_PORT_ADMIN_STATUS_UP
:
584 MLXSW_PORT_ADMIN_STATUS_DOWN
);
585 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(paos
), paos_pl
);
588 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
591 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
592 char ppad_pl
[MLXSW_REG_PPAD_LEN
];
594 mlxsw_reg_ppad_pack(ppad_pl
, true, mlxsw_sp_port
->local_port
);
595 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl
, addr
);
596 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ppad
), ppad_pl
);
599 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
601 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
602 unsigned char *addr
= mlxsw_sp_port
->dev
->dev_addr
;
604 ether_addr_copy(addr
, mlxsw_sp
->base_mac
);
605 addr
[ETH_ALEN
- 1] += mlxsw_sp_port
->local_port
;
606 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
);
609 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 mtu
)
611 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
612 char pmtu_pl
[MLXSW_REG_PMTU_LEN
];
616 mtu
+= MLXSW_TXHDR_LEN
+ ETH_HLEN
;
617 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, 0);
618 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
621 max_mtu
= mlxsw_reg_pmtu_max_mtu_get(pmtu_pl
);
626 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, mtu
);
627 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
630 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
633 char pspa_pl
[MLXSW_REG_PSPA_LEN
];
635 mlxsw_reg_pspa_pack(pspa_pl
, swid
, local_port
);
636 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pspa
), pspa_pl
);
639 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u8 swid
)
641 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
643 return __mlxsw_sp_port_swid_set(mlxsw_sp
, mlxsw_sp_port
->local_port
,
647 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
650 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
651 char svpe_pl
[MLXSW_REG_SVPE_LEN
];
653 mlxsw_reg_svpe_pack(svpe_pl
, mlxsw_sp_port
->local_port
, enable
);
654 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(svpe
), svpe_pl
);
657 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
658 enum mlxsw_reg_svfa_mt mt
, bool valid
, u16 fid
,
661 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
662 char svfa_pl
[MLXSW_REG_SVFA_LEN
];
664 mlxsw_reg_svfa_pack(svfa_pl
, mlxsw_sp_port
->local_port
, mt
, valid
,
666 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(svfa
), svfa_pl
);
669 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
,
672 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
676 spvmlr_pl
= kmalloc(MLXSW_REG_SPVMLR_LEN
, GFP_KERNEL
);
679 mlxsw_reg_spvmlr_pack(spvmlr_pl
, mlxsw_sp_port
->local_port
, vid
, vid
,
681 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvmlr
), spvmlr_pl
);
687 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port
*mlxsw_sp_port
)
689 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
690 char sspr_pl
[MLXSW_REG_SSPR_LEN
];
692 mlxsw_reg_sspr_pack(sspr_pl
, mlxsw_sp_port
->local_port
);
693 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sspr
), sspr_pl
);
696 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp
*mlxsw_sp
,
697 u8 local_port
, u8
*p_module
,
698 u8
*p_width
, u8
*p_lane
)
700 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
703 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
704 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
707 *p_module
= mlxsw_reg_pmlp_module_get(pmlp_pl
, 0);
708 *p_width
= mlxsw_reg_pmlp_width_get(pmlp_pl
);
709 *p_lane
= mlxsw_reg_pmlp_tx_lane_get(pmlp_pl
, 0);
713 static int mlxsw_sp_port_module_map(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
714 u8 module
, u8 width
, u8 lane
)
716 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
719 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
720 mlxsw_reg_pmlp_width_set(pmlp_pl
, width
);
721 for (i
= 0; i
< width
; i
++) {
722 mlxsw_reg_pmlp_module_set(pmlp_pl
, i
, module
);
723 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl
, i
, lane
+ i
); /* Rx & Tx */
726 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
729 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
731 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
733 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
734 mlxsw_reg_pmlp_width_set(pmlp_pl
, 0);
735 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
738 static int mlxsw_sp_port_open(struct net_device
*dev
)
740 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
743 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
746 netif_start_queue(dev
);
750 static int mlxsw_sp_port_stop(struct net_device
*dev
)
752 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
754 netif_stop_queue(dev
);
755 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
758 static netdev_tx_t
mlxsw_sp_port_xmit(struct sk_buff
*skb
,
759 struct net_device
*dev
)
761 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
762 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
763 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
764 const struct mlxsw_tx_info tx_info
= {
765 .local_port
= mlxsw_sp_port
->local_port
,
771 if (mlxsw_core_skb_transmit_busy(mlxsw_sp
->core
, &tx_info
))
772 return NETDEV_TX_BUSY
;
774 if (unlikely(skb_headroom(skb
) < MLXSW_TXHDR_LEN
)) {
775 struct sk_buff
*skb_orig
= skb
;
777 skb
= skb_realloc_headroom(skb
, MLXSW_TXHDR_LEN
);
779 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
780 dev_kfree_skb_any(skb_orig
);
783 dev_consume_skb_any(skb_orig
);
786 if (eth_skb_pad(skb
)) {
787 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
791 mlxsw_sp_txhdr_construct(skb
, &tx_info
);
792 /* TX header is consumed by HW on the way so we shouldn't count its
793 * bytes as being sent.
795 len
= skb
->len
- MLXSW_TXHDR_LEN
;
797 /* Due to a race we might fail here because of a full queue. In that
798 * unlikely case we simply drop the packet.
800 err
= mlxsw_core_skb_transmit(mlxsw_sp
->core
, skb
, &tx_info
);
803 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
804 u64_stats_update_begin(&pcpu_stats
->syncp
);
805 pcpu_stats
->tx_packets
++;
806 pcpu_stats
->tx_bytes
+= len
;
807 u64_stats_update_end(&pcpu_stats
->syncp
);
809 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
810 dev_kfree_skb_any(skb
);
815 static void mlxsw_sp_set_rx_mode(struct net_device
*dev
)
819 static int mlxsw_sp_port_set_mac_address(struct net_device
*dev
, void *p
)
821 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
822 struct sockaddr
*addr
= p
;
825 if (!is_valid_ether_addr(addr
->sa_data
))
826 return -EADDRNOTAVAIL
;
828 err
= mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
->sa_data
);
831 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
835 static u16
mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp
*mlxsw_sp
,
838 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp
, mtu
);
841 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
843 static u16
mlxsw_sp_pfc_delay_get(const struct mlxsw_sp
*mlxsw_sp
, int mtu
,
846 delay
= mlxsw_sp_bytes_cells(mlxsw_sp
, DIV_ROUND_UP(delay
,
848 return MLXSW_SP_CELL_FACTOR
* delay
+ mlxsw_sp_bytes_cells(mlxsw_sp
,
852 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
853 * Assumes 100m cable and maximum MTU.
855 #define MLXSW_SP_PAUSE_DELAY 58752
857 static u16
mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp
*mlxsw_sp
, int mtu
,
858 u16 delay
, bool pfc
, bool pause
)
861 return mlxsw_sp_pfc_delay_get(mlxsw_sp
, mtu
, delay
);
863 return mlxsw_sp_bytes_cells(mlxsw_sp
, MLXSW_SP_PAUSE_DELAY
);
868 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl
, int index
, u16 size
, u16 thres
,
872 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl
, index
, size
);
874 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl
, index
, size
,
878 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port
*mlxsw_sp_port
, int mtu
,
879 u8
*prio_tc
, bool pause_en
,
880 struct ieee_pfc
*my_pfc
)
882 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
883 u8 pfc_en
= !!my_pfc
? my_pfc
->pfc_en
: 0;
884 u16 delay
= !!my_pfc
? my_pfc
->delay
: 0;
885 char pbmc_pl
[MLXSW_REG_PBMC_LEN
];
888 mlxsw_reg_pbmc_pack(pbmc_pl
, mlxsw_sp_port
->local_port
, 0, 0);
889 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pbmc
), pbmc_pl
);
893 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
894 bool configure
= false;
899 for (j
= 0; j
< IEEE_8021QAZ_MAX_TCS
; j
++) {
900 if (prio_tc
[j
] == i
) {
901 pfc
= pfc_en
& BIT(j
);
910 lossy
= !(pfc
|| pause_en
);
911 thres
= mlxsw_sp_pg_buf_threshold_get(mlxsw_sp
, mtu
);
912 delay
= mlxsw_sp_pg_buf_delay_get(mlxsw_sp
, mtu
, delay
, pfc
,
914 mlxsw_sp_pg_buf_pack(pbmc_pl
, i
, thres
+ delay
, thres
, lossy
);
917 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pbmc
), pbmc_pl
);
920 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
921 int mtu
, bool pause_en
)
923 u8 def_prio_tc
[IEEE_8021QAZ_MAX_TCS
] = {0};
924 bool dcb_en
= !!mlxsw_sp_port
->dcb
.ets
;
925 struct ieee_pfc
*my_pfc
;
928 prio_tc
= dcb_en
? mlxsw_sp_port
->dcb
.ets
->prio_tc
: def_prio_tc
;
929 my_pfc
= dcb_en
? mlxsw_sp_port
->dcb
.pfc
: NULL
;
931 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port
, mtu
, prio_tc
,
935 static int mlxsw_sp_port_change_mtu(struct net_device
*dev
, int mtu
)
937 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
938 bool pause_en
= mlxsw_sp_port_is_pause_en(mlxsw_sp_port
);
941 err
= mlxsw_sp_port_headroom_set(mlxsw_sp_port
, mtu
, pause_en
);
944 err
= mlxsw_sp_span_port_mtu_update(mlxsw_sp_port
, mtu
);
946 goto err_span_port_mtu_update
;
947 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, mtu
);
949 goto err_port_mtu_set
;
954 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port
, dev
->mtu
);
955 err_span_port_mtu_update
:
956 mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
961 mlxsw_sp_port_get_sw_stats64(const struct net_device
*dev
,
962 struct rtnl_link_stats64
*stats
)
964 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
965 struct mlxsw_sp_port_pcpu_stats
*p
;
966 u64 rx_packets
, rx_bytes
, tx_packets
, tx_bytes
;
971 for_each_possible_cpu(i
) {
972 p
= per_cpu_ptr(mlxsw_sp_port
->pcpu_stats
, i
);
974 start
= u64_stats_fetch_begin_irq(&p
->syncp
);
975 rx_packets
= p
->rx_packets
;
976 rx_bytes
= p
->rx_bytes
;
977 tx_packets
= p
->tx_packets
;
978 tx_bytes
= p
->tx_bytes
;
979 } while (u64_stats_fetch_retry_irq(&p
->syncp
, start
));
981 stats
->rx_packets
+= rx_packets
;
982 stats
->rx_bytes
+= rx_bytes
;
983 stats
->tx_packets
+= tx_packets
;
984 stats
->tx_bytes
+= tx_bytes
;
985 /* tx_dropped is u32, updated without syncp protection. */
986 tx_dropped
+= p
->tx_dropped
;
988 stats
->tx_dropped
= tx_dropped
;
992 static bool mlxsw_sp_port_has_offload_stats(const struct net_device
*dev
, int attr_id
)
995 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
1002 static int mlxsw_sp_port_get_offload_stats(int attr_id
, const struct net_device
*dev
,
1006 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
1007 return mlxsw_sp_port_get_sw_stats64(dev
, sp
);
1013 static int mlxsw_sp_port_get_stats_raw(struct net_device
*dev
, int grp
,
1014 int prio
, char *ppcnt_pl
)
1016 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1017 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1019 mlxsw_reg_ppcnt_pack(ppcnt_pl
, mlxsw_sp_port
->local_port
, grp
, prio
);
1020 return mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ppcnt
), ppcnt_pl
);
1023 static int mlxsw_sp_port_get_hw_stats(struct net_device
*dev
,
1024 struct rtnl_link_stats64
*stats
)
1026 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
1029 err
= mlxsw_sp_port_get_stats_raw(dev
, MLXSW_REG_PPCNT_IEEE_8023_CNT
,
1035 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl
);
1037 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl
);
1039 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl
);
1041 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl
);
1043 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl
);
1045 stats
->rx_crc_errors
=
1046 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl
);
1047 stats
->rx_frame_errors
=
1048 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl
);
1050 stats
->rx_length_errors
= (
1051 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl
) +
1052 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl
) +
1053 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl
));
1055 stats
->rx_errors
= (stats
->rx_crc_errors
+
1056 stats
->rx_frame_errors
+ stats
->rx_length_errors
);
1062 static void update_stats_cache(struct work_struct
*work
)
1064 struct mlxsw_sp_port
*mlxsw_sp_port
=
1065 container_of(work
, struct mlxsw_sp_port
,
1066 hw_stats
.update_dw
.work
);
1068 if (!netif_carrier_ok(mlxsw_sp_port
->dev
))
1071 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port
->dev
,
1072 mlxsw_sp_port
->hw_stats
.cache
);
1075 mlxsw_core_schedule_dw(&mlxsw_sp_port
->hw_stats
.update_dw
,
1076 MLXSW_HW_STATS_UPDATE_TIME
);
1079 /* Return the stats from a cache that is updated periodically,
1080 * as this function might get called in an atomic context.
1083 mlxsw_sp_port_get_stats64(struct net_device
*dev
,
1084 struct rtnl_link_stats64
*stats
)
1086 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1088 memcpy(stats
, mlxsw_sp_port
->hw_stats
.cache
, sizeof(*stats
));
1091 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1092 u16 vid_begin
, u16 vid_end
,
1093 bool is_member
, bool untagged
)
1095 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1099 spvm_pl
= kmalloc(MLXSW_REG_SPVM_LEN
, GFP_KERNEL
);
1103 mlxsw_reg_spvm_pack(spvm_pl
, mlxsw_sp_port
->local_port
, vid_begin
,
1104 vid_end
, is_member
, untagged
);
1105 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvm
), spvm_pl
);
1110 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid_begin
,
1111 u16 vid_end
, bool is_member
, bool untagged
)
1116 for (vid
= vid_begin
; vid
<= vid_end
;
1117 vid
+= MLXSW_REG_SPVM_REC_MAX_COUNT
) {
1118 vid_e
= min((u16
) (vid
+ MLXSW_REG_SPVM_REC_MAX_COUNT
- 1),
1121 err
= __mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid_e
,
1122 is_member
, untagged
);
1130 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port
*mlxsw_sp_port
)
1132 enum mlxsw_reg_svfa_mt mt
= MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
;
1133 u16 vid
, last_visited_vid
;
1136 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, VLAN_N_VID
) {
1137 err
= mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, true, vid
,
1140 last_visited_vid
= vid
;
1141 goto err_port_vid_to_fid_set
;
1145 err
= mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, true);
1147 last_visited_vid
= VLAN_N_VID
;
1148 goto err_port_vid_to_fid_set
;
1153 err_port_vid_to_fid_set
:
1154 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, last_visited_vid
)
1155 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, false, vid
,
1160 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port
*mlxsw_sp_port
)
1162 enum mlxsw_reg_svfa_mt mt
= MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
;
1166 err
= mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, false);
1170 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, VLAN_N_VID
) {
1171 err
= mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, false,
1180 static struct mlxsw_sp_port
*
1181 mlxsw_sp_port_vport_create(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
1183 struct mlxsw_sp_port
*mlxsw_sp_vport
;
1185 mlxsw_sp_vport
= kzalloc(sizeof(*mlxsw_sp_vport
), GFP_KERNEL
);
1186 if (!mlxsw_sp_vport
)
1189 /* dev will be set correctly after the VLAN device is linked
1190 * with the real device. In case of bridge SELF invocation, dev
1191 * will remain as is.
1193 mlxsw_sp_vport
->dev
= mlxsw_sp_port
->dev
;
1194 mlxsw_sp_vport
->mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1195 mlxsw_sp_vport
->local_port
= mlxsw_sp_port
->local_port
;
1196 mlxsw_sp_vport
->stp_state
= BR_STATE_FORWARDING
;
1197 mlxsw_sp_vport
->lagged
= mlxsw_sp_port
->lagged
;
1198 mlxsw_sp_vport
->lag_id
= mlxsw_sp_port
->lag_id
;
1199 mlxsw_sp_vport
->vport
.vid
= vid
;
1201 list_add(&mlxsw_sp_vport
->vport
.list
, &mlxsw_sp_port
->vports_list
);
1203 return mlxsw_sp_vport
;
1206 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port
*mlxsw_sp_vport
)
1208 list_del(&mlxsw_sp_vport
->vport
.list
);
1209 kfree(mlxsw_sp_vport
);
1212 static int mlxsw_sp_port_add_vid(struct net_device
*dev
,
1213 __be16 __always_unused proto
, u16 vid
)
1215 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1216 struct mlxsw_sp_port
*mlxsw_sp_vport
;
1217 bool untagged
= vid
== 1;
1220 /* VLAN 0 is added to HW filter when device goes up, but it is
1221 * reserved in our case, so simply return.
1226 if (mlxsw_sp_port_vport_find(mlxsw_sp_port
, vid
))
1229 mlxsw_sp_vport
= mlxsw_sp_port_vport_create(mlxsw_sp_port
, vid
);
1230 if (!mlxsw_sp_vport
)
1233 /* When adding the first VLAN interface on a bridged port we need to
1234 * transition all the active 802.1Q bridge VLANs to use explicit
1235 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
1237 if (list_is_singular(&mlxsw_sp_port
->vports_list
)) {
1238 err
= mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port
);
1240 goto err_port_vp_mode_trans
;
1243 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_vport
, vid
, vid
, true, untagged
);
1245 goto err_port_add_vid
;
1250 if (list_is_singular(&mlxsw_sp_port
->vports_list
))
1251 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port
);
1252 err_port_vp_mode_trans
:
1253 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport
);
1257 static int mlxsw_sp_port_kill_vid(struct net_device
*dev
,
1258 __be16 __always_unused proto
, u16 vid
)
1260 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1261 struct mlxsw_sp_port
*mlxsw_sp_vport
;
1262 struct mlxsw_sp_fid
*f
;
1264 /* VLAN 0 is removed from HW filter when device goes down, but
1265 * it is reserved in our case, so simply return.
1270 mlxsw_sp_vport
= mlxsw_sp_port_vport_find(mlxsw_sp_port
, vid
);
1271 if (WARN_ON(!mlxsw_sp_vport
))
1274 mlxsw_sp_port_vlan_set(mlxsw_sp_vport
, vid
, vid
, false, false);
1276 /* Drop FID reference. If this was the last reference the
1277 * resources will be freed.
1279 f
= mlxsw_sp_vport_fid_get(mlxsw_sp_vport
);
1280 if (f
&& !WARN_ON(!f
->leave
))
1281 f
->leave(mlxsw_sp_vport
);
1283 /* When removing the last VLAN interface on a bridged port we need to
1284 * transition all active 802.1Q bridge VLANs to use VID to FID
1285 * mappings and set port's mode to VLAN mode.
1287 if (list_is_singular(&mlxsw_sp_port
->vports_list
))
1288 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port
);
1290 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport
);
1295 static int mlxsw_sp_port_get_phys_port_name(struct net_device
*dev
, char *name
,
1298 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1299 u8 module
= mlxsw_sp_port
->mapping
.module
;
1300 u8 width
= mlxsw_sp_port
->mapping
.width
;
1301 u8 lane
= mlxsw_sp_port
->mapping
.lane
;
1304 if (!mlxsw_sp_port
->split
)
1305 err
= snprintf(name
, len
, "p%d", module
+ 1);
1307 err
= snprintf(name
, len
, "p%ds%d", module
+ 1,
1316 static struct mlxsw_sp_port_mall_tc_entry
*
1317 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port
*port
,
1318 unsigned long cookie
) {
1319 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1321 list_for_each_entry(mall_tc_entry
, &port
->mall_tc_list
, list
)
1322 if (mall_tc_entry
->cookie
== cookie
)
1323 return mall_tc_entry
;
1329 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port
*mlxsw_sp_port
,
1330 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
,
1331 const struct tc_action
*a
,
1334 struct net
*net
= dev_net(mlxsw_sp_port
->dev
);
1335 enum mlxsw_sp_span_type span_type
;
1336 struct mlxsw_sp_port
*to_port
;
1337 struct net_device
*to_dev
;
1340 ifindex
= tcf_mirred_ifindex(a
);
1341 to_dev
= __dev_get_by_index(net
, ifindex
);
1343 netdev_err(mlxsw_sp_port
->dev
, "Could not find requested device\n");
1347 if (!mlxsw_sp_port_dev_check(to_dev
)) {
1348 netdev_err(mlxsw_sp_port
->dev
, "Cannot mirror to a non-spectrum port");
1351 to_port
= netdev_priv(to_dev
);
1353 mirror
->to_local_port
= to_port
->local_port
;
1354 mirror
->ingress
= ingress
;
1355 span_type
= ingress
? MLXSW_SP_SPAN_INGRESS
: MLXSW_SP_SPAN_EGRESS
;
1356 return mlxsw_sp_span_mirror_add(mlxsw_sp_port
, to_port
, span_type
);
1360 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port
*mlxsw_sp_port
,
1361 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
)
1363 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1364 enum mlxsw_sp_span_type span_type
;
1365 struct mlxsw_sp_port
*to_port
;
1367 to_port
= mlxsw_sp
->ports
[mirror
->to_local_port
];
1368 span_type
= mirror
->ingress
?
1369 MLXSW_SP_SPAN_INGRESS
: MLXSW_SP_SPAN_EGRESS
;
1370 mlxsw_sp_span_mirror_remove(mlxsw_sp_port
, to_port
, span_type
);
1374 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port
*mlxsw_sp_port
,
1375 struct tc_cls_matchall_offload
*cls
,
1376 const struct tc_action
*a
,
1381 if (!mlxsw_sp_port
->sample
)
1383 if (rtnl_dereference(mlxsw_sp_port
->sample
->psample_group
)) {
1384 netdev_err(mlxsw_sp_port
->dev
, "sample already active\n");
1387 if (tcf_sample_rate(a
) > MLXSW_REG_MPSC_RATE_MAX
) {
1388 netdev_err(mlxsw_sp_port
->dev
, "sample rate not supported\n");
1392 rcu_assign_pointer(mlxsw_sp_port
->sample
->psample_group
,
1393 tcf_sample_psample_group(a
));
1394 mlxsw_sp_port
->sample
->truncate
= tcf_sample_truncate(a
);
1395 mlxsw_sp_port
->sample
->trunc_size
= tcf_sample_trunc_size(a
);
1396 mlxsw_sp_port
->sample
->rate
= tcf_sample_rate(a
);
1398 err
= mlxsw_sp_port_sample_set(mlxsw_sp_port
, true, tcf_sample_rate(a
));
1400 goto err_port_sample_set
;
1403 err_port_sample_set
:
1404 RCU_INIT_POINTER(mlxsw_sp_port
->sample
->psample_group
, NULL
);
1409 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port
*mlxsw_sp_port
)
1411 if (!mlxsw_sp_port
->sample
)
1414 mlxsw_sp_port_sample_set(mlxsw_sp_port
, false, 1);
1415 RCU_INIT_POINTER(mlxsw_sp_port
->sample
->psample_group
, NULL
);
1418 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port
*mlxsw_sp_port
,
1420 struct tc_cls_matchall_offload
*cls
,
1423 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1424 const struct tc_action
*a
;
1428 if (!tc_single_action(cls
->exts
)) {
1429 netdev_err(mlxsw_sp_port
->dev
, "only singular actions are supported\n");
1433 mall_tc_entry
= kzalloc(sizeof(*mall_tc_entry
), GFP_KERNEL
);
1436 mall_tc_entry
->cookie
= cls
->cookie
;
1438 tcf_exts_to_list(cls
->exts
, &actions
);
1439 a
= list_first_entry(&actions
, struct tc_action
, list
);
1441 if (is_tcf_mirred_egress_mirror(a
) && protocol
== htons(ETH_P_ALL
)) {
1442 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
;
1444 mall_tc_entry
->type
= MLXSW_SP_PORT_MALL_MIRROR
;
1445 mirror
= &mall_tc_entry
->mirror
;
1446 err
= mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port
,
1447 mirror
, a
, ingress
);
1448 } else if (is_tcf_sample(a
) && protocol
== htons(ETH_P_ALL
)) {
1449 mall_tc_entry
->type
= MLXSW_SP_PORT_MALL_SAMPLE
;
1450 err
= mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port
, cls
,
1457 goto err_add_action
;
1459 list_add_tail(&mall_tc_entry
->list
, &mlxsw_sp_port
->mall_tc_list
);
1463 kfree(mall_tc_entry
);
1467 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port
*mlxsw_sp_port
,
1468 struct tc_cls_matchall_offload
*cls
)
1470 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1472 mall_tc_entry
= mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port
,
1474 if (!mall_tc_entry
) {
1475 netdev_dbg(mlxsw_sp_port
->dev
, "tc entry not found on port\n");
1478 list_del(&mall_tc_entry
->list
);
1480 switch (mall_tc_entry
->type
) {
1481 case MLXSW_SP_PORT_MALL_MIRROR
:
1482 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port
,
1483 &mall_tc_entry
->mirror
);
1485 case MLXSW_SP_PORT_MALL_SAMPLE
:
1486 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port
);
1492 kfree(mall_tc_entry
);
1495 static int mlxsw_sp_setup_tc(struct net_device
*dev
, u32 handle
,
1496 __be16 proto
, struct tc_to_netdev
*tc
)
1498 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1499 bool ingress
= TC_H_MAJ(handle
) == TC_H_MAJ(TC_H_INGRESS
);
1502 case TC_SETUP_MATCHALL
:
1503 switch (tc
->cls_mall
->command
) {
1504 case TC_CLSMATCHALL_REPLACE
:
1505 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port
,
1509 case TC_CLSMATCHALL_DESTROY
:
1510 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port
,
1516 case TC_SETUP_CLSFLOWER
:
1517 switch (tc
->cls_flower
->command
) {
1518 case TC_CLSFLOWER_REPLACE
:
1519 return mlxsw_sp_flower_replace(mlxsw_sp_port
, ingress
,
1520 proto
, tc
->cls_flower
);
1521 case TC_CLSFLOWER_DESTROY
:
1522 mlxsw_sp_flower_destroy(mlxsw_sp_port
, ingress
,
1525 case TC_CLSFLOWER_STATS
:
1526 return mlxsw_sp_flower_stats(mlxsw_sp_port
, ingress
,
1536 static const struct net_device_ops mlxsw_sp_port_netdev_ops
= {
1537 .ndo_open
= mlxsw_sp_port_open
,
1538 .ndo_stop
= mlxsw_sp_port_stop
,
1539 .ndo_start_xmit
= mlxsw_sp_port_xmit
,
1540 .ndo_setup_tc
= mlxsw_sp_setup_tc
,
1541 .ndo_set_rx_mode
= mlxsw_sp_set_rx_mode
,
1542 .ndo_set_mac_address
= mlxsw_sp_port_set_mac_address
,
1543 .ndo_change_mtu
= mlxsw_sp_port_change_mtu
,
1544 .ndo_get_stats64
= mlxsw_sp_port_get_stats64
,
1545 .ndo_has_offload_stats
= mlxsw_sp_port_has_offload_stats
,
1546 .ndo_get_offload_stats
= mlxsw_sp_port_get_offload_stats
,
1547 .ndo_vlan_rx_add_vid
= mlxsw_sp_port_add_vid
,
1548 .ndo_vlan_rx_kill_vid
= mlxsw_sp_port_kill_vid
,
1549 .ndo_fdb_add
= switchdev_port_fdb_add
,
1550 .ndo_fdb_del
= switchdev_port_fdb_del
,
1551 .ndo_fdb_dump
= switchdev_port_fdb_dump
,
1552 .ndo_bridge_setlink
= switchdev_port_bridge_setlink
,
1553 .ndo_bridge_getlink
= switchdev_port_bridge_getlink
,
1554 .ndo_bridge_dellink
= switchdev_port_bridge_dellink
,
1555 .ndo_get_phys_port_name
= mlxsw_sp_port_get_phys_port_name
,
1558 static void mlxsw_sp_port_get_drvinfo(struct net_device
*dev
,
1559 struct ethtool_drvinfo
*drvinfo
)
1561 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1562 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1564 strlcpy(drvinfo
->driver
, mlxsw_sp_driver_name
, sizeof(drvinfo
->driver
));
1565 strlcpy(drvinfo
->version
, mlxsw_sp_driver_version
,
1566 sizeof(drvinfo
->version
));
1567 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
1569 mlxsw_sp
->bus_info
->fw_rev
.major
,
1570 mlxsw_sp
->bus_info
->fw_rev
.minor
,
1571 mlxsw_sp
->bus_info
->fw_rev
.subminor
);
1572 strlcpy(drvinfo
->bus_info
, mlxsw_sp
->bus_info
->device_name
,
1573 sizeof(drvinfo
->bus_info
));
1576 static void mlxsw_sp_port_get_pauseparam(struct net_device
*dev
,
1577 struct ethtool_pauseparam
*pause
)
1579 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1581 pause
->rx_pause
= mlxsw_sp_port
->link
.rx_pause
;
1582 pause
->tx_pause
= mlxsw_sp_port
->link
.tx_pause
;
1585 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1586 struct ethtool_pauseparam
*pause
)
1588 char pfcc_pl
[MLXSW_REG_PFCC_LEN
];
1590 mlxsw_reg_pfcc_pack(pfcc_pl
, mlxsw_sp_port
->local_port
);
1591 mlxsw_reg_pfcc_pprx_set(pfcc_pl
, pause
->rx_pause
);
1592 mlxsw_reg_pfcc_pptx_set(pfcc_pl
, pause
->tx_pause
);
1594 return mlxsw_reg_write(mlxsw_sp_port
->mlxsw_sp
->core
, MLXSW_REG(pfcc
),
1598 static int mlxsw_sp_port_set_pauseparam(struct net_device
*dev
,
1599 struct ethtool_pauseparam
*pause
)
1601 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1602 bool pause_en
= pause
->tx_pause
|| pause
->rx_pause
;
1605 if (mlxsw_sp_port
->dcb
.pfc
&& mlxsw_sp_port
->dcb
.pfc
->pfc_en
) {
1606 netdev_err(dev
, "PFC already enabled on port\n");
1610 if (pause
->autoneg
) {
1611 netdev_err(dev
, "PAUSE frames autonegotiation isn't supported\n");
1615 err
= mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
1617 netdev_err(dev
, "Failed to configure port's headroom\n");
1621 err
= mlxsw_sp_port_pause_set(mlxsw_sp_port
, pause
);
1623 netdev_err(dev
, "Failed to set PAUSE parameters\n");
1624 goto err_port_pause_configure
;
1627 mlxsw_sp_port
->link
.rx_pause
= pause
->rx_pause
;
1628 mlxsw_sp_port
->link
.tx_pause
= pause
->tx_pause
;
1632 err_port_pause_configure
:
1633 pause_en
= mlxsw_sp_port_is_pause_en(mlxsw_sp_port
);
1634 mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
1638 struct mlxsw_sp_port_hw_stats
{
1639 char str
[ETH_GSTRING_LEN
];
1640 u64 (*getter
)(const char *payload
);
1644 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats
[] = {
1646 .str
= "a_frames_transmitted_ok",
1647 .getter
= mlxsw_reg_ppcnt_a_frames_transmitted_ok_get
,
1650 .str
= "a_frames_received_ok",
1651 .getter
= mlxsw_reg_ppcnt_a_frames_received_ok_get
,
1654 .str
= "a_frame_check_sequence_errors",
1655 .getter
= mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get
,
1658 .str
= "a_alignment_errors",
1659 .getter
= mlxsw_reg_ppcnt_a_alignment_errors_get
,
1662 .str
= "a_octets_transmitted_ok",
1663 .getter
= mlxsw_reg_ppcnt_a_octets_transmitted_ok_get
,
1666 .str
= "a_octets_received_ok",
1667 .getter
= mlxsw_reg_ppcnt_a_octets_received_ok_get
,
1670 .str
= "a_multicast_frames_xmitted_ok",
1671 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get
,
1674 .str
= "a_broadcast_frames_xmitted_ok",
1675 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get
,
1678 .str
= "a_multicast_frames_received_ok",
1679 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get
,
1682 .str
= "a_broadcast_frames_received_ok",
1683 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get
,
1686 .str
= "a_in_range_length_errors",
1687 .getter
= mlxsw_reg_ppcnt_a_in_range_length_errors_get
,
1690 .str
= "a_out_of_range_length_field",
1691 .getter
= mlxsw_reg_ppcnt_a_out_of_range_length_field_get
,
1694 .str
= "a_frame_too_long_errors",
1695 .getter
= mlxsw_reg_ppcnt_a_frame_too_long_errors_get
,
1698 .str
= "a_symbol_error_during_carrier",
1699 .getter
= mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get
,
1702 .str
= "a_mac_control_frames_transmitted",
1703 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get
,
1706 .str
= "a_mac_control_frames_received",
1707 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_received_get
,
1710 .str
= "a_unsupported_opcodes_received",
1711 .getter
= mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get
,
1714 .str
= "a_pause_mac_ctrl_frames_received",
1715 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get
,
1718 .str
= "a_pause_mac_ctrl_frames_xmitted",
1719 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get
,
1723 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1725 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats
[] = {
1727 .str
= "rx_octets_prio",
1728 .getter
= mlxsw_reg_ppcnt_rx_octets_get
,
1731 .str
= "rx_frames_prio",
1732 .getter
= mlxsw_reg_ppcnt_rx_frames_get
,
1735 .str
= "tx_octets_prio",
1736 .getter
= mlxsw_reg_ppcnt_tx_octets_get
,
1739 .str
= "tx_frames_prio",
1740 .getter
= mlxsw_reg_ppcnt_tx_frames_get
,
1743 .str
= "rx_pause_prio",
1744 .getter
= mlxsw_reg_ppcnt_rx_pause_get
,
1747 .str
= "rx_pause_duration_prio",
1748 .getter
= mlxsw_reg_ppcnt_rx_pause_duration_get
,
1751 .str
= "tx_pause_prio",
1752 .getter
= mlxsw_reg_ppcnt_tx_pause_get
,
1755 .str
= "tx_pause_duration_prio",
1756 .getter
= mlxsw_reg_ppcnt_tx_pause_duration_get
,
1760 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1762 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats
[] = {
1764 .str
= "tc_transmit_queue_tc",
1765 .getter
= mlxsw_reg_ppcnt_tc_transmit_queue_get
,
1766 .cells_bytes
= true,
1769 .str
= "tc_no_buffer_discard_uc_tc",
1770 .getter
= mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get
,
1774 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1776 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
1777 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1778 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
1779 IEEE_8021QAZ_MAX_TCS)
1781 static void mlxsw_sp_port_get_prio_strings(u8
**p
, int prio
)
1785 for (i
= 0; i
< MLXSW_SP_PORT_HW_PRIO_STATS_LEN
; i
++) {
1786 snprintf(*p
, ETH_GSTRING_LEN
, "%s_%d",
1787 mlxsw_sp_port_hw_prio_stats
[i
].str
, prio
);
1788 *p
+= ETH_GSTRING_LEN
;
1792 static void mlxsw_sp_port_get_tc_strings(u8
**p
, int tc
)
1796 for (i
= 0; i
< MLXSW_SP_PORT_HW_TC_STATS_LEN
; i
++) {
1797 snprintf(*p
, ETH_GSTRING_LEN
, "%s_%d",
1798 mlxsw_sp_port_hw_tc_stats
[i
].str
, tc
);
1799 *p
+= ETH_GSTRING_LEN
;
1803 static void mlxsw_sp_port_get_strings(struct net_device
*dev
,
1804 u32 stringset
, u8
*data
)
1809 switch (stringset
) {
1811 for (i
= 0; i
< MLXSW_SP_PORT_HW_STATS_LEN
; i
++) {
1812 memcpy(p
, mlxsw_sp_port_hw_stats
[i
].str
,
1814 p
+= ETH_GSTRING_LEN
;
1817 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
1818 mlxsw_sp_port_get_prio_strings(&p
, i
);
1820 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
1821 mlxsw_sp_port_get_tc_strings(&p
, i
);
1827 static int mlxsw_sp_port_set_phys_id(struct net_device
*dev
,
1828 enum ethtool_phys_id_state state
)
1830 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1831 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1832 char mlcr_pl
[MLXSW_REG_MLCR_LEN
];
1836 case ETHTOOL_ID_ACTIVE
:
1839 case ETHTOOL_ID_INACTIVE
:
1846 mlxsw_reg_mlcr_pack(mlcr_pl
, mlxsw_sp_port
->local_port
, active
);
1847 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mlcr
), mlcr_pl
);
1851 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats
**p_hw_stats
,
1852 int *p_len
, enum mlxsw_reg_ppcnt_grp grp
)
1855 case MLXSW_REG_PPCNT_IEEE_8023_CNT
:
1856 *p_hw_stats
= mlxsw_sp_port_hw_stats
;
1857 *p_len
= MLXSW_SP_PORT_HW_STATS_LEN
;
1859 case MLXSW_REG_PPCNT_PRIO_CNT
:
1860 *p_hw_stats
= mlxsw_sp_port_hw_prio_stats
;
1861 *p_len
= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
1863 case MLXSW_REG_PPCNT_TC_CNT
:
1864 *p_hw_stats
= mlxsw_sp_port_hw_tc_stats
;
1865 *p_len
= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
1874 static void __mlxsw_sp_port_get_stats(struct net_device
*dev
,
1875 enum mlxsw_reg_ppcnt_grp grp
, int prio
,
1876 u64
*data
, int data_index
)
1878 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1879 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1880 struct mlxsw_sp_port_hw_stats
*hw_stats
;
1881 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
1885 err
= mlxsw_sp_get_hw_stats_by_group(&hw_stats
, &len
, grp
);
1888 mlxsw_sp_port_get_stats_raw(dev
, grp
, prio
, ppcnt_pl
);
1889 for (i
= 0; i
< len
; i
++) {
1890 data
[data_index
+ i
] = hw_stats
[i
].getter(ppcnt_pl
);
1891 if (!hw_stats
[i
].cells_bytes
)
1893 data
[data_index
+ i
] = mlxsw_sp_cells_bytes(mlxsw_sp
,
1894 data
[data_index
+ i
]);
1898 static void mlxsw_sp_port_get_stats(struct net_device
*dev
,
1899 struct ethtool_stats
*stats
, u64
*data
)
1901 int i
, data_index
= 0;
1903 /* IEEE 802.3 Counters */
1904 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_IEEE_8023_CNT
, 0,
1906 data_index
= MLXSW_SP_PORT_HW_STATS_LEN
;
1908 /* Per-Priority Counters */
1909 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1910 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_PRIO_CNT
, i
,
1912 data_index
+= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
1915 /* Per-TC Counters */
1916 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1917 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_TC_CNT
, i
,
1919 data_index
+= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
1923 static int mlxsw_sp_port_get_sset_count(struct net_device
*dev
, int sset
)
1927 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN
;
1933 struct mlxsw_sp_port_link_mode
{
1934 enum ethtool_link_mode_bit_indices mask_ethtool
;
1939 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode
[] = {
1941 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T
,
1942 .mask_ethtool
= ETHTOOL_LINK_MODE_100baseT_Full_BIT
,
1946 .mask
= MLXSW_REG_PTYS_ETH_SPEED_SGMII
|
1947 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
,
1948 .mask_ethtool
= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
,
1949 .speed
= SPEED_1000
,
1952 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T
,
1953 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseT_Full_BIT
,
1954 .speed
= SPEED_10000
,
1957 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4
|
1958 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
,
1959 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT
,
1960 .speed
= SPEED_10000
,
1963 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
1964 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
1965 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
1966 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR
,
1967 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
,
1968 .speed
= SPEED_10000
,
1971 .mask
= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2
,
1972 .mask_ethtool
= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT
,
1973 .speed
= SPEED_20000
,
1976 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
,
1977 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT
,
1978 .speed
= SPEED_40000
,
1981 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
,
1982 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT
,
1983 .speed
= SPEED_40000
,
1986 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
,
1987 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT
,
1988 .speed
= SPEED_40000
,
1991 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4
,
1992 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT
,
1993 .speed
= SPEED_40000
,
1996 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR
,
1997 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT
,
1998 .speed
= SPEED_25000
,
2001 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR
,
2002 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT
,
2003 .speed
= SPEED_25000
,
2006 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
2007 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
,
2008 .speed
= SPEED_25000
,
2011 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
2012 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
,
2013 .speed
= SPEED_25000
,
2016 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2
,
2017 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT
,
2018 .speed
= SPEED_50000
,
2021 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2
,
2022 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT
,
2023 .speed
= SPEED_50000
,
2026 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2
,
2027 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT
,
2028 .speed
= SPEED_50000
,
2031 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2032 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT
,
2033 .speed
= SPEED_56000
,
2036 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2037 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT
,
2038 .speed
= SPEED_56000
,
2041 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2042 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT
,
2043 .speed
= SPEED_56000
,
2046 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2047 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT
,
2048 .speed
= SPEED_56000
,
2051 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
,
2052 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT
,
2053 .speed
= SPEED_100000
,
2056 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
,
2057 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT
,
2058 .speed
= SPEED_100000
,
2061 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
,
2062 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
,
2063 .speed
= SPEED_100000
,
2066 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4
,
2067 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT
,
2068 .speed
= SPEED_100000
,
2072 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2075 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto
,
2076 struct ethtool_link_ksettings
*cmd
)
2078 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
2079 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
2080 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
2081 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
2082 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
2083 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
2084 ethtool_link_ksettings_add_link_mode(cmd
, supported
, FIBRE
);
2086 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
2087 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
2088 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
2089 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
|
2090 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
))
2091 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Backplane
);
2094 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto
, unsigned long *mode
)
2098 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
2099 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
)
2100 __set_bit(mlxsw_sp_port_link_mode
[i
].mask_ethtool
,
static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_link_ksettings *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	cmd->base.speed = speed;
	cmd->base.duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
static u32
mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
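
/* A port's operational ceiling scales with its lane width, so the helper
 * below selects every link mode whose speed does not exceed the given
 * upper speed; all of them are then enabled at once.
 */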
static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}

static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
					     struct ethtool_link_ksettings *cmd)
{
	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
}

static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
				    struct ethtool_link_ksettings *cmd)
{
	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
}

static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	autoneg = mlxsw_sp_port->link.autoneg;
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}
static int
mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap, eth_proto_new;
	bool autoneg;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);

	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
	eth_proto_new = autoneg ?
		mlxsw_sp_to_ptys_advert_link(cmd) :
		mlxsw_sp_to_ptys_speed(cmd->base.speed);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "No supported speed requested\n");
		return -EINVAL;
	}

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	if (!netif_running(dev))
		return 0;

	mlxsw_sp_port->link.autoneg = autoneg;

	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return 0;
}
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
};
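
/* The base speed is per lane, so the maximum speed a port may be set to
 * is the base speed multiplied by its width (number of lanes).
 */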
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
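
/* Configure an ETS element (port, group, subgroup or TC) in the QEEC
 * register: link the element to its parent in the scheduling hierarchy
 * and set its DWRR weight.
 */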
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->pvid = 1;

	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
}

static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}
static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
					GFP_KERNEL);
	if (!mlxsw_sp_port->sample) {
		err = -ENOMEM;
		goto err_alloc_sample;
	}

	mlxsw_sp_port->hw_stats.cache =
		kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);

	if (!mlxsw_sp_port->hw_stats.cache) {
		err = -ENOMEM;
		goto err_alloc_hw_stats;
	}
	INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_vport_create;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, mlxsw_sp_port->split,
				module);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
err_port_pvid_vport_create:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	kfree(mlxsw_sp_port->hw_stats.cache);
err_alloc_hw_stats:
	kfree(mlxsw_sp_port->sample);
err_alloc_sample:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}
	err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
				     module, width, lane);
	if (err)
		goto err_port_create;
	return 0;

err_port_create:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	kfree(mlxsw_sp_port->hw_stats.cache);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	__mlxsw_sp_port_remove(mlxsw_sp, local_port);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
	if (!mlxsw_sp->port_to_module) {
		err = -ENOMEM;
		goto err_port_to_module_alloc;
	}

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
err_port_to_module_alloc:
	kfree(mlxsw_sp->ports);
	return err;
}
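
/* Local ports are numbered in clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * (four); splitting is performed relative to the cluster's base port.
 */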
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}
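
/* The RX listeners below handle packets trapped to the CPU: the packet
 * is accounted in the ingress port's per-CPU stats and then injected
 * into the kernel's receive path via the port netdev.
 */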
static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	/* L3 traps */
	MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
	MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD)
};
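
/* Each trap group gets its own CPU policer, so a flood of low-priority
 * trapped packets cannot starve more important control traffic on its
 * way to the CPU.
 */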
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			rate = 128;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
			rate = 16 * 1024;
			burst_size = 10;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
			rate = 1024;
			burst_size = 7;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			is_bytes = true;
			rate = 4 * 1024;
			burst_size = 4;
			break;
		default:
			continue;
		}

		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
			priority = 5;
			tc = 5;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
			priority = 4;
			tc = 4;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
			priority = 3;
			tc = 3;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
			priority = 2;
			tc = 2;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &mlxsw_sp_listener[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;

	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
}
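
/* Flood tables control how unknown unicast, unregistered multicast and
 * broadcast packets are replicated, per bridge type.
 */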
static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	switch (type) {
	case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
		break;
	case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
		flood_table = MLXSW_SP_FLOOD_TABLE_MC;
		break;
	default:
		flood_table = MLXSW_SP_FLOOD_TABLE_BC;
	}

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create);

static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
}

static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
}
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		return err;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_dummy_fid_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init dummy FID\n");
		goto err_dummy_fid_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_dummy_fid_fini(mlxsw_sp);
err_dummy_fid_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dummy_fid_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 3,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 3,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_split_data		= 1,
	.kvd_hash_granularity		= MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts		= 2,
	.kvd_hash_double_parts		= 1,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable		= 1,
};
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= mlxsw_sp_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
					 u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
	else
		return test_bit(fid, lag_port->active_vlans);
}
static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 lag_id = mlxsw_sp_port->lag_id;
	u64 max_lag_members;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (!lag_port || lag_port->local_port == local_port)
			continue;
		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
			count++;
	}

	return !count;
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
		return 0;

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}
static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	struct mlxsw_sp_upper *master_bridge = mlxsw_sp_master_bridge(mlxsw_sp);

	return !master_bridge->dev || master_bridge->dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	struct mlxsw_sp_upper *master_bridge = mlxsw_sp_master_bridge(mlxsw_sp);

	master_bridge->dev = br_dev;
	master_bridge->ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_upper *master_bridge = mlxsw_sp_master_bridge(mlxsw_sp);

	if (--master_bridge->ref_count == 0) {
		master_bridge->dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}
*mlxsw_sp_port
,
3718 struct net_device
*br_dev
)
3720 struct net_device
*dev
= mlxsw_sp_port
->dev
;
3723 /* When port is not bridged untagged packets are tagged with
3724 * PVID=VID=1, thereby creating an implicit VLAN interface in
3725 * the device. Remove it and let bridge code take care of its
3728 err
= mlxsw_sp_port_kill_vid(dev
, 0, 1);
3732 mlxsw_sp_master_bridge_inc(mlxsw_sp_port
->mlxsw_sp
, br_dev
);
3734 mlxsw_sp_port
->learning
= 1;
3735 mlxsw_sp_port
->learning_sync
= 1;
3736 mlxsw_sp_port
->uc_flood
= 1;
3737 mlxsw_sp_port
->mc_flood
= 1;
3738 mlxsw_sp_port
->mc_router
= 0;
3739 mlxsw_sp_port
->mc_disabled
= 1;
3740 mlxsw_sp_port
->bridged
= 1;
3745 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port
*mlxsw_sp_port
)
3747 struct net_device
*dev
= mlxsw_sp_port
->dev
;
3749 mlxsw_sp_port_pvid_set(mlxsw_sp_port
, 1);
3751 mlxsw_sp_master_bridge_dec(mlxsw_sp_port
->mlxsw_sp
);
3753 mlxsw_sp_port
->learning
= 0;
3754 mlxsw_sp_port
->learning_sync
= 0;
3755 mlxsw_sp_port
->uc_flood
= 0;
3756 mlxsw_sp_port
->mc_flood
= 0;
3757 mlxsw_sp_port
->mc_router
= 0;
3758 mlxsw_sp_port
->bridged
= 0;
3760 /* Add implicit VLAN interface in the device, so that untagged
3761 * packets will be classified to the default vFID.
3763 mlxsw_sp_port_add_vid(dev
, 0, 1);
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u64 max_lag;
	int i;

	max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}
static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev, u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
	mlxsw_sp_vport->dev = lag_dev;
}

static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->lagged = 0;
}
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
				     true, false);
	if (err)
		goto err_port_vlan_set;
	return 0;

err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
}
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->rif)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			return -EINVAL;
		if (is_vlan_dev(upper_dev) &&
		    br_dev != mlxsw_sp_master_bridge(mlxsw_sp)->dev)
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
								       upper_dev);
			else
				mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
								   upper_dev);
		}
		break;
	}

	return err;
}
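/* vFIDs, used for VLAN-unaware (802.1D) bridging, are tracked in the
 * vfids.mapped bitmap, so allocation is a find-first-zero scan.
 * find_first_zero_bit() returns the bitmap size (MLXSW_SP_VFID_MAX) when
 * the bitmap is full, which callers treat as exhaustion.
 */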
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
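/* The forward declaration above is needed because mlxsw_sp_vfid_create()
 * installs mlxsw_sp_vport_vfid_leave() as the FID's ->leave() callback,
 * while the function itself is defined below in terms of these helpers.
 */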
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->rif)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
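/* Map (or unmap) the {port, VID} pair of a vPort to a FID through the
 * port-VID-to-FID (SVFA) table, so that traffic ingressing the port with
 * this VID is classified to the vFID.
 */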
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
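/* Teardown mirrors mlxsw_sp_vport_vfid_join() in reverse: unmap the
 * {port, VID} pair from the FID, disable flooding, flush learned FDB
 * records and drop the FID reference, destroying it on last use.
 */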
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->mc_flood = 1;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->mc_disabled = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}
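/* Inverse of mlxsw_sp_vport_bridge_join(): disable learning for the VID,
 * leave the vFID and clear all bridge-related flags.
 */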
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->mc_flood = 0;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->bridged = 0;
}
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}
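/* vPort flavour of the CHANGEUPPER handling, invoked for VLAN devices
 * configured on top of mlxsw_sp ports or LAGs and identified by the
 * {real device, VID} pair. Only enslavement to a bridge is handled here.
 */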
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and be members of the same bridge.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
								 upper_dev);
			else
				mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
		break;
	}

	return err;
}
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}
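/* Top-level netdevice notifier, dispatching by event and device type:
 *
 *   NETDEV_CHANGEADDR / NETDEV_CHANGEMTU -> router port handling
 *   upper is an L3 master (VRF)          -> VRF handling
 *   mlxsw_sp port                        -> port handler
 *   LAG master                           -> replicated to member ports
 *   bridge master                        -> bridge handler
 *   VLAN device                          -> vPort handler on real device
 *
 * The VRF check precedes the device-type checks so that VRF enslavement
 * of any of these devices is routed to the router code.
 */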
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },	/* terminating entry */
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};
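/* The notifier blocks are registered before the core and PCI drivers,
 * presumably so that no event is missed once ports start probing; the
 * error path and module exit unwind the registrations in reverse order.
 */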
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);