/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
#include <net/tc_act/tc_sample.h>

#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

	return 0;
}

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	}
	kfree(mlxsw_sp->span.entries);
}
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;
	int index = -1;
	int i;
	int err;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			index = i;
			span_entry = &mlxsw_sp->span.entries[i];
			break;
		}
	}
	if (index < 0)
		return NULL;

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	if (err)
		return NULL;

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
	return span_entry;
}
static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
{
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
}

static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
			return curr;
	}
	return NULL;
}
static struct mlxsw_sp_span_entry
*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	if (span_entry) {
		/* Already exists, just take a reference */
		span_entry->ref_count++;
		return span_entry;
	}

	return mlxsw_sp_span_entry_create(port);
}

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
{
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
	return 0;
}
static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
				return true;
	}

	return false;
}
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
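
/* Reading of the formula above (not an authoritative hardware statement):
 * the mirror buffer is sized to mtu * 5 / 2 bytes converted to cells plus
 * one spare cell, i.e. roughly two and a half maximum-sized frames of
 * headroom for packets waiting to be mirrored. With an assumed 1500 byte
 * MTU this reserves mlxsw_sp_bytes_cells(mlxsw_sp, 3750) + 1 cells.
 */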
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int err;

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
			return err;
		}
	}

	return 0;
}
static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
{
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
			return p;

	return NULL;
}
static int
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;
	int err;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
							     port->dev->mtu);

		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
		if (err) {
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
			return err;
		}
	}

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
	if (err)
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		err = -ENOMEM;
		goto err_inspected_port_alloc;
	}

	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

	return 0;

err_mpar_reg_write:
err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}
	return err;
}
static void
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
	if (!inspected_port)
		return;

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
	}

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
}
static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	int err;

	span_entry = mlxsw_sp_span_entry_get(to);
	if (!span_entry)
		return -ENOENT;

	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);

	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
	if (err)
		goto err_port_bind;

	return 0;

err_port_bind:
	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
	return err;
}

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
{
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
	if (!span_entry) {
		netdev_err(from->dev, "no span entry found\n");
		return;
	}

	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    bool enable, u32 rate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mpsc_pl[MLXSW_REG_MPSC_LEN];

	mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
			      vid_end, learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						learn_enable);
}
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
}

#define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */

static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				  u16 delay)
{
	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
							    BITS_PER_BYTE));
	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
								   mtu);
}
/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
 * Assumes 100m cable and maximum MTU.
 */
#define MLXSW_SP_PAUSE_DELAY 58752
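
/* When only PAUSE (and not PFC) is enabled there is no per-priority delay
 * from the DCB configuration to work with, so mlxsw_sp_pg_buf_delay_get()
 * below falls back to this fixed worst-case value, converted to cells.
 */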
static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
				     u16 delay, bool pfc, bool pause)
{
	if (pfc)
		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
	else if (pause)
		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
	else
		return 0;
}

static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
				 bool lossy)
{
	if (lossy)
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
	else
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
						    thres);
}
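
/* Summary of the headroom logic below: every priority group that has at
 * least one priority mapped to it gets a buffer of threshold + delay cells.
 * The buffer is configured lossless (with an xoff threshold) when PFC is
 * enabled for one of its priorities or global PAUSE is on, and lossy
 * otherwise.
 */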
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;
		bool lossy;
		u16 thres;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;

		lossy = !(pfc || pause_en);
		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
						  pause_en);
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
	if (err)
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		return err;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

	return 0;
}
static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
}
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
*mlxsw_sp_port
)
1105 enum mlxsw_reg_svfa_mt mt
= MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
;
1106 u16 vid
, last_visited_vid
;
1109 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, VLAN_N_VID
) {
1110 err
= mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, true, vid
,
1113 last_visited_vid
= vid
;
1114 goto err_port_vid_to_fid_set
;
1118 err
= mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, true);
1120 last_visited_vid
= VLAN_N_VID
;
1121 goto err_port_vid_to_fid_set
;
1126 err_port_vid_to_fid_set
:
1127 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, last_visited_vid
)
1128 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, false, vid
,
1133 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port
*mlxsw_sp_port
)
1135 enum mlxsw_reg_svfa_mt mt
= MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
;
1139 err
= mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, false);
1143 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, VLAN_N_VID
) {
1144 err
= mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, false,
1153 static struct mlxsw_sp_port
*
1154 mlxsw_sp_port_vport_create(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
1156 struct mlxsw_sp_port
*mlxsw_sp_vport
;
1158 mlxsw_sp_vport
= kzalloc(sizeof(*mlxsw_sp_vport
), GFP_KERNEL
);
1159 if (!mlxsw_sp_vport
)
1162 /* dev will be set correctly after the VLAN device is linked
1163 * with the real device. In case of bridge SELF invocation, dev
1164 * will remain as is.
1166 mlxsw_sp_vport
->dev
= mlxsw_sp_port
->dev
;
1167 mlxsw_sp_vport
->mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1168 mlxsw_sp_vport
->local_port
= mlxsw_sp_port
->local_port
;
1169 mlxsw_sp_vport
->stp_state
= BR_STATE_FORWARDING
;
1170 mlxsw_sp_vport
->lagged
= mlxsw_sp_port
->lagged
;
1171 mlxsw_sp_vport
->lag_id
= mlxsw_sp_port
->lag_id
;
1172 mlxsw_sp_vport
->vport
.vid
= vid
;
1174 list_add(&mlxsw_sp_vport
->vport
.list
, &mlxsw_sp_port
->vports_list
);
1176 return mlxsw_sp_vport
;
1179 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port
*mlxsw_sp_vport
)
1181 list_del(&mlxsw_sp_vport
->vport
.list
);
1182 kfree(mlxsw_sp_vport
);
1185 static int mlxsw_sp_port_add_vid(struct net_device
*dev
,
1186 __be16 __always_unused proto
, u16 vid
)
1188 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1189 struct mlxsw_sp_port
*mlxsw_sp_vport
;
1190 bool untagged
= vid
== 1;
1193 /* VLAN 0 is added to HW filter when device goes up, but it is
1194 * reserved in our case, so simply return.
1199 if (mlxsw_sp_port_vport_find(mlxsw_sp_port
, vid
))
1202 mlxsw_sp_vport
= mlxsw_sp_port_vport_create(mlxsw_sp_port
, vid
);
1203 if (!mlxsw_sp_vport
)
1206 /* When adding the first VLAN interface on a bridged port we need to
1207 * transition all the active 802.1Q bridge VLANs to use explicit
1208 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
1210 if (list_is_singular(&mlxsw_sp_port
->vports_list
)) {
1211 err
= mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port
);
1213 goto err_port_vp_mode_trans
;
1216 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_vport
, vid
, vid
, true, untagged
);
1218 goto err_port_add_vid
;
1223 if (list_is_singular(&mlxsw_sp_port
->vports_list
))
1224 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port
);
1225 err_port_vp_mode_trans
:
1226 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport
);
1230 static int mlxsw_sp_port_kill_vid(struct net_device
*dev
,
1231 __be16 __always_unused proto
, u16 vid
)
1233 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1234 struct mlxsw_sp_port
*mlxsw_sp_vport
;
1235 struct mlxsw_sp_fid
*f
;
1237 /* VLAN 0 is removed from HW filter when device goes down, but
1238 * it is reserved in our case, so simply return.
1243 mlxsw_sp_vport
= mlxsw_sp_port_vport_find(mlxsw_sp_port
, vid
);
1244 if (WARN_ON(!mlxsw_sp_vport
))
1247 mlxsw_sp_port_vlan_set(mlxsw_sp_vport
, vid
, vid
, false, false);
1249 /* Drop FID reference. If this was the last reference the
1250 * resources will be freed.
1252 f
= mlxsw_sp_vport_fid_get(mlxsw_sp_vport
);
1253 if (f
&& !WARN_ON(!f
->leave
))
1254 f
->leave(mlxsw_sp_vport
);
1256 /* When removing the last VLAN interface on a bridged port we need to
1257 * transition all active 802.1Q bridge VLANs to use VID to FID
1258 * mappings and set port's mode to VLAN mode.
1260 if (list_is_singular(&mlxsw_sp_port
->vports_list
))
1261 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port
);
1263 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport
);
1268 static int mlxsw_sp_port_get_phys_port_name(struct net_device
*dev
, char *name
,
1271 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1272 u8 module
= mlxsw_sp_port
->mapping
.module
;
1273 u8 width
= mlxsw_sp_port
->mapping
.width
;
1274 u8 lane
= mlxsw_sp_port
->mapping
.lane
;
1277 if (!mlxsw_sp_port
->split
)
1278 err
= snprintf(name
, len
, "p%d", module
+ 1);
1280 err
= snprintf(name
, len
, "p%ds%d", module
+ 1,
1289 static struct mlxsw_sp_port_mall_tc_entry
*
1290 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port
*port
,
1291 unsigned long cookie
) {
1292 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1294 list_for_each_entry(mall_tc_entry
, &port
->mall_tc_list
, list
)
1295 if (mall_tc_entry
->cookie
== cookie
)
1296 return mall_tc_entry
;
1302 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port
*mlxsw_sp_port
,
1303 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
,
1304 const struct tc_action
*a
,
1307 struct net
*net
= dev_net(mlxsw_sp_port
->dev
);
1308 enum mlxsw_sp_span_type span_type
;
1309 struct mlxsw_sp_port
*to_port
;
1310 struct net_device
*to_dev
;
1313 ifindex
= tcf_mirred_ifindex(a
);
1314 to_dev
= __dev_get_by_index(net
, ifindex
);
1316 netdev_err(mlxsw_sp_port
->dev
, "Could not find requested device\n");
1320 if (!mlxsw_sp_port_dev_check(to_dev
)) {
1321 netdev_err(mlxsw_sp_port
->dev
, "Cannot mirror to a non-spectrum port");
1324 to_port
= netdev_priv(to_dev
);
1326 mirror
->to_local_port
= to_port
->local_port
;
1327 mirror
->ingress
= ingress
;
1328 span_type
= ingress
? MLXSW_SP_SPAN_INGRESS
: MLXSW_SP_SPAN_EGRESS
;
1329 return mlxsw_sp_span_mirror_add(mlxsw_sp_port
, to_port
, span_type
);
1333 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port
*mlxsw_sp_port
,
1334 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
)
1336 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1337 enum mlxsw_sp_span_type span_type
;
1338 struct mlxsw_sp_port
*to_port
;
1340 to_port
= mlxsw_sp
->ports
[mirror
->to_local_port
];
1341 span_type
= mirror
->ingress
?
1342 MLXSW_SP_SPAN_INGRESS
: MLXSW_SP_SPAN_EGRESS
;
1343 mlxsw_sp_span_mirror_remove(mlxsw_sp_port
, to_port
, span_type
);
1347 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port
*mlxsw_sp_port
,
1348 struct tc_cls_matchall_offload
*cls
,
1349 const struct tc_action
*a
,
1354 if (!mlxsw_sp_port
->sample
)
1356 if (rtnl_dereference(mlxsw_sp_port
->sample
->psample_group
)) {
1357 netdev_err(mlxsw_sp_port
->dev
, "sample already active\n");
1360 if (tcf_sample_rate(a
) > MLXSW_REG_MPSC_RATE_MAX
) {
1361 netdev_err(mlxsw_sp_port
->dev
, "sample rate not supported\n");
1365 rcu_assign_pointer(mlxsw_sp_port
->sample
->psample_group
,
1366 tcf_sample_psample_group(a
));
1367 mlxsw_sp_port
->sample
->truncate
= tcf_sample_truncate(a
);
1368 mlxsw_sp_port
->sample
->trunc_size
= tcf_sample_trunc_size(a
);
1369 mlxsw_sp_port
->sample
->rate
= tcf_sample_rate(a
);
1371 err
= mlxsw_sp_port_sample_set(mlxsw_sp_port
, true, tcf_sample_rate(a
));
1373 goto err_port_sample_set
;
1376 err_port_sample_set
:
1377 RCU_INIT_POINTER(mlxsw_sp_port
->sample
->psample_group
, NULL
);
1382 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port
*mlxsw_sp_port
)
1384 if (!mlxsw_sp_port
->sample
)
1387 mlxsw_sp_port_sample_set(mlxsw_sp_port
, false, 1);
1388 RCU_INIT_POINTER(mlxsw_sp_port
->sample
->psample_group
, NULL
);
1391 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port
*mlxsw_sp_port
,
1393 struct tc_cls_matchall_offload
*cls
,
1396 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1397 const struct tc_action
*a
;
1401 if (!tc_single_action(cls
->exts
)) {
1402 netdev_err(mlxsw_sp_port
->dev
, "only singular actions are supported\n");
1406 mall_tc_entry
= kzalloc(sizeof(*mall_tc_entry
), GFP_KERNEL
);
1409 mall_tc_entry
->cookie
= cls
->cookie
;
1411 tcf_exts_to_list(cls
->exts
, &actions
);
1412 a
= list_first_entry(&actions
, struct tc_action
, list
);
1414 if (is_tcf_mirred_egress_mirror(a
) && protocol
== htons(ETH_P_ALL
)) {
1415 struct mlxsw_sp_port_mall_mirror_tc_entry
*mirror
;
1417 mall_tc_entry
->type
= MLXSW_SP_PORT_MALL_MIRROR
;
1418 mirror
= &mall_tc_entry
->mirror
;
1419 err
= mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port
,
1420 mirror
, a
, ingress
);
1421 } else if (is_tcf_sample(a
) && protocol
== htons(ETH_P_ALL
)) {
1422 mall_tc_entry
->type
= MLXSW_SP_PORT_MALL_SAMPLE
;
1423 err
= mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port
, cls
,
1430 goto err_add_action
;
1432 list_add_tail(&mall_tc_entry
->list
, &mlxsw_sp_port
->mall_tc_list
);
1436 kfree(mall_tc_entry
);
1440 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port
*mlxsw_sp_port
,
1441 struct tc_cls_matchall_offload
*cls
)
1443 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1445 mall_tc_entry
= mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port
,
1447 if (!mall_tc_entry
) {
1448 netdev_dbg(mlxsw_sp_port
->dev
, "tc entry not found on port\n");
1451 list_del(&mall_tc_entry
->list
);
1453 switch (mall_tc_entry
->type
) {
1454 case MLXSW_SP_PORT_MALL_MIRROR
:
1455 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port
,
1456 &mall_tc_entry
->mirror
);
1458 case MLXSW_SP_PORT_MALL_SAMPLE
:
1459 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port
);
1465 kfree(mall_tc_entry
);
1468 static int mlxsw_sp_setup_tc(struct net_device
*dev
, u32 handle
,
1469 __be16 proto
, struct tc_to_netdev
*tc
)
1471 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1472 bool ingress
= TC_H_MAJ(handle
) == TC_H_MAJ(TC_H_INGRESS
);
1475 case TC_SETUP_MATCHALL
:
1476 switch (tc
->cls_mall
->command
) {
1477 case TC_CLSMATCHALL_REPLACE
:
1478 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port
,
1482 case TC_CLSMATCHALL_DESTROY
:
1483 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port
,
1489 case TC_SETUP_CLSFLOWER
:
1490 switch (tc
->cls_flower
->command
) {
1491 case TC_CLSFLOWER_REPLACE
:
1492 return mlxsw_sp_flower_replace(mlxsw_sp_port
, ingress
,
1493 proto
, tc
->cls_flower
);
1494 case TC_CLSFLOWER_DESTROY
:
1495 mlxsw_sp_flower_destroy(mlxsw_sp_port
, ingress
,
1498 case TC_CLSFLOWER_STATS
:
1499 return mlxsw_sp_flower_stats(mlxsw_sp_port
, ingress
,
1509 static const struct net_device_ops mlxsw_sp_port_netdev_ops
= {
1510 .ndo_open
= mlxsw_sp_port_open
,
1511 .ndo_stop
= mlxsw_sp_port_stop
,
1512 .ndo_start_xmit
= mlxsw_sp_port_xmit
,
1513 .ndo_setup_tc
= mlxsw_sp_setup_tc
,
1514 .ndo_set_rx_mode
= mlxsw_sp_set_rx_mode
,
1515 .ndo_set_mac_address
= mlxsw_sp_port_set_mac_address
,
1516 .ndo_change_mtu
= mlxsw_sp_port_change_mtu
,
1517 .ndo_get_stats64
= mlxsw_sp_port_get_stats64
,
1518 .ndo_has_offload_stats
= mlxsw_sp_port_has_offload_stats
,
1519 .ndo_get_offload_stats
= mlxsw_sp_port_get_offload_stats
,
1520 .ndo_vlan_rx_add_vid
= mlxsw_sp_port_add_vid
,
1521 .ndo_vlan_rx_kill_vid
= mlxsw_sp_port_kill_vid
,
1522 .ndo_fdb_add
= switchdev_port_fdb_add
,
1523 .ndo_fdb_del
= switchdev_port_fdb_del
,
1524 .ndo_fdb_dump
= switchdev_port_fdb_dump
,
1525 .ndo_bridge_setlink
= switchdev_port_bridge_setlink
,
1526 .ndo_bridge_getlink
= switchdev_port_bridge_getlink
,
1527 .ndo_bridge_dellink
= switchdev_port_bridge_dellink
,
1528 .ndo_get_phys_port_name
= mlxsw_sp_port_get_phys_port_name
,
1531 static void mlxsw_sp_port_get_drvinfo(struct net_device
*dev
,
1532 struct ethtool_drvinfo
*drvinfo
)
1534 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1535 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1537 strlcpy(drvinfo
->driver
, mlxsw_sp_driver_name
, sizeof(drvinfo
->driver
));
1538 strlcpy(drvinfo
->version
, mlxsw_sp_driver_version
,
1539 sizeof(drvinfo
->version
));
1540 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
1542 mlxsw_sp
->bus_info
->fw_rev
.major
,
1543 mlxsw_sp
->bus_info
->fw_rev
.minor
,
1544 mlxsw_sp
->bus_info
->fw_rev
.subminor
);
1545 strlcpy(drvinfo
->bus_info
, mlxsw_sp
->bus_info
->device_name
,
1546 sizeof(drvinfo
->bus_info
));
1549 static void mlxsw_sp_port_get_pauseparam(struct net_device
*dev
,
1550 struct ethtool_pauseparam
*pause
)
1552 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1554 pause
->rx_pause
= mlxsw_sp_port
->link
.rx_pause
;
1555 pause
->tx_pause
= mlxsw_sp_port
->link
.tx_pause
;
1558 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1559 struct ethtool_pauseparam
*pause
)
1561 char pfcc_pl
[MLXSW_REG_PFCC_LEN
];
1563 mlxsw_reg_pfcc_pack(pfcc_pl
, mlxsw_sp_port
->local_port
);
1564 mlxsw_reg_pfcc_pprx_set(pfcc_pl
, pause
->rx_pause
);
1565 mlxsw_reg_pfcc_pptx_set(pfcc_pl
, pause
->tx_pause
);
1567 return mlxsw_reg_write(mlxsw_sp_port
->mlxsw_sp
->core
, MLXSW_REG(pfcc
),
1571 static int mlxsw_sp_port_set_pauseparam(struct net_device
*dev
,
1572 struct ethtool_pauseparam
*pause
)
1574 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1575 bool pause_en
= pause
->tx_pause
|| pause
->rx_pause
;
1578 if (mlxsw_sp_port
->dcb
.pfc
&& mlxsw_sp_port
->dcb
.pfc
->pfc_en
) {
1579 netdev_err(dev
, "PFC already enabled on port\n");
1583 if (pause
->autoneg
) {
1584 netdev_err(dev
, "PAUSE frames autonegotiation isn't supported\n");
1588 err
= mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
1590 netdev_err(dev
, "Failed to configure port's headroom\n");
1594 err
= mlxsw_sp_port_pause_set(mlxsw_sp_port
, pause
);
1596 netdev_err(dev
, "Failed to set PAUSE parameters\n");
1597 goto err_port_pause_configure
;
1600 mlxsw_sp_port
->link
.rx_pause
= pause
->rx_pause
;
1601 mlxsw_sp_port
->link
.tx_pause
= pause
->tx_pause
;
1605 err_port_pause_configure
:
1606 pause_en
= mlxsw_sp_port_is_pause_en(mlxsw_sp_port
);
1607 mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
1611 struct mlxsw_sp_port_hw_stats
{
1612 char str
[ETH_GSTRING_LEN
];
1613 u64 (*getter
)(const char *payload
);
1617 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats
[] = {
1619 .str
= "a_frames_transmitted_ok",
1620 .getter
= mlxsw_reg_ppcnt_a_frames_transmitted_ok_get
,
1623 .str
= "a_frames_received_ok",
1624 .getter
= mlxsw_reg_ppcnt_a_frames_received_ok_get
,
1627 .str
= "a_frame_check_sequence_errors",
1628 .getter
= mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get
,
1631 .str
= "a_alignment_errors",
1632 .getter
= mlxsw_reg_ppcnt_a_alignment_errors_get
,
1635 .str
= "a_octets_transmitted_ok",
1636 .getter
= mlxsw_reg_ppcnt_a_octets_transmitted_ok_get
,
1639 .str
= "a_octets_received_ok",
1640 .getter
= mlxsw_reg_ppcnt_a_octets_received_ok_get
,
1643 .str
= "a_multicast_frames_xmitted_ok",
1644 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get
,
1647 .str
= "a_broadcast_frames_xmitted_ok",
1648 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get
,
1651 .str
= "a_multicast_frames_received_ok",
1652 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get
,
1655 .str
= "a_broadcast_frames_received_ok",
1656 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get
,
1659 .str
= "a_in_range_length_errors",
1660 .getter
= mlxsw_reg_ppcnt_a_in_range_length_errors_get
,
1663 .str
= "a_out_of_range_length_field",
1664 .getter
= mlxsw_reg_ppcnt_a_out_of_range_length_field_get
,
1667 .str
= "a_frame_too_long_errors",
1668 .getter
= mlxsw_reg_ppcnt_a_frame_too_long_errors_get
,
1671 .str
= "a_symbol_error_during_carrier",
1672 .getter
= mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get
,
1675 .str
= "a_mac_control_frames_transmitted",
1676 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get
,
1679 .str
= "a_mac_control_frames_received",
1680 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_received_get
,
1683 .str
= "a_unsupported_opcodes_received",
1684 .getter
= mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get
,
1687 .str
= "a_pause_mac_ctrl_frames_received",
1688 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get
,
1691 .str
= "a_pause_mac_ctrl_frames_xmitted",
1692 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get
,
1696 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1698 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats
[] = {
1700 .str
= "rx_octets_prio",
1701 .getter
= mlxsw_reg_ppcnt_rx_octets_get
,
1704 .str
= "rx_frames_prio",
1705 .getter
= mlxsw_reg_ppcnt_rx_frames_get
,
1708 .str
= "tx_octets_prio",
1709 .getter
= mlxsw_reg_ppcnt_tx_octets_get
,
1712 .str
= "tx_frames_prio",
1713 .getter
= mlxsw_reg_ppcnt_tx_frames_get
,
1716 .str
= "rx_pause_prio",
1717 .getter
= mlxsw_reg_ppcnt_rx_pause_get
,
1720 .str
= "rx_pause_duration_prio",
1721 .getter
= mlxsw_reg_ppcnt_rx_pause_duration_get
,
1724 .str
= "tx_pause_prio",
1725 .getter
= mlxsw_reg_ppcnt_tx_pause_get
,
1728 .str
= "tx_pause_duration_prio",
1729 .getter
= mlxsw_reg_ppcnt_tx_pause_duration_get
,
1733 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1735 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats
[] = {
1737 .str
= "tc_transmit_queue_tc",
1738 .getter
= mlxsw_reg_ppcnt_tc_transmit_queue_get
,
1739 .cells_bytes
= true,
1742 .str
= "tc_no_buffer_discard_uc_tc",
1743 .getter
= mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get
,
1747 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1749 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
1750 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1751 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
1752 IEEE_8021QAZ_MAX_TCS)
1754 static void mlxsw_sp_port_get_prio_strings(u8
**p
, int prio
)
1758 for (i
= 0; i
< MLXSW_SP_PORT_HW_PRIO_STATS_LEN
; i
++) {
1759 snprintf(*p
, ETH_GSTRING_LEN
, "%s_%d",
1760 mlxsw_sp_port_hw_prio_stats
[i
].str
, prio
);
1761 *p
+= ETH_GSTRING_LEN
;
1765 static void mlxsw_sp_port_get_tc_strings(u8
**p
, int tc
)
1769 for (i
= 0; i
< MLXSW_SP_PORT_HW_TC_STATS_LEN
; i
++) {
1770 snprintf(*p
, ETH_GSTRING_LEN
, "%s_%d",
1771 mlxsw_sp_port_hw_tc_stats
[i
].str
, tc
);
1772 *p
+= ETH_GSTRING_LEN
;
1776 static void mlxsw_sp_port_get_strings(struct net_device
*dev
,
1777 u32 stringset
, u8
*data
)
1782 switch (stringset
) {
1784 for (i
= 0; i
< MLXSW_SP_PORT_HW_STATS_LEN
; i
++) {
1785 memcpy(p
, mlxsw_sp_port_hw_stats
[i
].str
,
1787 p
+= ETH_GSTRING_LEN
;
1790 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
1791 mlxsw_sp_port_get_prio_strings(&p
, i
);
1793 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
1794 mlxsw_sp_port_get_tc_strings(&p
, i
);
1800 static int mlxsw_sp_port_set_phys_id(struct net_device
*dev
,
1801 enum ethtool_phys_id_state state
)
1803 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1804 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1805 char mlcr_pl
[MLXSW_REG_MLCR_LEN
];
1809 case ETHTOOL_ID_ACTIVE
:
1812 case ETHTOOL_ID_INACTIVE
:
1819 mlxsw_reg_mlcr_pack(mlcr_pl
, mlxsw_sp_port
->local_port
, active
);
1820 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mlcr
), mlcr_pl
);
1824 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats
**p_hw_stats
,
1825 int *p_len
, enum mlxsw_reg_ppcnt_grp grp
)
1828 case MLXSW_REG_PPCNT_IEEE_8023_CNT
:
1829 *p_hw_stats
= mlxsw_sp_port_hw_stats
;
1830 *p_len
= MLXSW_SP_PORT_HW_STATS_LEN
;
1832 case MLXSW_REG_PPCNT_PRIO_CNT
:
1833 *p_hw_stats
= mlxsw_sp_port_hw_prio_stats
;
1834 *p_len
= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
1836 case MLXSW_REG_PPCNT_TC_CNT
:
1837 *p_hw_stats
= mlxsw_sp_port_hw_tc_stats
;
1838 *p_len
= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
1847 static void __mlxsw_sp_port_get_stats(struct net_device
*dev
,
1848 enum mlxsw_reg_ppcnt_grp grp
, int prio
,
1849 u64
*data
, int data_index
)
1851 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1852 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1853 struct mlxsw_sp_port_hw_stats
*hw_stats
;
1854 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
1858 err
= mlxsw_sp_get_hw_stats_by_group(&hw_stats
, &len
, grp
);
1861 mlxsw_sp_port_get_stats_raw(dev
, grp
, prio
, ppcnt_pl
);
1862 for (i
= 0; i
< len
; i
++) {
1863 data
[data_index
+ i
] = hw_stats
[i
].getter(ppcnt_pl
);
1864 if (!hw_stats
[i
].cells_bytes
)
1866 data
[data_index
+ i
] = mlxsw_sp_cells_bytes(mlxsw_sp
,
1867 data
[data_index
+ i
]);
1871 static void mlxsw_sp_port_get_stats(struct net_device
*dev
,
1872 struct ethtool_stats
*stats
, u64
*data
)
1874 int i
, data_index
= 0;
1876 /* IEEE 802.3 Counters */
1877 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_IEEE_8023_CNT
, 0,
1879 data_index
= MLXSW_SP_PORT_HW_STATS_LEN
;
1881 /* Per-Priority Counters */
1882 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1883 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_PRIO_CNT
, i
,
1885 data_index
+= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
1888 /* Per-TC Counters */
1889 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1890 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_TC_CNT
, i
,
1892 data_index
+= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
1896 static int mlxsw_sp_port_get_sset_count(struct net_device
*dev
, int sset
)
1900 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN
;
1906 struct mlxsw_sp_port_link_mode
{
1907 enum ethtool_link_mode_bit_indices mask_ethtool
;
1912 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode
[] = {
1914 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T
,
1915 .mask_ethtool
= ETHTOOL_LINK_MODE_100baseT_Full_BIT
,
1919 .mask
= MLXSW_REG_PTYS_ETH_SPEED_SGMII
|
1920 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
,
1921 .mask_ethtool
= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
,
1922 .speed
= SPEED_1000
,
1925 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T
,
1926 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseT_Full_BIT
,
1927 .speed
= SPEED_10000
,
1930 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4
|
1931 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
,
1932 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT
,
1933 .speed
= SPEED_10000
,
1936 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
1937 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
1938 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
1939 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR
,
1940 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
,
1941 .speed
= SPEED_10000
,
1944 .mask
= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2
,
1945 .mask_ethtool
= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT
,
1946 .speed
= SPEED_20000
,
1949 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
,
1950 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT
,
1951 .speed
= SPEED_40000
,
1954 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
,
1955 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT
,
1956 .speed
= SPEED_40000
,
1959 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
,
1960 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT
,
1961 .speed
= SPEED_40000
,
1964 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4
,
1965 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT
,
1966 .speed
= SPEED_40000
,
1969 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR
,
1970 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT
,
1971 .speed
= SPEED_25000
,
1974 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR
,
1975 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT
,
1976 .speed
= SPEED_25000
,
1979 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
1980 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
,
1981 .speed
= SPEED_25000
,
1984 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
1985 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
,
1986 .speed
= SPEED_25000
,
1989 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2
,
1990 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT
,
1991 .speed
= SPEED_50000
,
1994 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2
,
1995 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT
,
1996 .speed
= SPEED_50000
,
1999 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2
,
2000 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT
,
2001 .speed
= SPEED_50000
,
2004 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2005 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT
,
2006 .speed
= SPEED_56000
,
2009 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2010 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT
,
2011 .speed
= SPEED_56000
,
2014 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2015 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT
,
2016 .speed
= SPEED_56000
,
2019 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
2020 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT
,
2021 .speed
= SPEED_56000
,
2024 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
,
2025 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT
,
2026 .speed
= SPEED_100000
,
2029 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
,
2030 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT
,
2031 .speed
= SPEED_100000
,
2034 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
,
2035 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
,
2036 .speed
= SPEED_100000
,
2039 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4
,
2040 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT
,
2041 .speed
= SPEED_100000
,
2045 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2048 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto
,
2049 struct ethtool_link_ksettings
*cmd
)
2051 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
2052 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
2053 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
2054 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
2055 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
2056 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
2057 ethtool_link_ksettings_add_link_mode(cmd
, supported
, FIBRE
);
2059 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
2060 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
2061 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
2062 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
|
2063 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
))
2064 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Backplane
);
2067 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto
, unsigned long *mode
)
2071 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
2072 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
)
2073 __set_bit(mlxsw_sp_port_link_mode
[i
].mask_ethtool
,
2078 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok
, u32 ptys_eth_proto
,
2079 struct ethtool_link_ksettings
*cmd
)
2081 u32 speed
= SPEED_UNKNOWN
;
2082 u8 duplex
= DUPLEX_UNKNOWN
;
2088 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
2089 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
) {
2090 speed
= mlxsw_sp_port_link_mode
[i
].speed
;
2091 duplex
= DUPLEX_FULL
;
2096 cmd
->base
.speed
= speed
;
2097 cmd
->base
.duplex
= duplex
;
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static u32
mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
			     cmd->link_modes.advertising))
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
					     struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
	mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
}

static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
					     struct ethtool_link_ksettings *cmd)
{
	if (!autoneg)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
}

static void
mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
				    struct ethtool_link_ksettings *cmd)
{
	if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
		return;

	ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
	mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
}

static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u8 autoneg_status;
	bool autoneg;
	int err;

	autoneg = mlxsw_sp_port->link.autoneg;
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  &eth_proto_oper);

	mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);

	mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);

	eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
	autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
	mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);

	cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
					cmd);

	return 0;
}

static int
mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
				 const struct ethtool_link_ksettings *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap, eth_proto_new;
	bool autoneg;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);

	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
	eth_proto_new = autoneg ?
		mlxsw_sp_to_ptys_advert_link(cmd) :
		mlxsw_sp_to_ptys_speed(cmd->base.speed);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "No supported speed requested\n");
		return -EINVAL;
	}

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	if (!netif_running(dev))
		return 0;

	mlxsw_sp_port->link.autoneg = autoneg;

	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
};
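
/* The maximum speed a port can reach scales with the number of lanes
 * (width) assigned to it, so enable every link mode up to that upper
 * speed in the admin protocol mask.
 */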
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
2339 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
2343 /* Setup the elements hierarcy, so that each TC is linked to
2344 * one subgroup, which are all member in the same group.
2346 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
2347 MLXSW_REG_QEEC_HIERARCY_GROUP
, 0, 0, false,
2351 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2352 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
2353 MLXSW_REG_QEEC_HIERARCY_SUBGROUP
, i
,
2358 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2359 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
2360 MLXSW_REG_QEEC_HIERARCY_TC
, i
, i
,
2366 /* Make sure the max shaper is disabled in all hierarcies that
2369 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
2370 MLXSW_REG_QEEC_HIERARCY_PORT
, 0, 0,
2371 MLXSW_REG_QEEC_MAS_DIS
);
2374 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2375 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
2376 MLXSW_REG_QEEC_HIERARCY_SUBGROUP
,
2378 MLXSW_REG_QEEC_MAS_DIS
);
2382 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2383 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
2384 MLXSW_REG_QEEC_HIERARCY_TC
,
2386 MLXSW_REG_QEEC_MAS_DIS
);
2391 /* Map all priorities to traffic class 0. */
2392 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2393 err
= mlxsw_sp_port_prio_tc_set(mlxsw_sp_port
, i
, 0);
static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->pvid = 1;

	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
}

static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}
2413 static int __mlxsw_sp_port_create(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
2414 bool split
, u8 module
, u8 width
, u8 lane
)
2416 struct mlxsw_sp_port
*mlxsw_sp_port
;
2417 struct net_device
*dev
;
2421 dev
= alloc_etherdev(sizeof(struct mlxsw_sp_port
));
2424 SET_NETDEV_DEV(dev
, mlxsw_sp
->bus_info
->dev
);
2425 mlxsw_sp_port
= netdev_priv(dev
);
2426 mlxsw_sp_port
->dev
= dev
;
2427 mlxsw_sp_port
->mlxsw_sp
= mlxsw_sp
;
2428 mlxsw_sp_port
->local_port
= local_port
;
2429 mlxsw_sp_port
->split
= split
;
2430 mlxsw_sp_port
->mapping
.module
= module
;
2431 mlxsw_sp_port
->mapping
.width
= width
;
2432 mlxsw_sp_port
->mapping
.lane
= lane
;
2433 mlxsw_sp_port
->link
.autoneg
= 1;
2434 bytes
= DIV_ROUND_UP(VLAN_N_VID
, BITS_PER_BYTE
);
2435 mlxsw_sp_port
->active_vlans
= kzalloc(bytes
, GFP_KERNEL
);
2436 if (!mlxsw_sp_port
->active_vlans
) {
2438 goto err_port_active_vlans_alloc
;
2440 mlxsw_sp_port
->untagged_vlans
= kzalloc(bytes
, GFP_KERNEL
);
2441 if (!mlxsw_sp_port
->untagged_vlans
) {
2443 goto err_port_untagged_vlans_alloc
;
2445 INIT_LIST_HEAD(&mlxsw_sp_port
->vports_list
);
2446 INIT_LIST_HEAD(&mlxsw_sp_port
->mall_tc_list
);
2448 mlxsw_sp_port
->pcpu_stats
=
2449 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats
);
2450 if (!mlxsw_sp_port
->pcpu_stats
) {
2452 goto err_alloc_stats
;
2455 mlxsw_sp_port
->sample
= kzalloc(sizeof(*mlxsw_sp_port
->sample
),
2457 if (!mlxsw_sp_port
->sample
) {
2459 goto err_alloc_sample
;
2462 mlxsw_sp_port
->hw_stats
.cache
=
2463 kzalloc(sizeof(*mlxsw_sp_port
->hw_stats
.cache
), GFP_KERNEL
);
2465 if (!mlxsw_sp_port
->hw_stats
.cache
) {
2467 goto err_alloc_hw_stats
;
2469 INIT_DELAYED_WORK(&mlxsw_sp_port
->hw_stats
.update_dw
,
2470 &update_stats_cache
);
2472 dev
->netdev_ops
= &mlxsw_sp_port_netdev_ops
;
2473 dev
->ethtool_ops
= &mlxsw_sp_port_ethtool_ops
;
2475 err
= mlxsw_sp_port_swid_set(mlxsw_sp_port
, 0);
2477 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set SWID\n",
2478 mlxsw_sp_port
->local_port
);
2479 goto err_port_swid_set
;
2482 err
= mlxsw_sp_port_dev_addr_init(mlxsw_sp_port
);
2484 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Unable to init port mac address\n",
2485 mlxsw_sp_port
->local_port
);
2486 goto err_dev_addr_init
;
2489 netif_carrier_off(dev
);
2491 dev
->features
|= NETIF_F_NETNS_LOCAL
| NETIF_F_LLTX
| NETIF_F_SG
|
2492 NETIF_F_HW_VLAN_CTAG_FILTER
| NETIF_F_HW_TC
;
2493 dev
->hw_features
|= NETIF_F_HW_TC
;
2496 dev
->max_mtu
= ETH_MAX_MTU
;
2498 /* Each packet needs to have a Tx header (metadata) on top all other
2501 dev
->needed_headroom
= MLXSW_TXHDR_LEN
;
2503 err
= mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port
);
2505 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set system port mapping\n",
2506 mlxsw_sp_port
->local_port
);
2507 goto err_port_system_port_mapping_set
;
2510 err
= mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port
, width
);
2512 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to enable speeds\n",
2513 mlxsw_sp_port
->local_port
);
2514 goto err_port_speed_by_width_set
;
2517 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, ETH_DATA_LEN
);
2519 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to set MTU\n",
2520 mlxsw_sp_port
->local_port
);
2521 goto err_port_mtu_set
;
2524 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
2526 goto err_port_admin_status_set
;
2528 err
= mlxsw_sp_port_buffers_init(mlxsw_sp_port
);
2530 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize buffers\n",
2531 mlxsw_sp_port
->local_port
);
2532 goto err_port_buffers_init
;
2535 err
= mlxsw_sp_port_ets_init(mlxsw_sp_port
);
2537 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize ETS\n",
2538 mlxsw_sp_port
->local_port
);
2539 goto err_port_ets_init
;
2542 /* ETS and buffers must be initialized before DCB. */
2543 err
= mlxsw_sp_port_dcb_init(mlxsw_sp_port
);
2545 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to initialize DCB\n",
2546 mlxsw_sp_port
->local_port
);
2547 goto err_port_dcb_init
;
2550 err
= mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port
);
2552 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to create PVID vPort\n",
2553 mlxsw_sp_port
->local_port
);
2554 goto err_port_pvid_vport_create
;
2557 mlxsw_sp_port_switchdev_init(mlxsw_sp_port
);
2558 mlxsw_sp
->ports
[local_port
] = mlxsw_sp_port
;
2559 err
= register_netdev(dev
);
2561 dev_err(mlxsw_sp
->bus_info
->dev
, "Port %d: Failed to register netdev\n",
2562 mlxsw_sp_port
->local_port
);
2563 goto err_register_netdev
;
2566 mlxsw_core_port_eth_set(mlxsw_sp
->core
, mlxsw_sp_port
->local_port
,
2567 mlxsw_sp_port
, dev
, mlxsw_sp_port
->split
,
2569 mlxsw_core_schedule_dw(&mlxsw_sp_port
->hw_stats
.update_dw
, 0);
2572 err_register_netdev
:
2573 mlxsw_sp
->ports
[local_port
] = NULL
;
2574 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port
);
2575 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port
);
2576 err_port_pvid_vport_create
:
2577 mlxsw_sp_port_dcb_fini(mlxsw_sp_port
);
2580 err_port_buffers_init
:
2581 err_port_admin_status_set
:
2583 err_port_speed_by_width_set
:
2584 err_port_system_port_mapping_set
:
2586 mlxsw_sp_port_swid_set(mlxsw_sp_port
, MLXSW_PORT_SWID_DISABLED_PORT
);
2588 kfree(mlxsw_sp_port
->hw_stats
.cache
);
2590 kfree(mlxsw_sp_port
->sample
);
2592 free_percpu(mlxsw_sp_port
->pcpu_stats
);
2594 kfree(mlxsw_sp_port
->untagged_vlans
);
2595 err_port_untagged_vlans_alloc
:
2596 kfree(mlxsw_sp_port
->active_vlans
);
2597 err_port_active_vlans_alloc
:
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}
	err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
				     module, width, lane);
	if (err)
		goto err_port_create;
	return 0;

err_port_create:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	kfree(mlxsw_sp_port->hw_stats.cache);
	kfree(mlxsw_sp_port->sample);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	__mlxsw_sp_port_remove(mlxsw_sp, local_port);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->port_to_module);
	kfree(mlxsw_sp->ports);
}
2668 static int mlxsw_sp_ports_create(struct mlxsw_sp
*mlxsw_sp
)
2670 unsigned int max_ports
= mlxsw_core_max_ports(mlxsw_sp
->core
);
2671 u8 module
, width
, lane
;
2676 alloc_size
= sizeof(struct mlxsw_sp_port
*) * max_ports
;
2677 mlxsw_sp
->ports
= kzalloc(alloc_size
, GFP_KERNEL
);
2678 if (!mlxsw_sp
->ports
)
2681 mlxsw_sp
->port_to_module
= kcalloc(max_ports
, sizeof(u8
), GFP_KERNEL
);
2682 if (!mlxsw_sp
->port_to_module
) {
2684 goto err_port_to_module_alloc
;
2687 for (i
= 1; i
< max_ports
; i
++) {
2688 err
= mlxsw_sp_port_module_info_get(mlxsw_sp
, i
, &module
,
2691 goto err_port_module_info_get
;
2694 mlxsw_sp
->port_to_module
[i
] = module
;
2695 err
= mlxsw_sp_port_create(mlxsw_sp
, i
, false,
2696 module
, width
, lane
);
2698 goto err_port_create
;
2703 err_port_module_info_get
:
2704 for (i
--; i
>= 1; i
--)
2705 if (mlxsw_sp_port_created(mlxsw_sp
, i
))
2706 mlxsw_sp_port_remove(mlxsw_sp
, i
);
2707 kfree(mlxsw_sp
->port_to_module
);
2708 err_port_to_module_alloc
:
2709 kfree(mlxsw_sp
->ports
);
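
/* Local ports are numbered in clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX;
 * return the first (base) local port of the cluster that local_port
 * belongs to.
 */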
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}
2720 static int mlxsw_sp_port_split_create(struct mlxsw_sp
*mlxsw_sp
, u8 base_port
,
2721 u8 module
, unsigned int count
)
2723 u8 width
= MLXSW_PORT_MODULE_MAX_WIDTH
/ count
;
2726 for (i
= 0; i
< count
; i
++) {
2727 err
= mlxsw_sp_port_module_map(mlxsw_sp
, base_port
+ i
, module
,
2730 goto err_port_module_map
;
2733 for (i
= 0; i
< count
; i
++) {
2734 err
= __mlxsw_sp_port_swid_set(mlxsw_sp
, base_port
+ i
, 0);
2736 goto err_port_swid_set
;
2739 for (i
= 0; i
< count
; i
++) {
2740 err
= mlxsw_sp_port_create(mlxsw_sp
, base_port
+ i
, true,
2741 module
, width
, i
* width
);
2743 goto err_port_create
;
2749 for (i
--; i
>= 0; i
--)
2750 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
))
2751 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
);
2754 for (i
--; i
>= 0; i
--)
2755 __mlxsw_sp_port_swid_set(mlxsw_sp
, base_port
+ i
,
2756 MLXSW_PORT_SWID_DISABLED_PORT
);
2758 err_port_module_map
:
2759 for (i
--; i
>= 0; i
--)
2760 mlxsw_sp_port_module_unmap(mlxsw_sp
, base_port
+ i
);
2764 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp
*mlxsw_sp
,
2765 u8 base_port
, unsigned int count
)
2767 u8 local_port
, module
, width
= MLXSW_PORT_MODULE_MAX_WIDTH
;
2770 /* Split by four means we need to re-create two ports, otherwise
2775 for (i
= 0; i
< count
; i
++) {
2776 local_port
= base_port
+ i
* 2;
2777 module
= mlxsw_sp
->port_to_module
[local_port
];
2779 mlxsw_sp_port_module_map(mlxsw_sp
, local_port
, module
, width
,
2783 for (i
= 0; i
< count
; i
++)
2784 __mlxsw_sp_port_swid_set(mlxsw_sp
, base_port
+ i
* 2, 0);
2786 for (i
= 0; i
< count
; i
++) {
2787 local_port
= base_port
+ i
* 2;
2788 module
= mlxsw_sp
->port_to_module
[local_port
];
2790 mlxsw_sp_port_create(mlxsw_sp
, local_port
, false, module
,
2795 static int mlxsw_sp_port_split(struct mlxsw_core
*mlxsw_core
, u8 local_port
,
2798 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2799 struct mlxsw_sp_port
*mlxsw_sp_port
;
2800 u8 module
, cur_width
, base_port
;
2804 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
2805 if (!mlxsw_sp_port
) {
2806 dev_err(mlxsw_sp
->bus_info
->dev
, "Port number \"%d\" does not exist\n",
2811 module
= mlxsw_sp_port
->mapping
.module
;
2812 cur_width
= mlxsw_sp_port
->mapping
.width
;
2814 if (count
!= 2 && count
!= 4) {
2815 netdev_err(mlxsw_sp_port
->dev
, "Port can only be split into 2 or 4 ports\n");
2819 if (cur_width
!= MLXSW_PORT_MODULE_MAX_WIDTH
) {
2820 netdev_err(mlxsw_sp_port
->dev
, "Port cannot be split further\n");
2824 /* Make sure we have enough slave (even) ports for the split. */
2826 base_port
= local_port
;
2827 if (mlxsw_sp
->ports
[base_port
+ 1]) {
2828 netdev_err(mlxsw_sp_port
->dev
, "Invalid split configuration\n");
2832 base_port
= mlxsw_sp_cluster_base_port_get(local_port
);
2833 if (mlxsw_sp
->ports
[base_port
+ 1] ||
2834 mlxsw_sp
->ports
[base_port
+ 3]) {
2835 netdev_err(mlxsw_sp_port
->dev
, "Invalid split configuration\n");
2840 for (i
= 0; i
< count
; i
++)
2841 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
))
2842 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
);
2844 err
= mlxsw_sp_port_split_create(mlxsw_sp
, base_port
, module
, count
);
2846 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create split ports\n");
2847 goto err_port_split_create
;
2852 err_port_split_create
:
2853 mlxsw_sp_port_unsplit_create(mlxsw_sp
, base_port
, count
);
2857 static int mlxsw_sp_port_unsplit(struct mlxsw_core
*mlxsw_core
, u8 local_port
)
2859 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
2860 struct mlxsw_sp_port
*mlxsw_sp_port
;
2861 u8 cur_width
, base_port
;
2865 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
2866 if (!mlxsw_sp_port
) {
2867 dev_err(mlxsw_sp
->bus_info
->dev
, "Port number \"%d\" does not exist\n",
2872 if (!mlxsw_sp_port
->split
) {
2873 netdev_err(mlxsw_sp_port
->dev
, "Port wasn't split\n");
2877 cur_width
= mlxsw_sp_port
->mapping
.width
;
2878 count
= cur_width
== 1 ? 4 : 2;
2880 base_port
= mlxsw_sp_cluster_base_port_get(local_port
);
2882 /* Determine which ports to remove. */
2883 if (count
== 2 && local_port
>= base_port
+ 2)
2884 base_port
= base_port
+ 2;
2886 for (i
= 0; i
< count
; i
++)
2887 if (mlxsw_sp_port_created(mlxsw_sp
, base_port
+ i
))
2888 mlxsw_sp_port_remove(mlxsw_sp
, base_port
+ i
);
2890 mlxsw_sp_port_unsplit_create(mlxsw_sp
, base_port
, count
);
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
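
/* Sampled packets (PKT_SAMPLE trap) are handed to the psample module
 * together with the rate and truncation size configured on the ingress
 * port, and are then consumed.
 */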
static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
					     void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct psample_group *psample_group;
	u32 size;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
				     local_port);
		goto out;
	}
	if (unlikely(!mlxsw_sp_port->sample)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
				     local_port);
		goto out;
	}

	size = mlxsw_sp_port->sample->truncate ?
	       mlxsw_sp_port->sample->trunc_size : skb->len;

	rcu_read_lock();
	psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
	if (!psample_group)
		goto out_unlock;
	psample_sample_packet(psample_group, skb, size,
			      mlxsw_sp_port->dev->ifindex, 0,
			      mlxsw_sp_port->sample->rate);
out_unlock:
	rcu_read_unlock();
out:
	consume_skb(skb);
}

#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct mlxsw_listener mlxsw_sp_listener[] = {
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
	MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
	/* PKT Sample trap */
	MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
		  false, SP_IP2ME, DISCARD)
};
3026 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core
*mlxsw_core
)
3028 char qpcr_pl
[MLXSW_REG_QPCR_LEN
];
3029 enum mlxsw_reg_qpcr_ir_units ir_units
;
3030 int max_cpu_policers
;
3036 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_CPU_POLICERS
))
3039 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
3041 ir_units
= MLXSW_REG_QPCR_IR_UNITS_M
;
3042 for (i
= 0; i
< max_cpu_policers
; i
++) {
3045 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP
:
3046 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP
:
3047 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP
:
3048 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF
:
3052 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP
:
3056 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4
:
3057 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP
:
3058 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP
:
3059 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS
:
3060 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
3061 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE
:
3065 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME
:
3074 mlxsw_reg_qpcr_pack(qpcr_pl
, i
, ir_units
, is_bytes
, rate
,
3076 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(qpcr
), qpcr_pl
);
3084 static int mlxsw_sp_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
3086 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
3087 enum mlxsw_reg_htgt_trap_group i
;
3088 int max_cpu_policers
;
3089 int max_trap_groups
;
3094 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_TRAP_GROUPS
))
3097 max_trap_groups
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_TRAP_GROUPS
);
3098 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
3100 for (i
= 0; i
< max_trap_groups
; i
++) {
3103 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP
:
3104 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP
:
3105 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP
:
3106 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF
:
3110 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4
:
3111 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP
:
3115 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP
:
3116 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME
:
3120 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP
:
3124 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS
:
3125 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
3126 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE
:
3130 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT
:
3131 priority
= MLXSW_REG_HTGT_DEFAULT_PRIORITY
;
3132 tc
= MLXSW_REG_HTGT_DEFAULT_TC
;
3133 policer_id
= MLXSW_REG_HTGT_INVALID_POLICER
;
3139 if (max_cpu_policers
<= policer_id
&&
3140 policer_id
!= MLXSW_REG_HTGT_INVALID_POLICER
)
3143 mlxsw_reg_htgt_pack(htgt_pl
, i
, policer_id
, priority
, tc
);
3144 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &mlxsw_sp_listener[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;
	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
}
3195 static int __mlxsw_sp_flood_init(struct mlxsw_core
*mlxsw_core
,
3196 enum mlxsw_reg_sfgc_type type
,
3197 enum mlxsw_reg_sfgc_bridge_type bridge_type
)
3199 enum mlxsw_flood_table_type table_type
;
3200 enum mlxsw_sp_flood_table flood_table
;
3201 char sfgc_pl
[MLXSW_REG_SFGC_LEN
];
3203 if (bridge_type
== MLXSW_REG_SFGC_BRIDGE_TYPE_VFID
)
3204 table_type
= MLXSW_REG_SFGC_TABLE_TYPE_FID
;
3206 table_type
= MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST
;
3209 case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST
:
3210 flood_table
= MLXSW_SP_FLOOD_TABLE_UC
;
3212 case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4
:
3213 flood_table
= MLXSW_SP_FLOOD_TABLE_MC
;
3216 flood_table
= MLXSW_SP_FLOOD_TABLE_BC
;
3219 mlxsw_reg_sfgc_pack(sfgc_pl
, type
, bridge_type
, table_type
,
3221 return mlxsw_reg_write(mlxsw_core
, MLXSW_REG(sfgc
), sfgc_pl
);
3224 static int mlxsw_sp_flood_init(struct mlxsw_sp
*mlxsw_sp
)
3228 for (type
= 0; type
< MLXSW_REG_SFGC_TYPE_MAX
; type
++) {
3229 if (type
== MLXSW_REG_SFGC_TYPE_RESERVED
)
3232 err
= __mlxsw_sp_flood_init(mlxsw_sp
->core
, type
,
3233 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID
);
3237 err
= __mlxsw_sp_flood_init(mlxsw_sp
->core
, type
,
3238 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID
);
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	int err;

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
				 sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags)
		return -ENOMEM;

	return 0;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create);

static int mlxsw_sp_dummy_fid_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, true);
}

static void mlxsw_sp_dummy_fid_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_vfid_op(mlxsw_sp, MLXSW_SP_DUMMY_FID, false);
}
3305 static int mlxsw_sp_init(struct mlxsw_core
*mlxsw_core
,
3306 const struct mlxsw_bus_info
*mlxsw_bus_info
)
3308 struct mlxsw_sp
*mlxsw_sp
= mlxsw_core_driver_priv(mlxsw_core
);
3311 mlxsw_sp
->core
= mlxsw_core
;
3312 mlxsw_sp
->bus_info
= mlxsw_bus_info
;
3313 INIT_LIST_HEAD(&mlxsw_sp
->fids
);
3314 INIT_LIST_HEAD(&mlxsw_sp
->vfids
.list
);
3315 INIT_LIST_HEAD(&mlxsw_sp
->br_mids
.list
);
3317 err
= mlxsw_sp_base_mac_get(mlxsw_sp
);
3319 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to get base mac\n");
3323 err
= mlxsw_sp_traps_init(mlxsw_sp
);
3325 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to set traps\n");
3329 err
= mlxsw_sp_flood_init(mlxsw_sp
);
3331 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize flood tables\n");
3332 goto err_flood_init
;
3335 err
= mlxsw_sp_buffers_init(mlxsw_sp
);
3337 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize buffers\n");
3338 goto err_buffers_init
;
3341 err
= mlxsw_sp_lag_init(mlxsw_sp
);
3343 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize LAG\n");
3347 err
= mlxsw_sp_switchdev_init(mlxsw_sp
);
3349 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize switchdev\n");
3350 goto err_switchdev_init
;
3353 err
= mlxsw_sp_router_init(mlxsw_sp
);
3355 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize router\n");
3356 goto err_router_init
;
3359 err
= mlxsw_sp_span_init(mlxsw_sp
);
3361 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init span system\n");
3365 err
= mlxsw_sp_acl_init(mlxsw_sp
);
3367 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to initialize ACL\n");
3371 err
= mlxsw_sp_counter_pool_init(mlxsw_sp
);
3373 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init counter pool\n");
3374 goto err_counter_pool_init
;
3377 err
= mlxsw_sp_dpipe_init(mlxsw_sp
);
3379 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init pipeline debug\n");
3380 goto err_dpipe_init
;
3383 err
= mlxsw_sp_dummy_fid_init(mlxsw_sp
);
3385 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to init dummy FID\n");
3386 goto err_dummy_fid_init
;
3389 err
= mlxsw_sp_ports_create(mlxsw_sp
);
3391 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to create ports\n");
3392 goto err_ports_create
;
3398 mlxsw_sp_dummy_fid_fini(mlxsw_sp
);
3400 mlxsw_sp_dpipe_fini(mlxsw_sp
);
3402 mlxsw_sp_counter_pool_fini(mlxsw_sp
);
3403 err_counter_pool_init
:
3404 mlxsw_sp_acl_fini(mlxsw_sp
);
3406 mlxsw_sp_span_fini(mlxsw_sp
);
3408 mlxsw_sp_router_fini(mlxsw_sp
);
3410 mlxsw_sp_switchdev_fini(mlxsw_sp
);
3412 mlxsw_sp_lag_fini(mlxsw_sp
);
3414 mlxsw_sp_buffers_fini(mlxsw_sp
);
3417 mlxsw_sp_traps_fini(mlxsw_sp
);
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_dummy_fid_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
}
3440 static struct mlxsw_config_profile mlxsw_sp_config_profile
= {
3441 .used_max_vepa_channels
= 1,
3442 .max_vepa_channels
= 0,
3444 .max_mid
= MLXSW_SP_MID_MAX
,
3447 .used_flood_tables
= 1,
3448 .used_flood_mode
= 1,
3450 .max_fid_offset_flood_tables
= 3,
3451 .fid_offset_flood_table_size
= VLAN_N_VID
- 1,
3452 .max_fid_flood_tables
= 3,
3453 .fid_flood_table_size
= MLXSW_SP_VFID_MAX
,
3454 .used_max_ib_mc
= 1,
3458 .used_kvd_split_data
= 1,
3459 .kvd_hash_granularity
= MLXSW_SP_KVD_GRANULARITY
,
3460 .kvd_hash_single_parts
= 2,
3461 .kvd_hash_double_parts
= 1,
3462 .kvd_linear_size
= MLXSW_SP_KVD_LINEAR_SIZE
,
3466 .type
= MLXSW_PORT_SWID_TYPE_ETH
,
3469 .resource_query_enable
= 1,
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= mlxsw_sp_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **p_mlxsw_sp_port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*p_mlxsw_sp_port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	mlxsw_sp_port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &mlxsw_sp_port);

	return mlxsw_sp_port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();

	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
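
/* An FDB entry for a {port, FID} pair should only be flushed on behalf
 * of a LAG port if no other member of the same LAG is still using the
 * FID.
 */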
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
					 u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
	else
		return test_bit(fid, lag_port->active_vlans);
}

static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 lag_id = mlxsw_sp_port->lag_id;
	u64 max_lag_members;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (!lag_port || lag_port->local_port == local_port)
			continue;
		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
			count++;
	}

	return !count;
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
		return 0;

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}

static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->mc_flood = 1;
	mlxsw_sp_port->mc_router = 0;
	mlxsw_sp_port->mc_disabled = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->mc_flood = 0;
	mlxsw_sp_port->mc_router = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
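
/* SLDR and SLCOR register helpers used to create LAGs in the device and
 * to add, remove, enable and disable their member (collector) ports.
 */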
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
3795 static int mlxsw_sp_lag_index_get(struct mlxsw_sp
*mlxsw_sp
,
3796 struct net_device
*lag_dev
,
3799 struct mlxsw_sp_upper
*lag
;
3800 int free_lag_id
= -1;
3804 max_lag
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LAG
);
3805 for (i
= 0; i
< max_lag
; i
++) {
3806 lag
= mlxsw_sp_lag_get(mlxsw_sp
, i
);
3807 if (lag
->ref_count
) {
3808 if (lag
->dev
== lag_dev
) {
3812 } else if (free_lag_id
< 0) {
3816 if (free_lag_id
< 0)
3818 *p_lag_id
= free_lag_id
;
3823 mlxsw_sp_master_lag_check(struct mlxsw_sp
*mlxsw_sp
,
3824 struct net_device
*lag_dev
,
3825 struct netdev_lag_upper_info
*lag_upper_info
)
3829 if (mlxsw_sp_lag_index_get(mlxsw_sp
, lag_dev
, &lag_id
) != 0)
3831 if (lag_upper_info
->tx_type
!= NETDEV_LAG_TX_TYPE_HASH
)
3836 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp
*mlxsw_sp
,
3837 u16 lag_id
, u8
*p_port_index
)
3839 u64 max_lag_members
;
3842 max_lag_members
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
3844 for (i
= 0; i
< max_lag_members
; i
++) {
3845 if (!mlxsw_sp_port_lagged_get(mlxsw_sp
, lag_id
, i
)) {
3854 mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port
*mlxsw_sp_port
,
3855 struct net_device
*lag_dev
, u16 lag_id
)
3857 struct mlxsw_sp_port
*mlxsw_sp_vport
;
3858 struct mlxsw_sp_fid
*f
;
3860 mlxsw_sp_vport
= mlxsw_sp_port_vport_find(mlxsw_sp_port
, 1);
3861 if (WARN_ON(!mlxsw_sp_vport
))
3864 /* If vPort is assigned a RIF, then leave it since it's no
3867 f
= mlxsw_sp_vport_fid_get(mlxsw_sp_vport
);
3869 f
->leave(mlxsw_sp_vport
);
3871 mlxsw_sp_vport
->lag_id
= lag_id
;
3872 mlxsw_sp_vport
->lagged
= 1;
3873 mlxsw_sp_vport
->dev
= lag_dev
;
3877 mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port
*mlxsw_sp_port
)
3879 struct mlxsw_sp_port
*mlxsw_sp_vport
;
3880 struct mlxsw_sp_fid
*f
;
3882 mlxsw_sp_vport
= mlxsw_sp_port_vport_find(mlxsw_sp_port
, 1);
3883 if (WARN_ON(!mlxsw_sp_vport
))
3886 f
= mlxsw_sp_vport_fid_get(mlxsw_sp_vport
);
3888 f
->leave(mlxsw_sp_vport
);
3890 mlxsw_sp_vport
->dev
= mlxsw_sp_port
->dev
;
3891 mlxsw_sp_vport
->lagged
= 0;
3894 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port
*mlxsw_sp_port
,
3895 struct net_device
*lag_dev
)
3897 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3898 struct mlxsw_sp_upper
*lag
;
3903 err
= mlxsw_sp_lag_index_get(mlxsw_sp
, lag_dev
, &lag_id
);
3906 lag
= mlxsw_sp_lag_get(mlxsw_sp
, lag_id
);
3907 if (!lag
->ref_count
) {
3908 err
= mlxsw_sp_lag_create(mlxsw_sp
, lag_id
);
3914 err
= mlxsw_sp_port_lag_index_get(mlxsw_sp
, lag_id
, &port_index
);
3917 err
= mlxsw_sp_lag_col_port_add(mlxsw_sp_port
, lag_id
, port_index
);
3919 goto err_col_port_add
;
3920 err
= mlxsw_sp_lag_col_port_enable(mlxsw_sp_port
, lag_id
);
3922 goto err_col_port_enable
;
3924 mlxsw_core_lag_mapping_set(mlxsw_sp
->core
, lag_id
, port_index
,
3925 mlxsw_sp_port
->local_port
);
3926 mlxsw_sp_port
->lag_id
= lag_id
;
3927 mlxsw_sp_port
->lagged
= 1;
3930 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port
, lag_dev
, lag_id
);
3934 err_col_port_enable
:
3935 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port
, lag_id
);
3937 if (!lag
->ref_count
)
3938 mlxsw_sp_lag_destroy(mlxsw_sp
, lag_id
);
3942 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port
*mlxsw_sp_port
,
3943 struct net_device
*lag_dev
)
3945 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3946 u16 lag_id
= mlxsw_sp_port
->lag_id
;
3947 struct mlxsw_sp_upper
*lag
;
3949 if (!mlxsw_sp_port
->lagged
)
3951 lag
= mlxsw_sp_lag_get(mlxsw_sp
, lag_id
);
3952 WARN_ON(lag
->ref_count
== 0);
3954 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port
, lag_id
);
3955 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port
, lag_id
);
3957 if (mlxsw_sp_port
->bridged
) {
3958 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port
);
3959 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
);
3962 if (lag
->ref_count
== 1)
3963 mlxsw_sp_lag_destroy(mlxsw_sp
, lag_id
);
3965 mlxsw_core_lag_mapping_clear(mlxsw_sp
->core
, lag_id
,
3966 mlxsw_sp_port
->local_port
);
3967 mlxsw_sp_port
->lagged
= 0;
3970 mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port
);
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
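
/* When a VLAN upper is linked on top of a port netdev, point the matching
 * vPort at the VLAN device, so that later events on the VLAN device can
 * be resolved back to it.
 */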
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
*mlxsw_sp_port
,
4043 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
4044 enum mlxsw_reg_spms_state spms_state
;
4049 spms_state
= enable
? MLXSW_REG_SPMS_STATE_FORWARDING
:
4050 MLXSW_REG_SPMS_STATE_DISCARDING
;
4052 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
4055 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sp_port
->local_port
);
4057 for (vid
= 0; vid
< VLAN_N_VID
; vid
++)
4058 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, spms_state
);
4060 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spms
), spms_pl
);
4065 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port
*mlxsw_sp_port
)
4069 err
= mlxsw_sp_port_stp_set(mlxsw_sp_port
, true);
4072 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, 2, VLAN_N_VID
- 1,
4075 goto err_port_vlan_set
;
4079 mlxsw_sp_port_stp_set(mlxsw_sp_port
, false);
4083 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port
*mlxsw_sp_port
)
4085 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, 2, VLAN_N_VID
- 1,
4087 mlxsw_sp_port_stp_set(mlxsw_sp_port
, false);
4090 static int mlxsw_sp_netdevice_port_upper_event(struct net_device
*dev
,
4091 unsigned long event
, void *ptr
)
4093 struct netdev_notifier_changeupper_info
*info
;
4094 struct mlxsw_sp_port
*mlxsw_sp_port
;
4095 struct net_device
*upper_dev
;
4096 struct mlxsw_sp
*mlxsw_sp
;
4099 mlxsw_sp_port
= netdev_priv(dev
);
4100 mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
4104 case NETDEV_PRECHANGEUPPER
:
4105 upper_dev
= info
->upper_dev
;
4106 if (!is_vlan_dev(upper_dev
) &&
4107 !netif_is_lag_master(upper_dev
) &&
4108 !netif_is_bridge_master(upper_dev
) &&
4109 !netif_is_ovs_master(upper_dev
))
4113 /* HW limitation forbids to put ports to multiple bridges. */
4114 if (netif_is_bridge_master(upper_dev
) &&
4115 !mlxsw_sp_master_bridge_check(mlxsw_sp
, upper_dev
))
4117 if (netif_is_lag_master(upper_dev
) &&
4118 !mlxsw_sp_master_lag_check(mlxsw_sp
, upper_dev
,
4121 if (netif_is_lag_master(upper_dev
) && vlan_uses_dev(dev
))
4123 if (netif_is_lag_port(dev
) && is_vlan_dev(upper_dev
) &&
4124 !netif_is_lag_master(vlan_dev_real_dev(upper_dev
)))
4126 if (netif_is_ovs_master(upper_dev
) && vlan_uses_dev(dev
))
4128 if (netif_is_ovs_port(dev
) && is_vlan_dev(upper_dev
))
4131 case NETDEV_CHANGEUPPER
:
4132 upper_dev
= info
->upper_dev
;
4133 if (is_vlan_dev(upper_dev
)) {
4135 err
= mlxsw_sp_port_vlan_link(mlxsw_sp_port
,
4138 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port
,
4140 } else if (netif_is_bridge_master(upper_dev
)) {
4142 err
= mlxsw_sp_port_bridge_join(mlxsw_sp_port
,
4145 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
);
4146 } else if (netif_is_lag_master(upper_dev
)) {
4148 err
= mlxsw_sp_port_lag_join(mlxsw_sp_port
,
4151 mlxsw_sp_port_lag_leave(mlxsw_sp_port
,
4153 } else if (netif_is_ovs_master(upper_dev
)) {
4155 err
= mlxsw_sp_port_ovs_join(mlxsw_sp_port
);
4157 mlxsw_sp_port_ovs_leave(mlxsw_sp_port
);
4168 static int mlxsw_sp_netdevice_port_lower_event(struct net_device
*dev
,
4169 unsigned long event
, void *ptr
)
4171 struct netdev_notifier_changelowerstate_info
*info
;
4172 struct mlxsw_sp_port
*mlxsw_sp_port
;
4175 mlxsw_sp_port
= netdev_priv(dev
);
4179 case NETDEV_CHANGELOWERSTATE
:
4180 if (netif_is_lag_port(dev
) && mlxsw_sp_port
->lagged
) {
4181 err
= mlxsw_sp_port_lag_changed(mlxsw_sp_port
,
4182 info
->lower_state_info
);
4184 netdev_err(dev
, "Failed to reflect link aggregation lower state change\n");
4192 static int mlxsw_sp_netdevice_port_event(struct net_device
*dev
,
4193 unsigned long event
, void *ptr
)
4196 case NETDEV_PRECHANGEUPPER
:
4197 case NETDEV_CHANGEUPPER
:
4198 return mlxsw_sp_netdevice_port_upper_event(dev
, event
, ptr
);
4199 case NETDEV_CHANGELOWERSTATE
:
4200 return mlxsw_sp_netdevice_port_lower_event(dev
, event
, ptr
);
4206 static int mlxsw_sp_netdevice_lag_event(struct net_device
*lag_dev
,
4207 unsigned long event
, void *ptr
)
4209 struct net_device
*dev
;
4210 struct list_head
*iter
;
4213 netdev_for_each_lower_dev(lag_dev
, dev
, iter
) {
4214 if (mlxsw_sp_port_dev_check(dev
)) {
4215 ret
= mlxsw_sp_netdevice_port_event(dev
, event
, ptr
);
4224 static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp
*mlxsw_sp
,
4225 struct net_device
*vlan_dev
)
4227 u16 fid
= vlan_dev_vlan_id(vlan_dev
);
4228 struct mlxsw_sp_fid
*f
;
4230 f
= mlxsw_sp_fid_find(mlxsw_sp
, fid
);
4232 f
= mlxsw_sp_fid_create(mlxsw_sp
, fid
);
4242 static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp
*mlxsw_sp
,
4243 struct net_device
*vlan_dev
)
4245 u16 fid
= vlan_dev_vlan_id(vlan_dev
);
4246 struct mlxsw_sp_fid
*f
;
4248 f
= mlxsw_sp_fid_find(mlxsw_sp
, fid
);
4250 mlxsw_sp_rif_bridge_destroy(mlxsw_sp
, f
->rif
);
4251 if (f
&& --f
->ref_count
== 0)
4252 mlxsw_sp_fid_destroy(mlxsw_sp
, f
);
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			return -EINVAL;
		if (is_vlan_dev(upper_dev) &&
		    br_dev != mlxsw_sp->master_bridge.dev)
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
								       upper_dev);
			else
				mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
								   upper_dev);
		}
		break;
	}

	return err;
}
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
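/* Allocate a free vFID, instantiate the corresponding FID in the device
 * via SFMR and track it in the driver's vFID list and bitmap.
 */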
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->rif)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}
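/* Bind a vPort to the vFID backing the bridge device: enable flooding for
 * the FID, map {Port, VID} to it and take a reference on it.
 */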
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
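/* Enslave a vPort (VLAN upper of an mlxsw port) to a bridge: leave the
 * current FID if one is set, join the bridge's vFID and enable learning.
 */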
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->mc_flood = 1;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->mc_disabled = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->mc_flood = 0;
	mlxsw_sp_vport->mc_router = 0;
	mlxsw_sp_vport->bridged = 0;
}
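/* Return false if any other vPort of this port is already a member of
 * br_dev, since two VLAN uppers of the same port must not be enslaved to
 * the same bridge.
 */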
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}
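/* Handle netdevice notifier events for the vPort identified by the given
 * 802.1Q VID on top of an mlxsw port netdevice.
 */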
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
								 upper_dev);
			else
				mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
		break;
	}

	return err;
}
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}
static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;

	return netif_is_l3_master(info->upper_dev);
}
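/* Top-level netdevice notifier handler: dispatch the event based on the
 * kind of netdevice it was generated for.
 */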
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};
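/* The notifier blocks are registered before the core and PCI drivers so
 * they are in place by the time ports are created; they are torn down in
 * reverse order on the error path and at module exit.
 */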
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);