2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/netdevice.h>
41 #include <linux/etherdevice.h>
42 #include <linux/ethtool.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <linux/if_bridge.h>
48 #include <linux/workqueue.h>
49 #include <linux/jiffies.h>
50 #include <linux/bitops.h>
51 #include <linux/list.h>
52 #include <linux/notifier.h>
53 #include <linux/dcbnl.h>
54 #include <linux/inetdevice.h>
55 #include <net/switchdev.h>
56 #include <generated/utsrelease.h>
57 #include <net/pkt_cls.h>
58 #include <net/tc_act/tc_mirred.h>
67 static const char mlxsw_sp_driver_name
[] = "mlxsw_spectrum";
68 static const char mlxsw_sp_driver_version
[] = "1.0";
74 MLXSW_ITEM32(tx
, hdr
, version
, 0x00, 28, 4);
77 * Packet control type.
78 * 0 - Ethernet control (e.g. EMADs, LACP)
81 MLXSW_ITEM32(tx
, hdr
, ctl
, 0x00, 26, 2);
84 * Packet protocol type. Must be set to 1 (Ethernet).
86 MLXSW_ITEM32(tx
, hdr
, proto
, 0x00, 21, 3);
88 /* tx_hdr_rx_is_router
89 * Packet is sent from the router. Valid for data packets only.
91 MLXSW_ITEM32(tx
, hdr
, rx_is_router
, 0x00, 19, 1);
94 * Indicates if the 'fid' field is valid and should be used for
95 * forwarding lookup. Valid for data packets only.
97 MLXSW_ITEM32(tx
, hdr
, fid_valid
, 0x00, 16, 1);
100 * Switch partition ID. Must be set to 0.
102 MLXSW_ITEM32(tx
, hdr
, swid
, 0x00, 12, 3);
104 /* tx_hdr_control_tclass
105 * Indicates if the packet should use the control TClass and not one
106 * of the data TClasses.
108 MLXSW_ITEM32(tx
, hdr
, control_tclass
, 0x00, 6, 1);
111 * Egress TClass to be used on the egress device on the egress port.
113 MLXSW_ITEM32(tx
, hdr
, etclass
, 0x00, 0, 4);
116 * Destination local port for unicast packets.
117 * Destination multicast ID for multicast packets.
119 * Control packets are directed to a specific egress port, while data
120 * packets are transmitted through the CPU port (0) into the switch partition,
121 * where forwarding rules are applied.
123 MLXSW_ITEM32(tx
, hdr
, port_mid
, 0x04, 16, 16);
126 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
127 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
128 * Valid for data packets only.
130 MLXSW_ITEM32(tx
, hdr
, fid
, 0x08, 0, 16);
134 * 6 - Control packets
136 MLXSW_ITEM32(tx
, hdr
, type
, 0x0C, 0, 4);
138 static bool mlxsw_sp_port_dev_check(const struct net_device
*dev
);
140 static void mlxsw_sp_txhdr_construct(struct sk_buff
*skb
,
141 const struct mlxsw_tx_info
*tx_info
)
143 char *txhdr
= skb_push(skb
, MLXSW_TXHDR_LEN
);
145 memset(txhdr
, 0, MLXSW_TXHDR_LEN
);
147 mlxsw_tx_hdr_version_set(txhdr
, MLXSW_TXHDR_VERSION_1
);
148 mlxsw_tx_hdr_ctl_set(txhdr
, MLXSW_TXHDR_ETH_CTL
);
149 mlxsw_tx_hdr_proto_set(txhdr
, MLXSW_TXHDR_PROTO_ETH
);
150 mlxsw_tx_hdr_swid_set(txhdr
, 0);
151 mlxsw_tx_hdr_control_tclass_set(txhdr
, 1);
152 mlxsw_tx_hdr_port_mid_set(txhdr
, tx_info
->local_port
);
153 mlxsw_tx_hdr_type_set(txhdr
, MLXSW_TXHDR_TYPE_CONTROL
);
156 static int mlxsw_sp_base_mac_get(struct mlxsw_sp
*mlxsw_sp
)
158 char spad_pl
[MLXSW_REG_SPAD_LEN
];
161 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(spad
), spad_pl
);
164 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl
, mlxsw_sp
->base_mac
);
168 static int mlxsw_sp_span_init(struct mlxsw_sp
*mlxsw_sp
)
170 struct mlxsw_resources
*resources
;
173 resources
= mlxsw_core_resources_get(mlxsw_sp
->core
);
174 if (!resources
->max_span_valid
)
177 mlxsw_sp
->span
.entries_count
= resources
->max_span
;
178 mlxsw_sp
->span
.entries
= kcalloc(mlxsw_sp
->span
.entries_count
,
179 sizeof(struct mlxsw_sp_span_entry
),
181 if (!mlxsw_sp
->span
.entries
)
184 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++)
185 INIT_LIST_HEAD(&mlxsw_sp
->span
.entries
[i
].bound_ports_list
);
190 static void mlxsw_sp_span_fini(struct mlxsw_sp
*mlxsw_sp
)
194 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
195 struct mlxsw_sp_span_entry
*curr
= &mlxsw_sp
->span
.entries
[i
];
197 WARN_ON_ONCE(!list_empty(&curr
->bound_ports_list
));
199 kfree(mlxsw_sp
->span
.entries
);
202 static struct mlxsw_sp_span_entry
*
203 mlxsw_sp_span_entry_create(struct mlxsw_sp_port
*port
)
205 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
206 struct mlxsw_sp_span_entry
*span_entry
;
207 char mpat_pl
[MLXSW_REG_MPAT_LEN
];
208 u8 local_port
= port
->local_port
;
213 /* find a free entry to use */
215 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
216 if (!mlxsw_sp
->span
.entries
[i
].used
) {
218 span_entry
= &mlxsw_sp
->span
.entries
[i
];
225 /* create a new port analayzer entry for local_port */
226 mlxsw_reg_mpat_pack(mpat_pl
, index
, local_port
, true);
227 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpat
), mpat_pl
);
231 span_entry
->used
= true;
232 span_entry
->id
= index
;
233 span_entry
->ref_count
= 0;
234 span_entry
->local_port
= local_port
;
238 static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp
*mlxsw_sp
,
239 struct mlxsw_sp_span_entry
*span_entry
)
241 u8 local_port
= span_entry
->local_port
;
242 char mpat_pl
[MLXSW_REG_MPAT_LEN
];
243 int pa_id
= span_entry
->id
;
245 mlxsw_reg_mpat_pack(mpat_pl
, pa_id
, local_port
, false);
246 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpat
), mpat_pl
);
247 span_entry
->used
= false;
250 struct mlxsw_sp_span_entry
*mlxsw_sp_span_entry_find(struct mlxsw_sp_port
*port
)
252 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
255 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
256 struct mlxsw_sp_span_entry
*curr
= &mlxsw_sp
->span
.entries
[i
];
258 if (curr
->used
&& curr
->local_port
== port
->local_port
)
264 struct mlxsw_sp_span_entry
*mlxsw_sp_span_entry_get(struct mlxsw_sp_port
*port
)
266 struct mlxsw_sp_span_entry
*span_entry
;
268 span_entry
= mlxsw_sp_span_entry_find(port
);
270 span_entry
->ref_count
++;
274 return mlxsw_sp_span_entry_create(port
);
277 static int mlxsw_sp_span_entry_put(struct mlxsw_sp
*mlxsw_sp
,
278 struct mlxsw_sp_span_entry
*span_entry
)
280 if (--span_entry
->ref_count
== 0)
281 mlxsw_sp_span_entry_destroy(mlxsw_sp
, span_entry
);
285 static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port
*port
)
287 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
288 struct mlxsw_sp_span_inspected_port
*p
;
291 for (i
= 0; i
< mlxsw_sp
->span
.entries_count
; i
++) {
292 struct mlxsw_sp_span_entry
*curr
= &mlxsw_sp
->span
.entries
[i
];
294 list_for_each_entry(p
, &curr
->bound_ports_list
, list
)
295 if (p
->local_port
== port
->local_port
&&
296 p
->type
== MLXSW_SP_SPAN_EGRESS
)
303 static int mlxsw_sp_span_mtu_to_buffsize(int mtu
)
305 return MLXSW_SP_BYTES_TO_CELLS(mtu
* 5 / 2) + 1;
308 static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port
*port
, u16 mtu
)
310 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
311 char sbib_pl
[MLXSW_REG_SBIB_LEN
];
314 /* If port is egress mirrored, the shared buffer size should be
315 * updated according to the mtu value
317 if (mlxsw_sp_span_is_egress_mirror(port
)) {
318 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
,
319 mlxsw_sp_span_mtu_to_buffsize(mtu
));
320 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
322 netdev_err(port
->dev
, "Could not update shared buffer for mirroring\n");
330 static struct mlxsw_sp_span_inspected_port
*
331 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port
*port
,
332 struct mlxsw_sp_span_entry
*span_entry
)
334 struct mlxsw_sp_span_inspected_port
*p
;
336 list_for_each_entry(p
, &span_entry
->bound_ports_list
, list
)
337 if (port
->local_port
== p
->local_port
)
343 mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port
*port
,
344 struct mlxsw_sp_span_entry
*span_entry
,
345 enum mlxsw_sp_span_type type
)
347 struct mlxsw_sp_span_inspected_port
*inspected_port
;
348 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
349 char mpar_pl
[MLXSW_REG_MPAR_LEN
];
350 char sbib_pl
[MLXSW_REG_SBIB_LEN
];
351 int pa_id
= span_entry
->id
;
354 /* if it is an egress SPAN, bind a shared buffer to it */
355 if (type
== MLXSW_SP_SPAN_EGRESS
) {
356 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
,
357 mlxsw_sp_span_mtu_to_buffsize(port
->dev
->mtu
));
358 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
360 netdev_err(port
->dev
, "Could not create shared buffer for mirroring\n");
365 /* bind the port to the SPAN entry */
366 mlxsw_reg_mpar_pack(mpar_pl
, port
->local_port
, type
, true, pa_id
);
367 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpar
), mpar_pl
);
369 goto err_mpar_reg_write
;
371 inspected_port
= kzalloc(sizeof(*inspected_port
), GFP_KERNEL
);
372 if (!inspected_port
) {
374 goto err_inspected_port_alloc
;
376 inspected_port
->local_port
= port
->local_port
;
377 inspected_port
->type
= type
;
378 list_add_tail(&inspected_port
->list
, &span_entry
->bound_ports_list
);
383 err_inspected_port_alloc
:
384 if (type
== MLXSW_SP_SPAN_EGRESS
) {
385 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
, 0);
386 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
392 mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port
*port
,
393 struct mlxsw_sp_span_entry
*span_entry
,
394 enum mlxsw_sp_span_type type
)
396 struct mlxsw_sp_span_inspected_port
*inspected_port
;
397 struct mlxsw_sp
*mlxsw_sp
= port
->mlxsw_sp
;
398 char mpar_pl
[MLXSW_REG_MPAR_LEN
];
399 char sbib_pl
[MLXSW_REG_SBIB_LEN
];
400 int pa_id
= span_entry
->id
;
402 inspected_port
= mlxsw_sp_span_entry_bound_port_find(port
, span_entry
);
406 /* remove the inspected port */
407 mlxsw_reg_mpar_pack(mpar_pl
, port
->local_port
, type
, false, pa_id
);
408 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mpar
), mpar_pl
);
410 /* remove the SBIB buffer if it was egress SPAN */
411 if (type
== MLXSW_SP_SPAN_EGRESS
) {
412 mlxsw_reg_sbib_pack(sbib_pl
, port
->local_port
, 0);
413 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sbib
), sbib_pl
);
416 mlxsw_sp_span_entry_put(mlxsw_sp
, span_entry
);
418 list_del(&inspected_port
->list
);
419 kfree(inspected_port
);
422 static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port
*from
,
423 struct mlxsw_sp_port
*to
,
424 enum mlxsw_sp_span_type type
)
426 struct mlxsw_sp
*mlxsw_sp
= from
->mlxsw_sp
;
427 struct mlxsw_sp_span_entry
*span_entry
;
430 span_entry
= mlxsw_sp_span_entry_get(to
);
434 netdev_dbg(from
->dev
, "Adding inspected port to SPAN entry %d\n",
437 err
= mlxsw_sp_span_inspected_port_bind(from
, span_entry
, type
);
444 mlxsw_sp_span_entry_put(mlxsw_sp
, span_entry
);
448 static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port
*from
,
449 struct mlxsw_sp_port
*to
,
450 enum mlxsw_sp_span_type type
)
452 struct mlxsw_sp_span_entry
*span_entry
;
454 span_entry
= mlxsw_sp_span_entry_find(to
);
456 netdev_err(from
->dev
, "no span entry found\n");
460 netdev_dbg(from
->dev
, "removing inspected port from SPAN entry %d\n",
462 mlxsw_sp_span_inspected_port_unbind(from
, span_entry
, type
);
465 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
468 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
469 char paos_pl
[MLXSW_REG_PAOS_LEN
];
471 mlxsw_reg_paos_pack(paos_pl
, mlxsw_sp_port
->local_port
,
472 is_up
? MLXSW_PORT_ADMIN_STATUS_UP
:
473 MLXSW_PORT_ADMIN_STATUS_DOWN
);
474 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(paos
), paos_pl
);
477 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
480 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
481 char ppad_pl
[MLXSW_REG_PPAD_LEN
];
483 mlxsw_reg_ppad_pack(ppad_pl
, true, mlxsw_sp_port
->local_port
);
484 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl
, addr
);
485 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ppad
), ppad_pl
);
488 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
490 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
491 unsigned char *addr
= mlxsw_sp_port
->dev
->dev_addr
;
493 ether_addr_copy(addr
, mlxsw_sp
->base_mac
);
494 addr
[ETH_ALEN
- 1] += mlxsw_sp_port
->local_port
;
495 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
);
498 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 mtu
)
500 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
501 char pmtu_pl
[MLXSW_REG_PMTU_LEN
];
505 mtu
+= MLXSW_TXHDR_LEN
+ ETH_HLEN
;
506 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, 0);
507 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
510 max_mtu
= mlxsw_reg_pmtu_max_mtu_get(pmtu_pl
);
515 mlxsw_reg_pmtu_pack(pmtu_pl
, mlxsw_sp_port
->local_port
, mtu
);
516 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmtu
), pmtu_pl
);
519 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
522 char pspa_pl
[MLXSW_REG_PSPA_LEN
];
524 mlxsw_reg_pspa_pack(pspa_pl
, swid
, local_port
);
525 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pspa
), pspa_pl
);
528 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u8 swid
)
530 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
532 return __mlxsw_sp_port_swid_set(mlxsw_sp
, mlxsw_sp_port
->local_port
,
536 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
539 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
540 char svpe_pl
[MLXSW_REG_SVPE_LEN
];
542 mlxsw_reg_svpe_pack(svpe_pl
, mlxsw_sp_port
->local_port
, enable
);
543 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(svpe
), svpe_pl
);
546 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
547 enum mlxsw_reg_svfa_mt mt
, bool valid
, u16 fid
,
550 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
551 char svfa_pl
[MLXSW_REG_SVFA_LEN
];
553 mlxsw_reg_svfa_pack(svfa_pl
, mlxsw_sp_port
->local_port
, mt
, valid
,
555 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(svfa
), svfa_pl
);
558 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
559 u16 vid
, bool learn_enable
)
561 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
565 spvmlr_pl
= kmalloc(MLXSW_REG_SPVMLR_LEN
, GFP_KERNEL
);
568 mlxsw_reg_spvmlr_pack(spvmlr_pl
, mlxsw_sp_port
->local_port
, vid
, vid
,
570 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvmlr
), spvmlr_pl
);
576 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port
*mlxsw_sp_port
)
578 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
579 char sspr_pl
[MLXSW_REG_SSPR_LEN
];
581 mlxsw_reg_sspr_pack(sspr_pl
, mlxsw_sp_port
->local_port
);
582 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sspr
), sspr_pl
);
585 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp
*mlxsw_sp
,
586 u8 local_port
, u8
*p_module
,
587 u8
*p_width
, u8
*p_lane
)
589 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
592 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
593 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
596 *p_module
= mlxsw_reg_pmlp_module_get(pmlp_pl
, 0);
597 *p_width
= mlxsw_reg_pmlp_width_get(pmlp_pl
);
598 *p_lane
= mlxsw_reg_pmlp_tx_lane_get(pmlp_pl
, 0);
602 static int mlxsw_sp_port_module_map(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
603 u8 module
, u8 width
, u8 lane
)
605 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
608 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
609 mlxsw_reg_pmlp_width_set(pmlp_pl
, width
);
610 for (i
= 0; i
< width
; i
++) {
611 mlxsw_reg_pmlp_module_set(pmlp_pl
, i
, module
);
612 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl
, i
, lane
+ i
); /* Rx & Tx */
615 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
618 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
620 char pmlp_pl
[MLXSW_REG_PMLP_LEN
];
622 mlxsw_reg_pmlp_pack(pmlp_pl
, local_port
);
623 mlxsw_reg_pmlp_width_set(pmlp_pl
, 0);
624 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pmlp
), pmlp_pl
);
627 static int mlxsw_sp_port_open(struct net_device
*dev
)
629 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
632 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
635 netif_start_queue(dev
);
639 static int mlxsw_sp_port_stop(struct net_device
*dev
)
641 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
643 netif_stop_queue(dev
);
644 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
647 static netdev_tx_t
mlxsw_sp_port_xmit(struct sk_buff
*skb
,
648 struct net_device
*dev
)
650 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
651 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
652 struct mlxsw_sp_port_pcpu_stats
*pcpu_stats
;
653 const struct mlxsw_tx_info tx_info
= {
654 .local_port
= mlxsw_sp_port
->local_port
,
660 if (mlxsw_core_skb_transmit_busy(mlxsw_sp
->core
, &tx_info
))
661 return NETDEV_TX_BUSY
;
663 if (unlikely(skb_headroom(skb
) < MLXSW_TXHDR_LEN
)) {
664 struct sk_buff
*skb_orig
= skb
;
666 skb
= skb_realloc_headroom(skb
, MLXSW_TXHDR_LEN
);
668 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
669 dev_kfree_skb_any(skb_orig
);
674 if (eth_skb_pad(skb
)) {
675 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
679 mlxsw_sp_txhdr_construct(skb
, &tx_info
);
680 /* TX header is consumed by HW on the way so we shouldn't count its
681 * bytes as being sent.
683 len
= skb
->len
- MLXSW_TXHDR_LEN
;
685 /* Due to a race we might fail here because of a full queue. In that
686 * unlikely case we simply drop the packet.
688 err
= mlxsw_core_skb_transmit(mlxsw_sp
->core
, skb
, &tx_info
);
691 pcpu_stats
= this_cpu_ptr(mlxsw_sp_port
->pcpu_stats
);
692 u64_stats_update_begin(&pcpu_stats
->syncp
);
693 pcpu_stats
->tx_packets
++;
694 pcpu_stats
->tx_bytes
+= len
;
695 u64_stats_update_end(&pcpu_stats
->syncp
);
697 this_cpu_inc(mlxsw_sp_port
->pcpu_stats
->tx_dropped
);
698 dev_kfree_skb_any(skb
);
703 static void mlxsw_sp_set_rx_mode(struct net_device
*dev
)
707 static int mlxsw_sp_port_set_mac_address(struct net_device
*dev
, void *p
)
709 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
710 struct sockaddr
*addr
= p
;
713 if (!is_valid_ether_addr(addr
->sa_data
))
714 return -EADDRNOTAVAIL
;
716 err
= mlxsw_sp_port_dev_addr_set(mlxsw_sp_port
, addr
->sa_data
);
719 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
723 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl
, int pg_index
, int mtu
,
724 bool pause_en
, bool pfc_en
, u16 delay
)
726 u16 pg_size
= 2 * MLXSW_SP_BYTES_TO_CELLS(mtu
);
728 delay
= pfc_en
? mlxsw_sp_pfc_delay_get(mtu
, delay
) :
729 MLXSW_SP_PAUSE_DELAY
;
731 if (pause_en
|| pfc_en
)
732 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl
, pg_index
,
733 pg_size
+ delay
, pg_size
);
735 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl
, pg_index
, pg_size
);
738 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port
*mlxsw_sp_port
, int mtu
,
739 u8
*prio_tc
, bool pause_en
,
740 struct ieee_pfc
*my_pfc
)
742 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
743 u8 pfc_en
= !!my_pfc
? my_pfc
->pfc_en
: 0;
744 u16 delay
= !!my_pfc
? my_pfc
->delay
: 0;
745 char pbmc_pl
[MLXSW_REG_PBMC_LEN
];
748 mlxsw_reg_pbmc_pack(pbmc_pl
, mlxsw_sp_port
->local_port
, 0, 0);
749 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(pbmc
), pbmc_pl
);
753 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
754 bool configure
= false;
757 for (j
= 0; j
< IEEE_8021QAZ_MAX_TCS
; j
++) {
758 if (prio_tc
[j
] == i
) {
759 pfc
= pfc_en
& BIT(j
);
767 mlxsw_sp_pg_buf_pack(pbmc_pl
, i
, mtu
, pause_en
, pfc
, delay
);
770 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(pbmc
), pbmc_pl
);
773 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
774 int mtu
, bool pause_en
)
776 u8 def_prio_tc
[IEEE_8021QAZ_MAX_TCS
] = {0};
777 bool dcb_en
= !!mlxsw_sp_port
->dcb
.ets
;
778 struct ieee_pfc
*my_pfc
;
781 prio_tc
= dcb_en
? mlxsw_sp_port
->dcb
.ets
->prio_tc
: def_prio_tc
;
782 my_pfc
= dcb_en
? mlxsw_sp_port
->dcb
.pfc
: NULL
;
784 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port
, mtu
, prio_tc
,
788 static int mlxsw_sp_port_change_mtu(struct net_device
*dev
, int mtu
)
790 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
791 bool pause_en
= mlxsw_sp_port_is_pause_en(mlxsw_sp_port
);
794 err
= mlxsw_sp_port_headroom_set(mlxsw_sp_port
, mtu
, pause_en
);
797 err
= mlxsw_sp_span_port_mtu_update(mlxsw_sp_port
, mtu
);
799 goto err_span_port_mtu_update
;
800 err
= mlxsw_sp_port_mtu_set(mlxsw_sp_port
, mtu
);
802 goto err_port_mtu_set
;
807 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port
, dev
->mtu
);
808 err_span_port_mtu_update
:
809 mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
813 static struct rtnl_link_stats64
*
814 mlxsw_sp_port_get_stats64(struct net_device
*dev
,
815 struct rtnl_link_stats64
*stats
)
817 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
818 struct mlxsw_sp_port_pcpu_stats
*p
;
819 u64 rx_packets
, rx_bytes
, tx_packets
, tx_bytes
;
824 for_each_possible_cpu(i
) {
825 p
= per_cpu_ptr(mlxsw_sp_port
->pcpu_stats
, i
);
827 start
= u64_stats_fetch_begin_irq(&p
->syncp
);
828 rx_packets
= p
->rx_packets
;
829 rx_bytes
= p
->rx_bytes
;
830 tx_packets
= p
->tx_packets
;
831 tx_bytes
= p
->tx_bytes
;
832 } while (u64_stats_fetch_retry_irq(&p
->syncp
, start
));
834 stats
->rx_packets
+= rx_packets
;
835 stats
->rx_bytes
+= rx_bytes
;
836 stats
->tx_packets
+= tx_packets
;
837 stats
->tx_bytes
+= tx_bytes
;
838 /* tx_dropped is u32, updated without syncp protection. */
839 tx_dropped
+= p
->tx_dropped
;
841 stats
->tx_dropped
= tx_dropped
;
845 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid_begin
,
846 u16 vid_end
, bool is_member
, bool untagged
)
848 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
852 spvm_pl
= kmalloc(MLXSW_REG_SPVM_LEN
, GFP_KERNEL
);
856 mlxsw_reg_spvm_pack(spvm_pl
, mlxsw_sp_port
->local_port
, vid_begin
,
857 vid_end
, is_member
, untagged
);
858 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvm
), spvm_pl
);
863 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port
*mlxsw_sp_port
)
865 enum mlxsw_reg_svfa_mt mt
= MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
;
866 u16 vid
, last_visited_vid
;
869 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, VLAN_N_VID
) {
870 err
= mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, true, vid
,
873 last_visited_vid
= vid
;
874 goto err_port_vid_to_fid_set
;
878 err
= mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, true);
880 last_visited_vid
= VLAN_N_VID
;
881 goto err_port_vid_to_fid_set
;
886 err_port_vid_to_fid_set
:
887 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, last_visited_vid
)
888 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, false, vid
,
893 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port
*mlxsw_sp_port
)
895 enum mlxsw_reg_svfa_mt mt
= MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
;
899 err
= mlxsw_sp_port_vp_mode_set(mlxsw_sp_port
, false);
903 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, VLAN_N_VID
) {
904 err
= mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, false,
913 static struct mlxsw_sp_port
*
914 mlxsw_sp_port_vport_create(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
916 struct mlxsw_sp_port
*mlxsw_sp_vport
;
918 mlxsw_sp_vport
= kzalloc(sizeof(*mlxsw_sp_vport
), GFP_KERNEL
);
922 /* dev will be set correctly after the VLAN device is linked
923 * with the real device. In case of bridge SELF invocation, dev
926 mlxsw_sp_vport
->dev
= mlxsw_sp_port
->dev
;
927 mlxsw_sp_vport
->mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
928 mlxsw_sp_vport
->local_port
= mlxsw_sp_port
->local_port
;
929 mlxsw_sp_vport
->stp_state
= BR_STATE_FORWARDING
;
930 mlxsw_sp_vport
->lagged
= mlxsw_sp_port
->lagged
;
931 mlxsw_sp_vport
->lag_id
= mlxsw_sp_port
->lag_id
;
932 mlxsw_sp_vport
->vport
.vid
= vid
;
934 list_add(&mlxsw_sp_vport
->vport
.list
, &mlxsw_sp_port
->vports_list
);
936 return mlxsw_sp_vport
;
939 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port
*mlxsw_sp_vport
)
941 list_del(&mlxsw_sp_vport
->vport
.list
);
942 kfree(mlxsw_sp_vport
);
945 int mlxsw_sp_port_add_vid(struct net_device
*dev
, __be16 __always_unused proto
,
948 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
949 struct mlxsw_sp_port
*mlxsw_sp_vport
;
950 bool untagged
= vid
== 1;
953 /* VLAN 0 is added to HW filter when device goes up, but it is
954 * reserved in our case, so simply return.
959 if (mlxsw_sp_port_vport_find(mlxsw_sp_port
, vid
)) {
960 netdev_warn(dev
, "VID=%d already configured\n", vid
);
964 mlxsw_sp_vport
= mlxsw_sp_port_vport_create(mlxsw_sp_port
, vid
);
965 if (!mlxsw_sp_vport
) {
966 netdev_err(dev
, "Failed to create vPort for VID=%d\n", vid
);
970 /* When adding the first VLAN interface on a bridged port we need to
971 * transition all the active 802.1Q bridge VLANs to use explicit
972 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
974 if (list_is_singular(&mlxsw_sp_port
->vports_list
)) {
975 err
= mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port
);
977 netdev_err(dev
, "Failed to set to Virtual mode\n");
978 goto err_port_vp_mode_trans
;
982 err
= mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport
, vid
, false);
984 netdev_err(dev
, "Failed to disable learning for VID=%d\n", vid
);
985 goto err_port_vid_learning_set
;
988 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_vport
, vid
, vid
, true, untagged
);
990 netdev_err(dev
, "Failed to set VLAN membership for VID=%d\n",
992 goto err_port_add_vid
;
998 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport
, vid
, true);
999 err_port_vid_learning_set
:
1000 if (list_is_singular(&mlxsw_sp_port
->vports_list
))
1001 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port
);
1002 err_port_vp_mode_trans
:
1003 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport
);
1007 static int mlxsw_sp_port_kill_vid(struct net_device
*dev
,
1008 __be16 __always_unused proto
, u16 vid
)
1010 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1011 struct mlxsw_sp_port
*mlxsw_sp_vport
;
1012 struct mlxsw_sp_fid
*f
;
1015 /* VLAN 0 is removed from HW filter when device goes down, but
1016 * it is reserved in our case, so simply return.
1021 mlxsw_sp_vport
= mlxsw_sp_port_vport_find(mlxsw_sp_port
, vid
);
1022 if (!mlxsw_sp_vport
) {
1023 netdev_warn(dev
, "VID=%d does not exist\n", vid
);
1027 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_vport
, vid
, vid
, false, false);
1029 netdev_err(dev
, "Failed to set VLAN membership for VID=%d\n",
1034 err
= mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport
, vid
, true);
1036 netdev_err(dev
, "Failed to enable learning for VID=%d\n", vid
);
1040 /* Drop FID reference. If this was the last reference the
1041 * resources will be freed.
1043 f
= mlxsw_sp_vport_fid_get(mlxsw_sp_vport
);
1044 if (f
&& !WARN_ON(!f
->leave
))
1045 f
->leave(mlxsw_sp_vport
);
1047 /* When removing the last VLAN interface on a bridged port we need to
1048 * transition all active 802.1Q bridge VLANs to use VID to FID
1049 * mappings and set port's mode to VLAN mode.
1051 if (list_is_singular(&mlxsw_sp_port
->vports_list
)) {
1052 err
= mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port
);
1054 netdev_err(dev
, "Failed to set to VLAN mode\n");
1059 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport
);
1064 static int mlxsw_sp_port_get_phys_port_name(struct net_device
*dev
, char *name
,
1067 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1068 u8 module
= mlxsw_sp_port
->mapping
.module
;
1069 u8 width
= mlxsw_sp_port
->mapping
.width
;
1070 u8 lane
= mlxsw_sp_port
->mapping
.lane
;
1073 if (!mlxsw_sp_port
->split
)
1074 err
= snprintf(name
, len
, "p%d", module
+ 1);
1076 err
= snprintf(name
, len
, "p%ds%d", module
+ 1,
1085 static struct mlxsw_sp_port_mall_tc_entry
*
1086 mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port
*port
,
1087 unsigned long cookie
) {
1088 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1090 list_for_each_entry(mall_tc_entry
, &port
->mall_tc_list
, list
)
1091 if (mall_tc_entry
->cookie
== cookie
)
1092 return mall_tc_entry
;
1098 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port
*mlxsw_sp_port
,
1099 struct tc_cls_matchall_offload
*cls
,
1100 const struct tc_action
*a
,
1103 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1104 struct net
*net
= dev_net(mlxsw_sp_port
->dev
);
1105 enum mlxsw_sp_span_type span_type
;
1106 struct mlxsw_sp_port
*to_port
;
1107 struct net_device
*to_dev
;
1111 ifindex
= tcf_mirred_ifindex(a
);
1112 to_dev
= __dev_get_by_index(net
, ifindex
);
1114 netdev_err(mlxsw_sp_port
->dev
, "Could not find requested device\n");
1118 if (!mlxsw_sp_port_dev_check(to_dev
)) {
1119 netdev_err(mlxsw_sp_port
->dev
, "Cannot mirror to a non-spectrum port");
1122 to_port
= netdev_priv(to_dev
);
1124 mall_tc_entry
= kzalloc(sizeof(*mall_tc_entry
), GFP_KERNEL
);
1128 mall_tc_entry
->cookie
= cls
->cookie
;
1129 mall_tc_entry
->type
= MLXSW_SP_PORT_MALL_MIRROR
;
1130 mall_tc_entry
->mirror
.to_local_port
= to_port
->local_port
;
1131 mall_tc_entry
->mirror
.ingress
= ingress
;
1132 list_add_tail(&mall_tc_entry
->list
, &mlxsw_sp_port
->mall_tc_list
);
1134 span_type
= ingress
? MLXSW_SP_SPAN_INGRESS
: MLXSW_SP_SPAN_EGRESS
;
1135 err
= mlxsw_sp_span_mirror_add(mlxsw_sp_port
, to_port
, span_type
);
1137 goto err_mirror_add
;
1141 list_del(&mall_tc_entry
->list
);
1142 kfree(mall_tc_entry
);
1146 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port
*mlxsw_sp_port
,
1148 struct tc_cls_matchall_offload
*cls
,
1151 const struct tc_action
*a
;
1154 if (!tc_single_action(cls
->exts
)) {
1155 netdev_err(mlxsw_sp_port
->dev
, "only singular actions are supported\n");
1159 tc_for_each_action(a
, cls
->exts
) {
1160 if (!is_tcf_mirred_mirror(a
) || protocol
!= htons(ETH_P_ALL
))
1163 err
= mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port
, cls
,
1172 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port
*mlxsw_sp_port
,
1173 struct tc_cls_matchall_offload
*cls
)
1175 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1176 struct mlxsw_sp_port_mall_tc_entry
*mall_tc_entry
;
1177 enum mlxsw_sp_span_type span_type
;
1178 struct mlxsw_sp_port
*to_port
;
1180 mall_tc_entry
= mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port
,
1182 if (!mall_tc_entry
) {
1183 netdev_dbg(mlxsw_sp_port
->dev
, "tc entry not found on port\n");
1187 switch (mall_tc_entry
->type
) {
1188 case MLXSW_SP_PORT_MALL_MIRROR
:
1189 to_port
= mlxsw_sp
->ports
[mall_tc_entry
->mirror
.to_local_port
];
1190 span_type
= mall_tc_entry
->mirror
.ingress
?
1191 MLXSW_SP_SPAN_INGRESS
: MLXSW_SP_SPAN_EGRESS
;
1193 mlxsw_sp_span_mirror_remove(mlxsw_sp_port
, to_port
, span_type
);
1199 list_del(&mall_tc_entry
->list
);
1200 kfree(mall_tc_entry
);
1203 static int mlxsw_sp_setup_tc(struct net_device
*dev
, u32 handle
,
1204 __be16 proto
, struct tc_to_netdev
*tc
)
1206 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1207 bool ingress
= TC_H_MAJ(handle
) == TC_H_MAJ(TC_H_INGRESS
);
1209 if (tc
->type
== TC_SETUP_MATCHALL
) {
1210 switch (tc
->cls_mall
->command
) {
1211 case TC_CLSMATCHALL_REPLACE
:
1212 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port
,
1216 case TC_CLSMATCHALL_DESTROY
:
1217 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port
,
1228 static const struct net_device_ops mlxsw_sp_port_netdev_ops
= {
1229 .ndo_open
= mlxsw_sp_port_open
,
1230 .ndo_stop
= mlxsw_sp_port_stop
,
1231 .ndo_start_xmit
= mlxsw_sp_port_xmit
,
1232 .ndo_setup_tc
= mlxsw_sp_setup_tc
,
1233 .ndo_set_rx_mode
= mlxsw_sp_set_rx_mode
,
1234 .ndo_set_mac_address
= mlxsw_sp_port_set_mac_address
,
1235 .ndo_change_mtu
= mlxsw_sp_port_change_mtu
,
1236 .ndo_get_stats64
= mlxsw_sp_port_get_stats64
,
1237 .ndo_vlan_rx_add_vid
= mlxsw_sp_port_add_vid
,
1238 .ndo_vlan_rx_kill_vid
= mlxsw_sp_port_kill_vid
,
1239 .ndo_neigh_construct
= mlxsw_sp_router_neigh_construct
,
1240 .ndo_neigh_destroy
= mlxsw_sp_router_neigh_destroy
,
1241 .ndo_fdb_add
= switchdev_port_fdb_add
,
1242 .ndo_fdb_del
= switchdev_port_fdb_del
,
1243 .ndo_fdb_dump
= switchdev_port_fdb_dump
,
1244 .ndo_bridge_setlink
= switchdev_port_bridge_setlink
,
1245 .ndo_bridge_getlink
= switchdev_port_bridge_getlink
,
1246 .ndo_bridge_dellink
= switchdev_port_bridge_dellink
,
1247 .ndo_get_phys_port_name
= mlxsw_sp_port_get_phys_port_name
,
1250 static void mlxsw_sp_port_get_drvinfo(struct net_device
*dev
,
1251 struct ethtool_drvinfo
*drvinfo
)
1253 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1254 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1256 strlcpy(drvinfo
->driver
, mlxsw_sp_driver_name
, sizeof(drvinfo
->driver
));
1257 strlcpy(drvinfo
->version
, mlxsw_sp_driver_version
,
1258 sizeof(drvinfo
->version
));
1259 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
1261 mlxsw_sp
->bus_info
->fw_rev
.major
,
1262 mlxsw_sp
->bus_info
->fw_rev
.minor
,
1263 mlxsw_sp
->bus_info
->fw_rev
.subminor
);
1264 strlcpy(drvinfo
->bus_info
, mlxsw_sp
->bus_info
->device_name
,
1265 sizeof(drvinfo
->bus_info
));
1268 static void mlxsw_sp_port_get_pauseparam(struct net_device
*dev
,
1269 struct ethtool_pauseparam
*pause
)
1271 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1273 pause
->rx_pause
= mlxsw_sp_port
->link
.rx_pause
;
1274 pause
->tx_pause
= mlxsw_sp_port
->link
.tx_pause
;
1277 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1278 struct ethtool_pauseparam
*pause
)
1280 char pfcc_pl
[MLXSW_REG_PFCC_LEN
];
1282 mlxsw_reg_pfcc_pack(pfcc_pl
, mlxsw_sp_port
->local_port
);
1283 mlxsw_reg_pfcc_pprx_set(pfcc_pl
, pause
->rx_pause
);
1284 mlxsw_reg_pfcc_pptx_set(pfcc_pl
, pause
->tx_pause
);
1286 return mlxsw_reg_write(mlxsw_sp_port
->mlxsw_sp
->core
, MLXSW_REG(pfcc
),
1290 static int mlxsw_sp_port_set_pauseparam(struct net_device
*dev
,
1291 struct ethtool_pauseparam
*pause
)
1293 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1294 bool pause_en
= pause
->tx_pause
|| pause
->rx_pause
;
1297 if (mlxsw_sp_port
->dcb
.pfc
&& mlxsw_sp_port
->dcb
.pfc
->pfc_en
) {
1298 netdev_err(dev
, "PFC already enabled on port\n");
1302 if (pause
->autoneg
) {
1303 netdev_err(dev
, "PAUSE frames autonegotiation isn't supported\n");
1307 err
= mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
1309 netdev_err(dev
, "Failed to configure port's headroom\n");
1313 err
= mlxsw_sp_port_pause_set(mlxsw_sp_port
, pause
);
1315 netdev_err(dev
, "Failed to set PAUSE parameters\n");
1316 goto err_port_pause_configure
;
1319 mlxsw_sp_port
->link
.rx_pause
= pause
->rx_pause
;
1320 mlxsw_sp_port
->link
.tx_pause
= pause
->tx_pause
;
1324 err_port_pause_configure
:
1325 pause_en
= mlxsw_sp_port_is_pause_en(mlxsw_sp_port
);
1326 mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
1330 struct mlxsw_sp_port_hw_stats
{
1331 char str
[ETH_GSTRING_LEN
];
1332 u64 (*getter
)(char *payload
);
1335 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats
[] = {
1337 .str
= "a_frames_transmitted_ok",
1338 .getter
= mlxsw_reg_ppcnt_a_frames_transmitted_ok_get
,
1341 .str
= "a_frames_received_ok",
1342 .getter
= mlxsw_reg_ppcnt_a_frames_received_ok_get
,
1345 .str
= "a_frame_check_sequence_errors",
1346 .getter
= mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get
,
1349 .str
= "a_alignment_errors",
1350 .getter
= mlxsw_reg_ppcnt_a_alignment_errors_get
,
1353 .str
= "a_octets_transmitted_ok",
1354 .getter
= mlxsw_reg_ppcnt_a_octets_transmitted_ok_get
,
1357 .str
= "a_octets_received_ok",
1358 .getter
= mlxsw_reg_ppcnt_a_octets_received_ok_get
,
1361 .str
= "a_multicast_frames_xmitted_ok",
1362 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get
,
1365 .str
= "a_broadcast_frames_xmitted_ok",
1366 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get
,
1369 .str
= "a_multicast_frames_received_ok",
1370 .getter
= mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get
,
1373 .str
= "a_broadcast_frames_received_ok",
1374 .getter
= mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get
,
1377 .str
= "a_in_range_length_errors",
1378 .getter
= mlxsw_reg_ppcnt_a_in_range_length_errors_get
,
1381 .str
= "a_out_of_range_length_field",
1382 .getter
= mlxsw_reg_ppcnt_a_out_of_range_length_field_get
,
1385 .str
= "a_frame_too_long_errors",
1386 .getter
= mlxsw_reg_ppcnt_a_frame_too_long_errors_get
,
1389 .str
= "a_symbol_error_during_carrier",
1390 .getter
= mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get
,
1393 .str
= "a_mac_control_frames_transmitted",
1394 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get
,
1397 .str
= "a_mac_control_frames_received",
1398 .getter
= mlxsw_reg_ppcnt_a_mac_control_frames_received_get
,
1401 .str
= "a_unsupported_opcodes_received",
1402 .getter
= mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get
,
1405 .str
= "a_pause_mac_ctrl_frames_received",
1406 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get
,
1409 .str
= "a_pause_mac_ctrl_frames_xmitted",
1410 .getter
= mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get
,
1414 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1416 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats
[] = {
1418 .str
= "rx_octets_prio",
1419 .getter
= mlxsw_reg_ppcnt_rx_octets_get
,
1422 .str
= "rx_frames_prio",
1423 .getter
= mlxsw_reg_ppcnt_rx_frames_get
,
1426 .str
= "tx_octets_prio",
1427 .getter
= mlxsw_reg_ppcnt_tx_octets_get
,
1430 .str
= "tx_frames_prio",
1431 .getter
= mlxsw_reg_ppcnt_tx_frames_get
,
1434 .str
= "rx_pause_prio",
1435 .getter
= mlxsw_reg_ppcnt_rx_pause_get
,
1438 .str
= "rx_pause_duration_prio",
1439 .getter
= mlxsw_reg_ppcnt_rx_pause_duration_get
,
1442 .str
= "tx_pause_prio",
1443 .getter
= mlxsw_reg_ppcnt_tx_pause_get
,
1446 .str
= "tx_pause_duration_prio",
1447 .getter
= mlxsw_reg_ppcnt_tx_pause_duration_get
,
1451 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1453 static u64
mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl
)
1455 u64 transmit_queue
= mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl
);
1457 return MLXSW_SP_CELLS_TO_BYTES(transmit_queue
);
1460 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats
[] = {
1462 .str
= "tc_transmit_queue_tc",
1463 .getter
= mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get
,
1466 .str
= "tc_no_buffer_discard_uc_tc",
1467 .getter
= mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get
,
1471 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1473 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
1474 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1475 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
1476 IEEE_8021QAZ_MAX_TCS)
1478 static void mlxsw_sp_port_get_prio_strings(u8
**p
, int prio
)
1482 for (i
= 0; i
< MLXSW_SP_PORT_HW_PRIO_STATS_LEN
; i
++) {
1483 snprintf(*p
, ETH_GSTRING_LEN
, "%s_%d",
1484 mlxsw_sp_port_hw_prio_stats
[i
].str
, prio
);
1485 *p
+= ETH_GSTRING_LEN
;
1489 static void mlxsw_sp_port_get_tc_strings(u8
**p
, int tc
)
1493 for (i
= 0; i
< MLXSW_SP_PORT_HW_TC_STATS_LEN
; i
++) {
1494 snprintf(*p
, ETH_GSTRING_LEN
, "%s_%d",
1495 mlxsw_sp_port_hw_tc_stats
[i
].str
, tc
);
1496 *p
+= ETH_GSTRING_LEN
;
1500 static void mlxsw_sp_port_get_strings(struct net_device
*dev
,
1501 u32 stringset
, u8
*data
)
1506 switch (stringset
) {
1508 for (i
= 0; i
< MLXSW_SP_PORT_HW_STATS_LEN
; i
++) {
1509 memcpy(p
, mlxsw_sp_port_hw_stats
[i
].str
,
1511 p
+= ETH_GSTRING_LEN
;
1514 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
1515 mlxsw_sp_port_get_prio_strings(&p
, i
);
1517 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
1518 mlxsw_sp_port_get_tc_strings(&p
, i
);
1524 static int mlxsw_sp_port_set_phys_id(struct net_device
*dev
,
1525 enum ethtool_phys_id_state state
)
1527 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1528 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1529 char mlcr_pl
[MLXSW_REG_MLCR_LEN
];
1533 case ETHTOOL_ID_ACTIVE
:
1536 case ETHTOOL_ID_INACTIVE
:
1543 mlxsw_reg_mlcr_pack(mlcr_pl
, mlxsw_sp_port
->local_port
, active
);
1544 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mlcr
), mlcr_pl
);
1548 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats
**p_hw_stats
,
1549 int *p_len
, enum mlxsw_reg_ppcnt_grp grp
)
1552 case MLXSW_REG_PPCNT_IEEE_8023_CNT
:
1553 *p_hw_stats
= mlxsw_sp_port_hw_stats
;
1554 *p_len
= MLXSW_SP_PORT_HW_STATS_LEN
;
1556 case MLXSW_REG_PPCNT_PRIO_CNT
:
1557 *p_hw_stats
= mlxsw_sp_port_hw_prio_stats
;
1558 *p_len
= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
1560 case MLXSW_REG_PPCNT_TC_CNT
:
1561 *p_hw_stats
= mlxsw_sp_port_hw_tc_stats
;
1562 *p_len
= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
1571 static void __mlxsw_sp_port_get_stats(struct net_device
*dev
,
1572 enum mlxsw_reg_ppcnt_grp grp
, int prio
,
1573 u64
*data
, int data_index
)
1575 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1576 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1577 struct mlxsw_sp_port_hw_stats
*hw_stats
;
1578 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
1582 err
= mlxsw_sp_get_hw_stats_by_group(&hw_stats
, &len
, grp
);
1585 mlxsw_reg_ppcnt_pack(ppcnt_pl
, mlxsw_sp_port
->local_port
, grp
, prio
);
1586 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ppcnt
), ppcnt_pl
);
1587 for (i
= 0; i
< len
; i
++)
1588 data
[data_index
+ i
] = !err
? hw_stats
[i
].getter(ppcnt_pl
) : 0;
1591 static void mlxsw_sp_port_get_stats(struct net_device
*dev
,
1592 struct ethtool_stats
*stats
, u64
*data
)
1594 int i
, data_index
= 0;
1596 /* IEEE 802.3 Counters */
1597 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_IEEE_8023_CNT
, 0,
1599 data_index
= MLXSW_SP_PORT_HW_STATS_LEN
;
1601 /* Per-Priority Counters */
1602 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1603 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_PRIO_CNT
, i
,
1605 data_index
+= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
1608 /* Per-TC Counters */
1609 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1610 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_TC_CNT
, i
,
1612 data_index
+= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
1616 static int mlxsw_sp_port_get_sset_count(struct net_device
*dev
, int sset
)
1620 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN
;
1626 struct mlxsw_sp_port_link_mode
{
1633 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode
[] = {
1635 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T
,
1636 .supported
= SUPPORTED_100baseT_Full
,
1637 .advertised
= ADVERTISED_100baseT_Full
,
1641 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX
,
1645 .mask
= MLXSW_REG_PTYS_ETH_SPEED_SGMII
|
1646 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
,
1647 .supported
= SUPPORTED_1000baseKX_Full
,
1648 .advertised
= ADVERTISED_1000baseKX_Full
,
1652 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T
,
1653 .supported
= SUPPORTED_10000baseT_Full
,
1654 .advertised
= ADVERTISED_10000baseT_Full
,
1658 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4
|
1659 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
,
1660 .supported
= SUPPORTED_10000baseKX4_Full
,
1661 .advertised
= ADVERTISED_10000baseKX4_Full
,
1665 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
1666 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
1667 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
1668 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR
,
1669 .supported
= SUPPORTED_10000baseKR_Full
,
1670 .advertised
= ADVERTISED_10000baseKR_Full
,
1674 .mask
= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2
,
1675 .supported
= SUPPORTED_20000baseKR2_Full
,
1676 .advertised
= ADVERTISED_20000baseKR2_Full
,
1680 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
,
1681 .supported
= SUPPORTED_40000baseCR4_Full
,
1682 .advertised
= ADVERTISED_40000baseCR4_Full
,
1686 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
,
1687 .supported
= SUPPORTED_40000baseKR4_Full
,
1688 .advertised
= ADVERTISED_40000baseKR4_Full
,
1692 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
,
1693 .supported
= SUPPORTED_40000baseSR4_Full
,
1694 .advertised
= ADVERTISED_40000baseSR4_Full
,
1698 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4
,
1699 .supported
= SUPPORTED_40000baseLR4_Full
,
1700 .advertised
= ADVERTISED_40000baseLR4_Full
,
1704 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR
|
1705 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR
|
1706 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
1710 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4
|
1711 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2
|
1712 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2
,
1716 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
1717 .supported
= SUPPORTED_56000baseKR4_Full
,
1718 .advertised
= ADVERTISED_56000baseKR4_Full
,
1722 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
|
1723 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
1724 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
|
1725 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4
,
1730 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1732 static u32
mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto
)
1734 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
1735 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
1736 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
1737 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
1738 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
1739 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
1740 return SUPPORTED_FIBRE
;
1742 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
1743 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
1744 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
1745 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
|
1746 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
))
1747 return SUPPORTED_Backplane
;
1751 static u32
mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto
)
1756 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1757 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
)
1758 modes
|= mlxsw_sp_port_link_mode
[i
].supported
;
1763 static u32
mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto
)
1768 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1769 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
)
1770 modes
|= mlxsw_sp_port_link_mode
[i
].advertised
;
1775 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok
, u32 ptys_eth_proto
,
1776 struct ethtool_cmd
*cmd
)
1778 u32 speed
= SPEED_UNKNOWN
;
1779 u8 duplex
= DUPLEX_UNKNOWN
;
1785 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1786 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
) {
1787 speed
= mlxsw_sp_port_link_mode
[i
].speed
;
1788 duplex
= DUPLEX_FULL
;
1793 ethtool_cmd_speed_set(cmd
, speed
);
1794 cmd
->duplex
= duplex
;
1797 static u8
mlxsw_sp_port_connector_port(u32 ptys_eth_proto
)
1799 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
1800 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
1801 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
1802 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
1805 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
1806 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
1807 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
))
1810 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
1811 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
1812 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
1813 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
))
1819 static int mlxsw_sp_port_get_settings(struct net_device
*dev
,
1820 struct ethtool_cmd
*cmd
)
1822 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1823 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1824 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
1826 u32 eth_proto_admin
;
1830 mlxsw_reg_ptys_pack(ptys_pl
, mlxsw_sp_port
->local_port
, 0);
1831 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1833 netdev_err(dev
, "Failed to get proto");
1836 mlxsw_reg_ptys_unpack(ptys_pl
, ð_proto_cap
,
1837 ð_proto_admin
, ð_proto_oper
);
1839 cmd
->supported
= mlxsw_sp_from_ptys_supported_port(eth_proto_cap
) |
1840 mlxsw_sp_from_ptys_supported_link(eth_proto_cap
) |
1841 SUPPORTED_Pause
| SUPPORTED_Asym_Pause
|
1843 cmd
->advertising
= mlxsw_sp_from_ptys_advert_link(eth_proto_admin
);
1844 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev
),
1845 eth_proto_oper
, cmd
);
1847 eth_proto_oper
= eth_proto_oper
? eth_proto_oper
: eth_proto_cap
;
1848 cmd
->port
= mlxsw_sp_port_connector_port(eth_proto_oper
);
1849 cmd
->lp_advertising
= mlxsw_sp_from_ptys_advert_link(eth_proto_oper
);
1851 cmd
->transceiver
= XCVR_INTERNAL
;
1855 static u32
mlxsw_sp_to_ptys_advert_link(u32 advertising
)
1860 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1861 if (advertising
& mlxsw_sp_port_link_mode
[i
].advertised
)
1862 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
1867 static u32
mlxsw_sp_to_ptys_speed(u32 speed
)
1872 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1873 if (speed
== mlxsw_sp_port_link_mode
[i
].speed
)
1874 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
1879 static u32
mlxsw_sp_to_ptys_upper_speed(u32 upper_speed
)
1884 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1885 if (mlxsw_sp_port_link_mode
[i
].speed
<= upper_speed
)
1886 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
1891 static int mlxsw_sp_port_set_settings(struct net_device
*dev
,
1892 struct ethtool_cmd
*cmd
)
1894 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1895 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1896 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
1900 u32 eth_proto_admin
;
1903 speed
= ethtool_cmd_speed(cmd
);
1905 eth_proto_new
= cmd
->autoneg
== AUTONEG_ENABLE
?
1906 mlxsw_sp_to_ptys_advert_link(cmd
->advertising
) :
1907 mlxsw_sp_to_ptys_speed(speed
);
1909 mlxsw_reg_ptys_pack(ptys_pl
, mlxsw_sp_port
->local_port
, 0);
1910 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1912 netdev_err(dev
, "Failed to get proto");
1915 mlxsw_reg_ptys_unpack(ptys_pl
, ð_proto_cap
, ð_proto_admin
, NULL
);
1917 eth_proto_new
= eth_proto_new
& eth_proto_cap
;
1918 if (!eth_proto_new
) {
1919 netdev_err(dev
, "Not supported proto admin requested");
1922 if (eth_proto_new
== eth_proto_admin
)
1925 mlxsw_reg_ptys_pack(ptys_pl
, mlxsw_sp_port
->local_port
, eth_proto_new
);
1926 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1928 netdev_err(dev
, "Failed to set proto admin");
1932 if (!netif_running(dev
))
1935 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
1937 netdev_err(dev
, "Failed to set admin status");
1941 err
= mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
1943 netdev_err(dev
, "Failed to set admin status");
1950 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops
= {
1951 .get_drvinfo
= mlxsw_sp_port_get_drvinfo
,
1952 .get_link
= ethtool_op_get_link
,
1953 .get_pauseparam
= mlxsw_sp_port_get_pauseparam
,
1954 .set_pauseparam
= mlxsw_sp_port_set_pauseparam
,
1955 .get_strings
= mlxsw_sp_port_get_strings
,
1956 .set_phys_id
= mlxsw_sp_port_set_phys_id
,
1957 .get_ethtool_stats
= mlxsw_sp_port_get_stats
,
1958 .get_sset_count
= mlxsw_sp_port_get_sset_count
,
1959 .get_settings
= mlxsw_sp_port_get_settings
,
1960 .set_settings
= mlxsw_sp_port_set_settings
,
1964 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u8 width
)
1966 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1967 u32 upper_speed
= MLXSW_SP_PORT_BASE_SPEED
* width
;
1968 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
1969 u32 eth_proto_admin
;
1971 eth_proto_admin
= mlxsw_sp_to_ptys_upper_speed(upper_speed
);
1972 mlxsw_reg_ptys_pack(ptys_pl
, mlxsw_sp_port
->local_port
,
1974 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
1977 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1978 enum mlxsw_reg_qeec_hr hr
, u8 index
, u8 next_index
,
1979 bool dwrr
, u8 dwrr_weight
)
1981 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1982 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
1984 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
1986 mlxsw_reg_qeec_de_set(qeec_pl
, true);
1987 mlxsw_reg_qeec_dwrr_set(qeec_pl
, dwrr
);
1988 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl
, dwrr_weight
);
1989 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
1992 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1993 enum mlxsw_reg_qeec_hr hr
, u8 index
,
1994 u8 next_index
, u32 maxrate
)
1996 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1997 char qeec_pl
[MLXSW_REG_QEEC_LEN
];
1999 mlxsw_reg_qeec_pack(qeec_pl
, mlxsw_sp_port
->local_port
, hr
, index
,
2001 mlxsw_reg_qeec_mase_set(qeec_pl
, true);
2002 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl
, maxrate
);
2003 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qeec
), qeec_pl
);
2006 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
2007 u8 switch_prio
, u8 tclass
)
2009 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2010 char qtct_pl
[MLXSW_REG_QTCT_LEN
];
2012 mlxsw_reg_qtct_pack(qtct_pl
, mlxsw_sp_port
->local_port
, switch_prio
,
2014 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(qtct
), qtct_pl
);
2017 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
2021 /* Setup the elements hierarcy, so that each TC is linked to
2022 * one subgroup, which are all member in the same group.
2024 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
2025 MLXSW_REG_QEEC_HIERARCY_GROUP
, 0, 0, false,
2029 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2030 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
2031 MLXSW_REG_QEEC_HIERARCY_SUBGROUP
, i
,
2036 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2037 err
= mlxsw_sp_port_ets_set(mlxsw_sp_port
,
2038 MLXSW_REG_QEEC_HIERARCY_TC
, i
, i
,
2044 /* Make sure the max shaper is disabled in all hierarcies that
2047 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
2048 MLXSW_REG_QEEC_HIERARCY_PORT
, 0, 0,
2049 MLXSW_REG_QEEC_MAS_DIS
);
2052 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2053 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
2054 MLXSW_REG_QEEC_HIERARCY_SUBGROUP
,
2056 MLXSW_REG_QEEC_MAS_DIS
);
2060 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2061 err
= mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port
,
2062 MLXSW_REG_QEEC_HIERARCY_TC
,
2064 MLXSW_REG_QEEC_MAS_DIS
);
2069 /* Map all priorities to traffic class 0. */
2070 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
2071 err
= mlxsw_sp_port_prio_tc_set(mlxsw_sp_port
, i
, 0);
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
				   mlxsw_sp_port->local_port, dev,
				   mlxsw_sp_port->split, module);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			mlxsw_sp_port->local_port);
		goto err_core_port_init;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}
2242 static void mlxsw_sp_port_remove(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
)
2244 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
2248 mlxsw_sp
->ports
[local_port
] = NULL
;
2249 mlxsw_core_port_fini(&mlxsw_sp_port
->core_port
);
2250 unregister_netdev(mlxsw_sp_port
->dev
); /* This calls ndo_stop */
2251 mlxsw_sp_port_dcb_fini(mlxsw_sp_port
);
2252 mlxsw_sp_port_kill_vid(mlxsw_sp_port
->dev
, 0, 1);
2253 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port
);
2254 mlxsw_sp_port_swid_set(mlxsw_sp_port
, MLXSW_PORT_SWID_DISABLED_PORT
);
2255 mlxsw_sp_port_module_unmap(mlxsw_sp
, mlxsw_sp_port
->local_port
);
2256 free_percpu(mlxsw_sp_port
->pcpu_stats
);
2257 kfree(mlxsw_sp_port
->untagged_vlans
);
2258 kfree(mlxsw_sp_port
->active_vlans
);
2259 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port
->vports_list
));
2260 free_netdev(mlxsw_sp_port
->dev
);
2263 static void mlxsw_sp_ports_remove(struct mlxsw_sp
*mlxsw_sp
)
2267 for (i
= 1; i
< MLXSW_PORT_MAX_PORTS
; i
++)
2268 mlxsw_sp_port_remove(mlxsw_sp
, i
);
2269 kfree(mlxsw_sp
->ports
);
2272 static int mlxsw_sp_ports_create(struct mlxsw_sp
*mlxsw_sp
)
2274 u8 module
, width
, lane
;
2279 alloc_size
= sizeof(struct mlxsw_sp_port
*) * MLXSW_PORT_MAX_PORTS
;
2280 mlxsw_sp
->ports
= kzalloc(alloc_size
, GFP_KERNEL
);
2281 if (!mlxsw_sp
->ports
)
2284 for (i
= 1; i
< MLXSW_PORT_MAX_PORTS
; i
++) {
2285 err
= mlxsw_sp_port_module_info_get(mlxsw_sp
, i
, &module
,
2288 goto err_port_module_info_get
;
2291 mlxsw_sp
->port_to_module
[i
] = module
;
2292 err
= mlxsw_sp_port_create(mlxsw_sp
, i
, false, module
, width
,
2295 goto err_port_create
;
2300 err_port_module_info_get
:
2301 for (i
--; i
>= 1; i
--)
2302 mlxsw_sp_port_remove(mlxsw_sp
, i
);
2303 kfree(mlxsw_sp
->ports
);
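/* Local ports are grouped into clusters of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * ports that share one module. For example, with clusters of four, local
 * port 3 has offset (3 - 1) % 4 = 2 and therefore belongs to the cluster
 * based at local port 1.
 */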
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
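/* Splitting divides the module's maximum width among the new ports: a
 * count of 2 yields two ports of width 2, a count of 4 yields four ports
 * of width 1. Unsplitting re-creates the even-numbered ports at full
 * width.
 */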
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};

static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}

static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
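/* Each entry below traps one packet type to the CPU through the same RX
 * handler; mlxsw_sp_traps_init() registers the listeners and programs the
 * matching HPKT action for every trap ID in this array.
 */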
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_ARPBC,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_ARPUC,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IP2ME,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
	},
};
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}
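/* Flooding is configured per packet type and per bridge type: VLAN-unaware
 * (vFID) bridges use the plain FID table type, while VLAN-aware bridges
 * use the FID-offset type. Unknown unicast gets its own flood table;
 * broadcast and multicast share another.
 */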
static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
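/* Main init flow for the ASIC: base MAC, event traps, RX traps, flood
 * tables, shared buffers, LAG hashing, switchdev, router and SPAN must all
 * be up before the ports are created, since port creation immediately
 * uses these facilities.
 */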
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int i;

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.kvd_hash_single_size		= MLXSW_SP_KVD_HASH_SINGLE_SIZE,
	.kvd_hash_double_size		= MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable		= 1,
};
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
		if (mlxsw_sp_port_dev_check(lower_dev))
			return netdev_priv(lower_dev);
	}

	return NULL;
}

static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
		if (mlxsw_sp_port_dev_check(lower_dev))
			return netdev_priv(lower_dev);
	}

	return NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
				       unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		if (!r)
			return true;
		r->ref_count++;
		return false;
	case NETDEV_DOWN:
		if (r && --r->ref_count == 0)
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}

static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		if (!mlxsw_sp->rifs[i])
			return i;

	return MLXSW_SP_RIF_MAX;
}
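/* A RIF (router interface) binds an L3 netdev to a router interface entry
 * in the device. RIFs live in a small fixed-size array; allocation is a
 * linear scan with MLXSW_SP_RIF_MAX as the "no free entry" sentinel. The
 * helpers below manage Sub-port RIFs for VLAN-enabled ports (vPorts),
 * keyed by {system port or LAG, VID}.
 */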
static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
					   bool *p_lagged, u16 *p_system_port)
{
	u8 local_port = mlxsw_sp_vport->local_port;

	*p_lagged = mlxsw_sp_vport->lagged;
	*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
}

static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *l3_dev, u16 rif,
				    bool create)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	bool lagged = mlxsw_sp_vport->lagged;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 system_port;

	mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
			    l3_dev->mtu, l3_dev->dev_addr);

	mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
				  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *
mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->leave = mlxsw_sp_vport_rif_sp_leave;
	f->ref_count = 0;
	f->dev = l3_dev;
	f->fid = fid;

	return f;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	ether_addr_copy(r->addr, l3_dev->dev_addr);
	r->mtu = l3_dev->mtu;
	r->ref_count = 1;
	r->dev = l3_dev;
	r->rif = rif;
	r->f = f;

	return r;
}
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return ERR_PTR(-ERANGE);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
	if (err)
		return ERR_PTR(err);

	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
	return ERR_PTR(err);
}

static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 fid = f->fid;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(f);

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}

static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *r;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!r) {
		r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(r))
			return PTR_ERR(r);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
	r->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);

	return 0;
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}
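/* NETDEV_UP on an L3 device joins the underlying vPort to a RIF (creating
 * the RIF on first use), while NETDEV_DOWN drops the reference and
 * destroys the RIF when the last user leaves.
 */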
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
							    event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}

static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *l3_dev)
{
	u16 fid;

	if (is_vlan_dev(l3_dev))
		fid = vlan_dev_vlan_id(l3_dev);
	else if (mlxsw_sp->master_bridge.dev == l3_dev)
		fid = 1;
	else
		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);

	return mlxsw_sp_fid_find(mlxsw_sp, fid);
}

static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return MLXSW_REG_RITR_FID_IF;
	else
		return MLXSW_REG_RITR_VLAN_IF;
}
static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
			    l3_dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return -ERANGE;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
	return err;
}

void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}

static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, event))
		goto out;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
					 u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
	else
		return test_bit(fid, lag_port->active_vlans);
}

static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (!lag_port || lag_port->local_port == local_port)
			continue;
		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
			count++;
	}

	return !count;
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
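/* An FDB flush is only needed if no other LAG member still uses the FID;
 * lagged ports flush by LAG ID, standalone ports by local port.
 */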
int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
		return 0;

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}

static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
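/* LAG IDs are allocated from a fixed pool: the scan below returns the ID
 * already bound to this upper device if there is one, otherwise the first
 * ID with a zero reference count.
 */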
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
}

static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lagged = 0;
}
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
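/* NETDEV_PRECHANGEUPPER is the veto point: configurations the hardware
 * cannot offload (a second bridge, a non-hash LAG, VLANs on top of a LAG
 * slave) are rejected before the stack commits to them, and
 * NETDEV_CHANGEUPPER then performs the actual join or leave.
 */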
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;
	if (br_dev != mlxsw_sp->master_bridge.dev)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			break;
		if (info->linking) {
			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
							       upper_dev);
			if (err)
				return err;
		} else {
			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
							   upper_dev);
		}
		break;
	}

	return 0;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}

static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}

static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}
*unused
,
4462 unsigned long event
, void *ptr
)
4464 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
4467 if (event
== NETDEV_CHANGEADDR
|| event
== NETDEV_CHANGEMTU
)
4468 err
= mlxsw_sp_netdevice_router_port_event(dev
);
4469 else if (mlxsw_sp_port_dev_check(dev
))
4470 err
= mlxsw_sp_netdevice_port_event(dev
, event
, ptr
);
4471 else if (netif_is_lag_master(dev
))
4472 err
= mlxsw_sp_netdevice_lag_event(dev
, event
, ptr
);
4473 else if (netif_is_bridge_master(dev
))
4474 err
= mlxsw_sp_netdevice_bridge_event(dev
, event
, ptr
);
4475 else if (is_vlan_dev(dev
))
4476 err
= mlxsw_sp_netdevice_vlan_event(dev
, event
, ptr
);
4478 return notifier_from_errno(err
);
4481 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly
= {
4482 .notifier_call
= mlxsw_sp_netdevice_event
,
4485 static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly
= {
4486 .notifier_call
= mlxsw_sp_inetaddr_event
,
4487 .priority
= 10, /* Must be called before FIB notifier block */
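/* The netdevice and inetaddr notifiers are registered before the core
 * driver so that their events are already observable by the time devices
 * start probing.
 */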
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);