/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* Packet protocol type. Must be set to 1 (Ethernet). */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* Switch partition ID. Must be set to 0. */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* Egress TClass to be used on the egress device on the egress port. */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* 6 - Control packets */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
static bool mlxsw_sp_port_dev_check(const struct net_device *dev);
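/* Prepend the mlxsw Tx header to an skb and fill it in: the packet is marked
 * as a control packet and is directed at the egress port given in tx_info.
 */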
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);
	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
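/* The SPAN (mirroring) agent table: allocate one entry per agent supported by
 * the device, as reported by the MAX_SPAN resource.
 */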
static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))

	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
							  MAX_SPAN);
	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
					 sizeof(struct mlxsw_sp_span_entry),
					 GFP_KERNEL);
	if (!mlxsw_sp->span.entries)

	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);

static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
	kfree(mlxsw_sp->span.entries);
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	u8 local_port = port->local_port;

	/* find a free entry to use */
	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		if (!mlxsw_sp->span.entries[i].used) {
			span_entry = &mlxsw_sp->span.entries[i];

	/* create a new port analyzer entry for local_port */
	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);

	span_entry->used = true;
	span_entry->id = index;
	span_entry->ref_count = 1;
	span_entry->local_port = local_port;
static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_span_entry *span_entry)
	u8 local_port = span_entry->local_port;
	char mpat_pl[MLXSW_REG_MPAT_LEN];
	int pa_id = span_entry->id;

	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
	span_entry->used = false;
static struct mlxsw_sp_span_entry *
mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		if (curr->used && curr->local_port == port->local_port)
static struct mlxsw_sp_span_entry
*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(port);
	/* Already exists, just take a reference */
	span_entry->ref_count++;

	return mlxsw_sp_span_entry_create(port);

static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_span_entry *span_entry)
	WARN_ON(!span_entry->ref_count);
	if (--span_entry->ref_count == 0)
		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	struct mlxsw_sp_span_inspected_port *p;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];

		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
			    p->type == MLXSW_SP_SPAN_EGRESS)
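/* Shared buffer reserved for an egress-mirrored port: two and a half times
 * the MTU, converted to cells, plus one spare cell.
 */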
static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char sbib_pl[MLXSW_REG_SBIB_LEN];

	/* If port is egress mirrored, the shared buffer size should be
	 * updated according to the mtu value
	 */
	if (mlxsw_sp_span_is_egress_mirror(port)) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
static struct mlxsw_sp_span_inspected_port *
mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry)
	struct mlxsw_sp_span_inspected_port *p;

	list_for_each_entry(p, &span_entry->bound_ports_list, list)
		if (port->local_port == p->local_port)
mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
				  struct mlxsw_sp_span_entry *span_entry,
				  enum mlxsw_sp_span_type type)
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	/* if it is an egress SPAN, bind a shared buffer to it */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
				    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");

	/* bind the port to the SPAN entry */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
		goto err_mpar_reg_write;

	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
	if (!inspected_port) {
		goto err_inspected_port_alloc;
	inspected_port->local_port = port->local_port;
	inspected_port->type = type;
	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);

err_inspected_port_alloc:
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
				    struct mlxsw_sp_span_entry *span_entry,
				    enum mlxsw_sp_span_type type)
	struct mlxsw_sp_span_inspected_port *inspected_port;
	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
	char mpar_pl[MLXSW_REG_MPAR_LEN];
	char sbib_pl[MLXSW_REG_SBIB_LEN];
	int pa_id = span_entry->id;

	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);

	/* remove the inspected port */
	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
			    (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);

	/* remove the SBIB buffer if it was egress SPAN */
	if (type == MLXSW_SP_SPAN_EGRESS) {
		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

	list_del(&inspected_port->list);
	kfree(inspected_port);
static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
				    struct mlxsw_sp_port *to,
				    enum mlxsw_sp_span_type type)
	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_get(to);
	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
		   span_entry->id);
	err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);

	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);

static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
					struct mlxsw_sp_port *to,
					enum mlxsw_sp_span_type type)
	struct mlxsw_sp_span_entry *span_entry;

	span_entry = mlxsw_sp_span_entry_find(to);
		netdev_err(from->dev, "no span entry found\n");
	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
		   span_entry->id);
	mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool learn_enable)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);

	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
			      vid_end, learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
	return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						learn_enable);
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
static int mlxsw_sp_port_open(struct net_device *dev)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	netif_start_queue(dev);

static int mlxsw_sp_port_stop(struct net_device *dev)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
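/* Transmit path: guarantee headroom for the Tx header, pad short frames,
 * build the Tx header and hand the skb to the core, accounting the result
 * in the per-CPU stats.
 */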
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
		dev_consume_skb_any(skb_orig);

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->tx_packets++;
	pcpu_stats->tx_bytes += len;
	u64_stats_update_end(&pcpu_stats->syncp);

	this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
	dev_kfree_skb_any(skb);
static void mlxsw_sp_set_rx_mode(struct net_device *dev)

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);

		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
		goto err_span_port_mtu_update;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
		goto err_port_mtu_set;

	mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
err_span_port_mtu_update:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
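/* Software (CPU-visible) counters are kept per-CPU; sum them here, reading
 * each CPU's values under its u64_stats sequence counter.
 */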
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	stats->tx_dropped = tx_dropped;
static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					    void *sp)
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
					int prio, char *ppcnt_pl)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);
static void update_stats_cache(struct work_struct *work)
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   mlxsw_sp_port->hw_stats.cache);

	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
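/* .ndo_vlan_rx_add_vid: back each 8021q upper with a vPort; the first VLAN
 * added on a port also moves the port to Virtual ({Port, VID} to FID) mode.
 */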
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport)

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
			goto err_port_vp_mode_trans;

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
		goto err_port_add_vid;

	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))

	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
				 unsigned long cookie) {
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
		if (mall_tc_entry->cookie == cookie)
			return mall_tc_entry;
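/* Resolve the mirred action's target netdev and program a SPAN agent that
 * mirrors this port's traffic (ingress or egress) to it.
 */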
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
				      const struct tc_action *a,
				      bool ingress)
	struct net *net = dev_net(mlxsw_sp_port->dev);
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;
	struct net_device *to_dev;

	ifindex = tcf_mirred_ifindex(a);
	to_dev = __dev_get_by_index(net, ifindex);
		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");

	if (!mlxsw_sp_port_dev_check(to_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
	to_port = netdev_priv(to_dev);

	mirror->to_local_port = to_port->local_port;
	mirror->ingress = ingress;
	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_sp_span_type span_type;
	struct mlxsw_sp_port *to_port;

	to_port = mlxsw_sp->ports[mirror->to_local_port];
	span_type = mirror->ingress ?
			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
	mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct tc_cls_matchall_offload *cls,
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
	const struct tc_action *a;

	if (!tc_single_action(cls->exts)) {
		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");

	mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
	mall_tc_entry->cookie = cls->cookie;

	tcf_exts_to_list(cls->exts, &actions);
	a = list_first_entry(&actions, struct tc_action, list);

	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;

		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
		mirror = &mall_tc_entry->mirror;
		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
							    mirror, a, ingress);
		goto err_add_action;
	list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);

	kfree(mall_tc_entry);
static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *cls)
	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;

	mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
							 cls->cookie);
	if (!mall_tc_entry) {
		netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
	list_del(&mall_tc_entry->list);

	switch (mall_tc_entry->type) {
	case MLXSW_SP_PORT_MALL_MIRROR:
		mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
						      &mall_tc_entry->mirror);

	kfree(mall_tc_entry);
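/* ndo_setup_tc entry point: dispatch matchall classifier REPLACE/DESTROY
 * commands to the mirror add/remove handlers above.
 */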
static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
			     __be16 proto, struct tc_to_netdev *tc)
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);

	if (tc->type == TC_SETUP_MATCHALL) {
		switch (tc->cls_mall->command) {
		case TC_CLSMATCHALL_REPLACE:
			return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
		case TC_CLSMATCHALL_DESTROY:
			mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
= {
1317 .ndo_open
= mlxsw_sp_port_open
,
1318 .ndo_stop
= mlxsw_sp_port_stop
,
1319 .ndo_start_xmit
= mlxsw_sp_port_xmit
,
1320 .ndo_setup_tc
= mlxsw_sp_setup_tc
,
1321 .ndo_set_rx_mode
= mlxsw_sp_set_rx_mode
,
1322 .ndo_set_mac_address
= mlxsw_sp_port_set_mac_address
,
1323 .ndo_change_mtu
= mlxsw_sp_port_change_mtu
,
1324 .ndo_get_stats64
= mlxsw_sp_port_get_stats64
,
1325 .ndo_has_offload_stats
= mlxsw_sp_port_has_offload_stats
,
1326 .ndo_get_offload_stats
= mlxsw_sp_port_get_offload_stats
,
1327 .ndo_vlan_rx_add_vid
= mlxsw_sp_port_add_vid
,
1328 .ndo_vlan_rx_kill_vid
= mlxsw_sp_port_kill_vid
,
1329 .ndo_neigh_construct
= mlxsw_sp_router_neigh_construct
,
1330 .ndo_neigh_destroy
= mlxsw_sp_router_neigh_destroy
,
1331 .ndo_fdb_add
= switchdev_port_fdb_add
,
1332 .ndo_fdb_del
= switchdev_port_fdb_del
,
1333 .ndo_fdb_dump
= switchdev_port_fdb_dump
,
1334 .ndo_bridge_setlink
= switchdev_port_bridge_setlink
,
1335 .ndo_bridge_getlink
= switchdev_port_bridge_getlink
,
1336 .ndo_bridge_dellink
= switchdev_port_bridge_dellink
,
1337 .ndo_get_phys_port_name
= mlxsw_sp_port_get_phys_port_name
,
1340 static void mlxsw_sp_port_get_drvinfo(struct net_device
*dev
,
1341 struct ethtool_drvinfo
*drvinfo
)
1343 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1344 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1346 strlcpy(drvinfo
->driver
, mlxsw_sp_driver_name
, sizeof(drvinfo
->driver
));
1347 strlcpy(drvinfo
->version
, mlxsw_sp_driver_version
,
1348 sizeof(drvinfo
->version
));
1349 snprintf(drvinfo
->fw_version
, sizeof(drvinfo
->fw_version
),
1351 mlxsw_sp
->bus_info
->fw_rev
.major
,
1352 mlxsw_sp
->bus_info
->fw_rev
.minor
,
1353 mlxsw_sp
->bus_info
->fw_rev
.subminor
);
1354 strlcpy(drvinfo
->bus_info
, mlxsw_sp
->bus_info
->device_name
,
1355 sizeof(drvinfo
->bus_info
));
1358 static void mlxsw_sp_port_get_pauseparam(struct net_device
*dev
,
1359 struct ethtool_pauseparam
*pause
)
1361 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1363 pause
->rx_pause
= mlxsw_sp_port
->link
.rx_pause
;
1364 pause
->tx_pause
= mlxsw_sp_port
->link
.tx_pause
;
1367 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1368 struct ethtool_pauseparam
*pause
)
1370 char pfcc_pl
[MLXSW_REG_PFCC_LEN
];
1372 mlxsw_reg_pfcc_pack(pfcc_pl
, mlxsw_sp_port
->local_port
);
1373 mlxsw_reg_pfcc_pprx_set(pfcc_pl
, pause
->rx_pause
);
1374 mlxsw_reg_pfcc_pptx_set(pfcc_pl
, pause
->tx_pause
);
1376 return mlxsw_reg_write(mlxsw_sp_port
->mlxsw_sp
->core
, MLXSW_REG(pfcc
),
1380 static int mlxsw_sp_port_set_pauseparam(struct net_device
*dev
,
1381 struct ethtool_pauseparam
*pause
)
1383 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1384 bool pause_en
= pause
->tx_pause
|| pause
->rx_pause
;
1387 if (mlxsw_sp_port
->dcb
.pfc
&& mlxsw_sp_port
->dcb
.pfc
->pfc_en
) {
1388 netdev_err(dev
, "PFC already enabled on port\n");
1392 if (pause
->autoneg
) {
1393 netdev_err(dev
, "PAUSE frames autonegotiation isn't supported\n");
1397 err
= mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
1399 netdev_err(dev
, "Failed to configure port's headroom\n");
1403 err
= mlxsw_sp_port_pause_set(mlxsw_sp_port
, pause
);
1405 netdev_err(dev
, "Failed to set PAUSE parameters\n");
1406 goto err_port_pause_configure
;
1409 mlxsw_sp_port
->link
.rx_pause
= pause
->rx_pause
;
1410 mlxsw_sp_port
->link
.tx_pause
= pause
->tx_pause
;
1414 err_port_pause_configure
:
1415 pause_en
= mlxsw_sp_port_is_pause_en(mlxsw_sp_port
);
1416 mlxsw_sp_port_headroom_set(mlxsw_sp_port
, dev
->mtu
, pause_en
);
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{ .str = "a_frames_transmitted_ok", .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, },
	{ .str = "a_frames_received_ok", .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, },
	{ .str = "a_frame_check_sequence_errors", .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, },
	{ .str = "a_alignment_errors", .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, },
	{ .str = "a_octets_transmitted_ok", .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, },
	{ .str = "a_octets_received_ok", .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, },
	{ .str = "a_multicast_frames_xmitted_ok", .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, },
	{ .str = "a_broadcast_frames_xmitted_ok", .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, },
	{ .str = "a_multicast_frames_received_ok", .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, },
	{ .str = "a_broadcast_frames_received_ok", .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, },
	{ .str = "a_in_range_length_errors", .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, },
	{ .str = "a_out_of_range_length_field", .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, },
	{ .str = "a_frame_too_long_errors", .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, },
	{ .str = "a_symbol_error_during_carrier", .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, },
	{ .str = "a_mac_control_frames_transmitted", .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, },
	{ .str = "a_mac_control_frames_received", .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, },
	{ .str = "a_unsupported_opcodes_received", .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, },
	{ .str = "a_pause_mac_ctrl_frames_received", .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, },
	{ .str = "a_pause_mac_ctrl_frames_xmitted", .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, },
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{ .str = "rx_octets_prio", .getter = mlxsw_reg_ppcnt_rx_octets_get, },
	{ .str = "rx_frames_prio", .getter = mlxsw_reg_ppcnt_rx_frames_get, },
	{ .str = "tx_octets_prio", .getter = mlxsw_reg_ppcnt_tx_octets_get, },
	{ .str = "tx_frames_prio", .getter = mlxsw_reg_ppcnt_tx_frames_get, },
	{ .str = "rx_pause_prio", .getter = mlxsw_reg_ppcnt_rx_pause_get, },
	{ .str = "rx_pause_duration_prio", .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, },
	{ .str = "tx_pause_prio", .getter = mlxsw_reg_ppcnt_tx_pause_get, },
	{ .str = "tx_pause_duration_prio", .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, },
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
	u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);

	return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{ .str = "tc_transmit_queue_tc", .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get, },
	{ .str = "tc_no_buffer_discard_uc_tc", .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, },
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
1568 static void mlxsw_sp_port_get_prio_strings(u8
**p
, int prio
)
1572 for (i
= 0; i
< MLXSW_SP_PORT_HW_PRIO_STATS_LEN
; i
++) {
1573 snprintf(*p
, ETH_GSTRING_LEN
, "%s_%d",
1574 mlxsw_sp_port_hw_prio_stats
[i
].str
, prio
);
1575 *p
+= ETH_GSTRING_LEN
;
1579 static void mlxsw_sp_port_get_tc_strings(u8
**p
, int tc
)
1583 for (i
= 0; i
< MLXSW_SP_PORT_HW_TC_STATS_LEN
; i
++) {
1584 snprintf(*p
, ETH_GSTRING_LEN
, "%s_%d",
1585 mlxsw_sp_port_hw_tc_stats
[i
].str
, tc
);
1586 *p
+= ETH_GSTRING_LEN
;
1590 static void mlxsw_sp_port_get_strings(struct net_device
*dev
,
1591 u32 stringset
, u8
*data
)
1596 switch (stringset
) {
1598 for (i
= 0; i
< MLXSW_SP_PORT_HW_STATS_LEN
; i
++) {
1599 memcpy(p
, mlxsw_sp_port_hw_stats
[i
].str
,
1601 p
+= ETH_GSTRING_LEN
;
1604 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
1605 mlxsw_sp_port_get_prio_strings(&p
, i
);
1607 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
1608 mlxsw_sp_port_get_tc_strings(&p
, i
);
1614 static int mlxsw_sp_port_set_phys_id(struct net_device
*dev
,
1615 enum ethtool_phys_id_state state
)
1617 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1618 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1619 char mlcr_pl
[MLXSW_REG_MLCR_LEN
];
1623 case ETHTOOL_ID_ACTIVE
:
1626 case ETHTOOL_ID_INACTIVE
:
1633 mlxsw_reg_mlcr_pack(mlcr_pl
, mlxsw_sp_port
->local_port
, active
);
1634 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mlcr
), mlcr_pl
);
1638 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats
**p_hw_stats
,
1639 int *p_len
, enum mlxsw_reg_ppcnt_grp grp
)
1642 case MLXSW_REG_PPCNT_IEEE_8023_CNT
:
1643 *p_hw_stats
= mlxsw_sp_port_hw_stats
;
1644 *p_len
= MLXSW_SP_PORT_HW_STATS_LEN
;
1646 case MLXSW_REG_PPCNT_PRIO_CNT
:
1647 *p_hw_stats
= mlxsw_sp_port_hw_prio_stats
;
1648 *p_len
= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
1650 case MLXSW_REG_PPCNT_TC_CNT
:
1651 *p_hw_stats
= mlxsw_sp_port_hw_tc_stats
;
1652 *p_len
= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
1661 static void __mlxsw_sp_port_get_stats(struct net_device
*dev
,
1662 enum mlxsw_reg_ppcnt_grp grp
, int prio
,
1663 u64
*data
, int data_index
)
1665 struct mlxsw_sp_port_hw_stats
*hw_stats
;
1666 char ppcnt_pl
[MLXSW_REG_PPCNT_LEN
];
1670 err
= mlxsw_sp_get_hw_stats_by_group(&hw_stats
, &len
, grp
);
1673 mlxsw_sp_port_get_stats_raw(dev
, grp
, prio
, ppcnt_pl
);
1674 for (i
= 0; i
< len
; i
++)
1675 data
[data_index
+ i
] = hw_stats
[i
].getter(ppcnt_pl
);
1678 static void mlxsw_sp_port_get_stats(struct net_device
*dev
,
1679 struct ethtool_stats
*stats
, u64
*data
)
1681 int i
, data_index
= 0;
1683 /* IEEE 802.3 Counters */
1684 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_IEEE_8023_CNT
, 0,
1686 data_index
= MLXSW_SP_PORT_HW_STATS_LEN
;
1688 /* Per-Priority Counters */
1689 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1690 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_PRIO_CNT
, i
,
1692 data_index
+= MLXSW_SP_PORT_HW_PRIO_STATS_LEN
;
1695 /* Per-TC Counters */
1696 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++) {
1697 __mlxsw_sp_port_get_stats(dev
, MLXSW_REG_PPCNT_TC_CNT
, i
,
1699 data_index
+= MLXSW_SP_PORT_HW_TC_STATS_LEN
;
1703 static int mlxsw_sp_port_get_sset_count(struct net_device
*dev
, int sset
)
1707 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN
;
1713 struct mlxsw_sp_port_link_mode
{
1714 enum ethtool_link_mode_bit_indices mask_ethtool
;
1719 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode
[] = {
1721 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T
,
1722 .mask_ethtool
= ETHTOOL_LINK_MODE_100baseT_Full_BIT
,
1726 .mask
= MLXSW_REG_PTYS_ETH_SPEED_SGMII
|
1727 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
,
1728 .mask_ethtool
= ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
,
1729 .speed
= SPEED_1000
,
1732 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T
,
1733 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseT_Full_BIT
,
1734 .speed
= SPEED_10000
,
1737 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4
|
1738 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
,
1739 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT
,
1740 .speed
= SPEED_10000
,
1743 .mask
= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
1744 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
1745 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
1746 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR
,
1747 .mask_ethtool
= ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
,
1748 .speed
= SPEED_10000
,
1751 .mask
= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2
,
1752 .mask_ethtool
= ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT
,
1753 .speed
= SPEED_20000
,
1756 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
,
1757 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT
,
1758 .speed
= SPEED_40000
,
1761 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
,
1762 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT
,
1763 .speed
= SPEED_40000
,
1766 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
,
1767 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT
,
1768 .speed
= SPEED_40000
,
1771 .mask
= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4
,
1772 .mask_ethtool
= ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT
,
1773 .speed
= SPEED_40000
,
1776 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR
,
1777 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseCR_Full_BIT
,
1778 .speed
= SPEED_25000
,
1781 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR
,
1782 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseKR_Full_BIT
,
1783 .speed
= SPEED_25000
,
1786 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
1787 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
,
1788 .speed
= SPEED_25000
,
1791 .mask
= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR
,
1792 .mask_ethtool
= ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
,
1793 .speed
= SPEED_25000
,
1796 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2
,
1797 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT
,
1798 .speed
= SPEED_50000
,
1801 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2
,
1802 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT
,
1803 .speed
= SPEED_50000
,
1806 .mask
= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2
,
1807 .mask_ethtool
= ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT
,
1808 .speed
= SPEED_50000
,
1811 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
1812 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT
,
1813 .speed
= SPEED_56000
,
1816 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
1817 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT
,
1818 .speed
= SPEED_56000
,
1821 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
1822 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT
,
1823 .speed
= SPEED_56000
,
1826 .mask
= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4
,
1827 .mask_ethtool
= ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT
,
1828 .speed
= SPEED_56000
,
1831 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
,
1832 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT
,
1833 .speed
= SPEED_100000
,
1836 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
,
1837 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT
,
1838 .speed
= SPEED_100000
,
1841 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
,
1842 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
,
1843 .speed
= SPEED_100000
,
1846 .mask
= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4
,
1847 .mask_ethtool
= ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT
,
1848 .speed
= SPEED_100000
,
1852 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1855 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto
,
1856 struct ethtool_link_ksettings
*cmd
)
1858 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
1859 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
1860 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
1861 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
1862 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
1863 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
1864 ethtool_link_ksettings_add_link_mode(cmd
, supported
, FIBRE
);
1866 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
1867 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
1868 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
1869 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
|
1870 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX
))
1871 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Backplane
);
1874 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto
, unsigned long *mode
)
1878 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1879 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
)
1880 __set_bit(mlxsw_sp_port_link_mode
[i
].mask_ethtool
,
1885 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok
, u32 ptys_eth_proto
,
1886 struct ethtool_link_ksettings
*cmd
)
1888 u32 speed
= SPEED_UNKNOWN
;
1889 u8 duplex
= DUPLEX_UNKNOWN
;
1895 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1896 if (ptys_eth_proto
& mlxsw_sp_port_link_mode
[i
].mask
) {
1897 speed
= mlxsw_sp_port_link_mode
[i
].speed
;
1898 duplex
= DUPLEX_FULL
;
1903 cmd
->base
.speed
= speed
;
1904 cmd
->base
.duplex
= duplex
;
1907 static u8
mlxsw_sp_port_connector_port(u32 ptys_eth_proto
)
1909 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR
|
1910 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4
|
1911 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4
|
1912 MLXSW_REG_PTYS_ETH_SPEED_SGMII
))
1915 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR
|
1916 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4
|
1917 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4
))
1920 if (ptys_eth_proto
& (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR
|
1921 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4
|
1922 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4
|
1923 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4
))
1930 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings
*cmd
)
1935 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1936 if (test_bit(mlxsw_sp_port_link_mode
[i
].mask_ethtool
,
1937 cmd
->link_modes
.advertising
))
1938 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
1943 static u32
mlxsw_sp_to_ptys_speed(u32 speed
)
1948 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1949 if (speed
== mlxsw_sp_port_link_mode
[i
].speed
)
1950 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
1955 static u32
mlxsw_sp_to_ptys_upper_speed(u32 upper_speed
)
1960 for (i
= 0; i
< MLXSW_SP_PORT_LINK_MODE_LEN
; i
++) {
1961 if (mlxsw_sp_port_link_mode
[i
].speed
<= upper_speed
)
1962 ptys_proto
|= mlxsw_sp_port_link_mode
[i
].mask
;
1967 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap
,
1968 struct ethtool_link_ksettings
*cmd
)
1970 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Asym_Pause
);
1971 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Autoneg
);
1972 ethtool_link_ksettings_add_link_mode(cmd
, supported
, Pause
);
1974 mlxsw_sp_from_ptys_supported_port(eth_proto_cap
, cmd
);
1975 mlxsw_sp_from_ptys_link(eth_proto_cap
, cmd
->link_modes
.supported
);
1978 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin
, bool autoneg
,
1979 struct ethtool_link_ksettings
*cmd
)
1984 ethtool_link_ksettings_add_link_mode(cmd
, advertising
, Autoneg
);
1985 mlxsw_sp_from_ptys_link(eth_proto_admin
, cmd
->link_modes
.advertising
);
1989 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp
, u8 autoneg_status
,
1990 struct ethtool_link_ksettings
*cmd
)
1992 if (autoneg_status
!= MLXSW_REG_PTYS_AN_STATUS_OK
|| !eth_proto_lp
)
1995 ethtool_link_ksettings_add_link_mode(cmd
, lp_advertising
, Autoneg
);
1996 mlxsw_sp_from_ptys_link(eth_proto_lp
, cmd
->link_modes
.lp_advertising
);
1999 static int mlxsw_sp_port_get_link_ksettings(struct net_device
*dev
,
2000 struct ethtool_link_ksettings
*cmd
)
2002 u32 eth_proto_cap
, eth_proto_admin
, eth_proto_oper
, eth_proto_lp
;
2003 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
2004 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2005 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
2010 autoneg
= mlxsw_sp_port
->link
.autoneg
;
2011 mlxsw_reg_ptys_eth_pack(ptys_pl
, mlxsw_sp_port
->local_port
, 0);
2012 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2018 mlxsw_sp_port_get_link_supported(eth_proto_cap
, cmd
);
2020 mlxsw_sp_port_get_link_advertise(eth_proto_admin
, autoneg
, cmd
);
2022 eth_proto_lp
= mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl
);
2023 autoneg_status
= mlxsw_reg_ptys_an_status_get(ptys_pl
);
2024 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp
, autoneg_status
, cmd
);
2026 cmd
->base
.autoneg
= autoneg
? AUTONEG_ENABLE
: AUTONEG_DISABLE
;
2027 cmd
->base
.port
= mlxsw_sp_port_connector_port(eth_proto_oper
);
2028 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev
), eth_proto_oper
,
2035 mlxsw_sp_port_set_link_ksettings(struct net_device
*dev
,
2036 const struct ethtool_link_ksettings
*cmd
)
2038 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
2039 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
2040 char ptys_pl
[MLXSW_REG_PTYS_LEN
];
2041 u32 eth_proto_cap
, eth_proto_new
;
2045 mlxsw_reg_ptys_eth_pack(ptys_pl
, mlxsw_sp_port
->local_port
, 0);
2046 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2051 autoneg
= cmd
->base
.autoneg
== AUTONEG_ENABLE
;
2052 eth_proto_new
= autoneg
?
2053 mlxsw_sp_to_ptys_advert_link(cmd
) :
2054 mlxsw_sp_to_ptys_speed(cmd
->base
.speed
);
2056 eth_proto_new
= eth_proto_new
& eth_proto_cap
;
2057 if (!eth_proto_new
) {
2058 netdev_err(dev
, "No supported speed requested\n");
2062 mlxsw_reg_ptys_eth_pack(ptys_pl
, mlxsw_sp_port
->local_port
,
2064 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ptys
), ptys_pl
);
2068 if (!netif_running(dev
))
2071 mlxsw_sp_port
->link
.autoneg
= autoneg
;
2073 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, false);
2074 mlxsw_sp_port_admin_status_set(mlxsw_sp_port
, true);
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_link_ksettings	= mlxsw_sp_port_get_link_ksettings,
	.set_link_ksettings	= mlxsw_sp_port_set_link_ksettings,
};

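/* Advertise all speeds the port can reach with its configured lane width;
 * the maximum speed is derived as MLXSW_SP_PORT_BASE_SPEED per lane.
 */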
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
				eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

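/* Configure a single scheduling element (port, group, subgroup or TC) in
 * the ETS hierarchy through the QEEC register.
 */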
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

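/* Bind a switch priority to a traffic class using the QTCT register. */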
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->pvid = 1;

	return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
}

static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
}

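/* Create one front-panel port netdev: allocate the net_device and per-port
 * state, set SWID, MAC, speeds and MTU, initialize buffers, ETS and DCB,
 * create the PVID vPort and finally register the netdev. Each step is
 * unwound in reverse order on failure.
 */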
static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	mlxsw_sp_port->link.autoneg = 1;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
	INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	mlxsw_sp_port->hw_stats.cache =
		kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);

	if (!mlxsw_sp_port->hw_stats.cache) {
		err = -ENOMEM;
		goto err_alloc_hw_stats;
	}
	INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC;

	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_vport_create;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
				mlxsw_sp_port, dev, mlxsw_sp_port->split,
				module);
	mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
err_port_pvid_vport_create:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	kfree(mlxsw_sp_port->hw_stats.cache);
err_alloc_hw_stats:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}

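/* Register the port with mlxsw_core before creating its netdev, and undo
 * the core port registration if netdev creation fails.
 */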
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	int err;

	err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		return err;
	}
	err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
				     module, width, lane);
	if (err)
		goto err_port_create;
	return 0;

err_port_create:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	return err;
}

static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
	mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	kfree(mlxsw_sp_port->hw_stats.cache);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	__mlxsw_sp_port_remove(mlxsw_sp, local_port);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	return mlxsw_sp->ports[local_port] != NULL;
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false,
					   module, width, lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}

static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}

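/* Create the split ports: map each new port onto the shared module with a
 * reduced lane width, move it to the Ethernet SWID and create its netdev.
 */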
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
			mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
					      u8 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

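/* Packets and events trapped to the CPU, with the trap group and the Rx
 * handler (marking or non-marking) used for each trap ID.
 */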
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
	MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
	MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
	MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
	MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
	MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
	MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
	MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
	MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
	MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
	MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
};

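/* Rate-limit traffic trapped to the CPU by attaching a policer (QPCR) to
 * each CPU trap group.
 */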
2774 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core
*mlxsw_core
)
2776 char qpcr_pl
[MLXSW_REG_QPCR_LEN
];
2777 enum mlxsw_reg_qpcr_ir_units ir_units
;
2778 int max_cpu_policers
;
2784 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_CPU_POLICERS
))
2787 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
2789 ir_units
= MLXSW_REG_QPCR_IR_UNITS_M
;
2790 for (i
= 0; i
< max_cpu_policers
; i
++) {
2793 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP
:
2794 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP
:
2795 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP
:
2796 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF
:
2800 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP
:
2804 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4
:
2805 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP
:
2806 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP
:
2807 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS
:
2808 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
2809 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE
:
2813 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME
:
2822 mlxsw_reg_qpcr_pack(qpcr_pl
, i
, ir_units
, is_bytes
, rate
,
2824 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(qpcr
), qpcr_pl
);
2832 static int mlxsw_sp_trap_groups_set(struct mlxsw_core
*mlxsw_core
)
2834 char htgt_pl
[MLXSW_REG_HTGT_LEN
];
2835 enum mlxsw_reg_htgt_trap_group i
;
2836 int max_cpu_policers
;
2837 int max_trap_groups
;
2842 if (!MLXSW_CORE_RES_VALID(mlxsw_core
, MAX_TRAP_GROUPS
))
2845 max_trap_groups
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_TRAP_GROUPS
);
2846 max_cpu_policers
= MLXSW_CORE_RES_GET(mlxsw_core
, MAX_CPU_POLICERS
);
2848 for (i
= 0; i
< max_trap_groups
; i
++) {
2851 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP
:
2852 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP
:
2853 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP
:
2854 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF
:
2858 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4
:
2859 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP
:
2863 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP
:
2864 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME
:
2868 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP
:
2872 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS
:
2873 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP
:
2874 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE
:
2878 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT
:
2879 priority
= MLXSW_REG_HTGT_DEFAULT_PRIORITY
;
2880 tc
= MLXSW_REG_HTGT_DEFAULT_TC
;
2881 policer_id
= MLXSW_REG_HTGT_INVALID_POLICER
;
2887 if (max_cpu_policers
<= policer_id
&&
2888 policer_id
!= MLXSW_REG_HTGT_INVALID_POLICER
)
2891 mlxsw_reg_htgt_pack(htgt_pl
, i
, policer_id
, priority
, tc
);
2892 err
= mlxsw_reg_write(mlxsw_core
, MLXSW_REG(htgt
), htgt_pl
);
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	int i;
	int err;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		return err;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
		err = mlxsw_core_trap_register(mlxsw_sp->core,
					       &mlxsw_sp_listener[i],
					       mlxsw_sp);
		if (err)
			goto err_listener_register;
	}
	return 0;

err_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_core_trap_unregister(mlxsw_sp->core,
					   &mlxsw_sp_listener[i],
					   mlxsw_sp);
	}
	return err;
}

*mlxsw_sp
)
2936 for (i
= 0; i
< ARRAY_SIZE(mlxsw_sp_listener
); i
++) {
2937 mlxsw_core_trap_unregister(mlxsw_sp
->core
,
2938 &mlxsw_sp_listener
[i
],
static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

*mlxsw_sp
)
2990 char slcr_pl
[MLXSW_REG_SLCR_LEN
];
2993 mlxsw_reg_slcr_pack(slcr_pl
, MLXSW_REG_SLCR_LAG_HASH_SMAC
|
2994 MLXSW_REG_SLCR_LAG_HASH_DMAC
|
2995 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE
|
2996 MLXSW_REG_SLCR_LAG_HASH_VLANID
|
2997 MLXSW_REG_SLCR_LAG_HASH_SIP
|
2998 MLXSW_REG_SLCR_LAG_HASH_DIP
|
2999 MLXSW_REG_SLCR_LAG_HASH_SPORT
|
3000 MLXSW_REG_SLCR_LAG_HASH_DPORT
|
3001 MLXSW_REG_SLCR_LAG_HASH_IPPROTO
);
3002 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcr
), slcr_pl
);
3006 if (!MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG
) ||
3007 !MLXSW_CORE_RES_VALID(mlxsw_sp
->core
, MAX_LAG_MEMBERS
))
3010 mlxsw_sp
->lags
= kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LAG
),
3011 sizeof(struct mlxsw_sp_upper
),
3013 if (!mlxsw_sp
->lags
)
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->lags);
}

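/* Minimal trap group configuration needed early in initialization, so that
 * EMADs can be received before the full trap setup is done.
 */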
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}

static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		return err;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
}

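/* Resource profile handed to mlxsw_core on initialization: flood table
 * layout, KVD partitioning and the Ethernet SWID.
 */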
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.used_kvd_split_data		= 1,
	.kvd_hash_granularity		= MLXSW_SP_KVD_GRANULARITY,
	.kvd_hash_single_parts		= 2,
	.kvd_hash_double_parts		= 1,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.resource_query_enable		= 1,
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= mlxsw_sp_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.basic_trap_groups_set		= mlxsw_sp_basic_trap_groups_set,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};

static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

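/* Helpers for finding the mlxsw_sp_port (and its mlxsw_sp instance)
 * underneath stacked VLAN, LAG and bridge devices.
 */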
static int mlxsw_lower_dev_walk(struct net_device *lower_dev, void *data)
{
	struct mlxsw_sp_port **port = data;
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		*port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct mlxsw_sp_port *port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	port = NULL;
	netdev_walk_all_lower_dev(dev, mlxsw_lower_dev_walk, &port);

	return port;
}

static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct mlxsw_sp_port *port;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	port = NULL;
	netdev_walk_all_lower_dev_rcu(dev, mlxsw_lower_dev_walk, &port);

	return port;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();
	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}

3258 static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif
*r
,
3259 unsigned long event
)
3268 if (r
&& --r
->ref_count
== 0)
3270 /* It is possible we already removed the RIF ourselves
3271 * if it was assigned to a netdev that is now a bridge
3280 static int mlxsw_sp_avail_rif_get(struct mlxsw_sp
*mlxsw_sp
)
3284 for (i
= 0; i
< MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_RIFS
); i
++)
3285 if (!mlxsw_sp
->rifs
[i
])
3288 return MLXSW_SP_INVALID_RIF
;
3291 static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port
*mlxsw_sp_vport
,
3292 bool *p_lagged
, u16
*p_system_port
)
3294 u8 local_port
= mlxsw_sp_vport
->local_port
;
3296 *p_lagged
= mlxsw_sp_vport
->lagged
;
3297 *p_system_port
= *p_lagged
? mlxsw_sp_vport
->lag_id
: local_port
;
3300 static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port
*mlxsw_sp_vport
,
3301 struct net_device
*l3_dev
, u16 rif
,
3304 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_vport
->mlxsw_sp
;
3305 bool lagged
= mlxsw_sp_vport
->lagged
;
3306 char ritr_pl
[MLXSW_REG_RITR_LEN
];
3309 mlxsw_reg_ritr_pack(ritr_pl
, create
, MLXSW_REG_RITR_SP_IF
, rif
,
3310 l3_dev
->mtu
, l3_dev
->dev_addr
);
3312 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport
, &lagged
, &system_port
);
3313 mlxsw_reg_ritr_sp_if_pack(ritr_pl
, lagged
, system_port
,
3314 mlxsw_sp_vport_vid_get(mlxsw_sp_vport
));
3316 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ritr
), ritr_pl
);
3319 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port
*mlxsw_sp_vport
);
3321 static struct mlxsw_sp_fid
*
3322 mlxsw_sp_rfid_alloc(u16 fid
, struct net_device
*l3_dev
)
3324 struct mlxsw_sp_fid
*f
;
3326 f
= kzalloc(sizeof(*f
), GFP_KERNEL
);
3330 f
->leave
= mlxsw_sp_vport_rif_sp_leave
;
3338 static struct mlxsw_sp_rif
*
3339 mlxsw_sp_rif_alloc(u16 rif
, struct net_device
*l3_dev
, struct mlxsw_sp_fid
*f
)
3341 struct mlxsw_sp_rif
*r
;
3343 r
= kzalloc(sizeof(*r
), GFP_KERNEL
);
3347 ether_addr_copy(r
->addr
, l3_dev
->dev_addr
);
3348 r
->mtu
= l3_dev
->mtu
;
3357 static struct mlxsw_sp_rif
*
3358 mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port
*mlxsw_sp_vport
,
3359 struct net_device
*l3_dev
)
3361 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_vport
->mlxsw_sp
;
3362 struct mlxsw_sp_fid
*f
;
3363 struct mlxsw_sp_rif
*r
;
3367 rif
= mlxsw_sp_avail_rif_get(mlxsw_sp
);
3368 if (rif
== MLXSW_SP_INVALID_RIF
)
3369 return ERR_PTR(-ERANGE
);
3371 err
= mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport
, l3_dev
, rif
, true);
3373 return ERR_PTR(err
);
3375 fid
= mlxsw_sp_rif_sp_to_fid(rif
);
3376 err
= mlxsw_sp_rif_fdb_op(mlxsw_sp
, l3_dev
->dev_addr
, fid
, true);
3378 goto err_rif_fdb_op
;
3380 f
= mlxsw_sp_rfid_alloc(fid
, l3_dev
);
3383 goto err_rfid_alloc
;
3386 r
= mlxsw_sp_rif_alloc(rif
, l3_dev
, f
);
3393 mlxsw_sp
->rifs
[rif
] = r
;
3400 mlxsw_sp_rif_fdb_op(mlxsw_sp
, l3_dev
->dev_addr
, fid
, false);
3402 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport
, l3_dev
, rif
, false);
3403 return ERR_PTR(err
);
3406 static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port
*mlxsw_sp_vport
,
3407 struct mlxsw_sp_rif
*r
)
3409 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_vport
->mlxsw_sp
;
3410 struct net_device
*l3_dev
= r
->dev
;
3411 struct mlxsw_sp_fid
*f
= r
->f
;
3415 mlxsw_sp
->rifs
[rif
] = NULL
;
3422 mlxsw_sp_rif_fdb_op(mlxsw_sp
, l3_dev
->dev_addr
, fid
, false);
3424 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport
, l3_dev
, rif
, false);
3427 static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port
*mlxsw_sp_vport
,
3428 struct net_device
*l3_dev
)
3430 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_vport
->mlxsw_sp
;
3431 struct mlxsw_sp_rif
*r
;
3433 r
= mlxsw_sp_rif_find_by_dev(mlxsw_sp
, l3_dev
);
3435 r
= mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport
, l3_dev
);
3440 mlxsw_sp_vport_fid_set(mlxsw_sp_vport
, r
->f
);
3443 netdev_dbg(mlxsw_sp_vport
->dev
, "Joined FID=%d\n", r
->f
->fid
);
3448 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port
*mlxsw_sp_vport
)
3450 struct mlxsw_sp_fid
*f
= mlxsw_sp_vport_fid_get(mlxsw_sp_vport
);
3452 netdev_dbg(mlxsw_sp_vport
->dev
, "Left FID=%d\n", f
->fid
);
3454 mlxsw_sp_vport_fid_set(mlxsw_sp_vport
, NULL
);
3455 if (--f
->ref_count
== 0)
3456 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport
, f
->r
);
3459 static int mlxsw_sp_inetaddr_vport_event(struct net_device
*l3_dev
,
3460 struct net_device
*port_dev
,
3461 unsigned long event
, u16 vid
)
3463 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(port_dev
);
3464 struct mlxsw_sp_port
*mlxsw_sp_vport
;
3466 mlxsw_sp_vport
= mlxsw_sp_port_vport_find(mlxsw_sp_port
, vid
);
3467 if (WARN_ON(!mlxsw_sp_vport
))
3472 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport
, l3_dev
);
3474 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport
);
3481 static int mlxsw_sp_inetaddr_port_event(struct net_device
*port_dev
,
3482 unsigned long event
)
3484 if (netif_is_bridge_port(port_dev
) || netif_is_lag_port(port_dev
))
3487 return mlxsw_sp_inetaddr_vport_event(port_dev
, port_dev
, event
, 1);
3490 static int __mlxsw_sp_inetaddr_lag_event(struct net_device
*l3_dev
,
3491 struct net_device
*lag_dev
,
3492 unsigned long event
, u16 vid
)
3494 struct net_device
*port_dev
;
3495 struct list_head
*iter
;
3498 netdev_for_each_lower_dev(lag_dev
, port_dev
, iter
) {
3499 if (mlxsw_sp_port_dev_check(port_dev
)) {
3500 err
= mlxsw_sp_inetaddr_vport_event(l3_dev
, port_dev
,
3510 static int mlxsw_sp_inetaddr_lag_event(struct net_device
*lag_dev
,
3511 unsigned long event
)
3513 if (netif_is_bridge_port(lag_dev
))
3516 return __mlxsw_sp_inetaddr_lag_event(lag_dev
, lag_dev
, event
, 1);
3519 static struct mlxsw_sp_fid
*mlxsw_sp_bridge_fid_get(struct mlxsw_sp
*mlxsw_sp
,
3520 struct net_device
*l3_dev
)
3524 if (is_vlan_dev(l3_dev
))
3525 fid
= vlan_dev_vlan_id(l3_dev
);
3526 else if (mlxsw_sp
->master_bridge
.dev
== l3_dev
)
3529 return mlxsw_sp_vfid_find(mlxsw_sp
, l3_dev
);
3531 return mlxsw_sp_fid_find(mlxsw_sp
, fid
);
3534 static enum mlxsw_flood_table_type
mlxsw_sp_flood_table_type_get(u16 fid
)
3536 return mlxsw_sp_fid_is_vfid(fid
) ? MLXSW_REG_SFGC_TABLE_TYPE_FID
:
3537 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST
;
3540 static u16
mlxsw_sp_flood_table_index_get(u16 fid
)
3542 return mlxsw_sp_fid_is_vfid(fid
) ? mlxsw_sp_fid_to_vfid(fid
) : fid
;
3545 static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp
*mlxsw_sp
, u16 fid
,
3548 enum mlxsw_flood_table_type table_type
;
3553 sftr_pl
= kmalloc(MLXSW_REG_SFTR_LEN
, GFP_KERNEL
);
3557 table_type
= mlxsw_sp_flood_table_type_get(fid
);
3558 index
= mlxsw_sp_flood_table_index_get(fid
);
3559 mlxsw_reg_sftr_pack(sftr_pl
, MLXSW_SP_FLOOD_TABLE_BM
, index
, table_type
,
3560 1, MLXSW_PORT_ROUTER_PORT
, set
);
3561 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sftr
), sftr_pl
);
3567 static enum mlxsw_reg_ritr_if_type
mlxsw_sp_rif_type_get(u16 fid
)
3569 if (mlxsw_sp_fid_is_vfid(fid
))
3570 return MLXSW_REG_RITR_FID_IF
;
3572 return MLXSW_REG_RITR_VLAN_IF
;
3575 static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp
*mlxsw_sp
,
3576 struct net_device
*l3_dev
,
3580 enum mlxsw_reg_ritr_if_type rif_type
;
3581 char ritr_pl
[MLXSW_REG_RITR_LEN
];
3583 rif_type
= mlxsw_sp_rif_type_get(fid
);
3584 mlxsw_reg_ritr_pack(ritr_pl
, create
, rif_type
, rif
, l3_dev
->mtu
,
3586 mlxsw_reg_ritr_fid_set(ritr_pl
, rif_type
, fid
);
3588 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ritr
), ritr_pl
);
3591 static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp
*mlxsw_sp
,
3592 struct net_device
*l3_dev
,
3593 struct mlxsw_sp_fid
*f
)
3595 struct mlxsw_sp_rif
*r
;
3599 rif
= mlxsw_sp_avail_rif_get(mlxsw_sp
);
3600 if (rif
== MLXSW_SP_INVALID_RIF
)
3603 err
= mlxsw_sp_router_port_flood_set(mlxsw_sp
, f
->fid
, true);
3607 err
= mlxsw_sp_rif_bridge_op(mlxsw_sp
, l3_dev
, f
->fid
, rif
, true);
3609 goto err_rif_bridge_op
;
3611 err
= mlxsw_sp_rif_fdb_op(mlxsw_sp
, l3_dev
->dev_addr
, f
->fid
, true);
3613 goto err_rif_fdb_op
;
3615 r
= mlxsw_sp_rif_alloc(rif
, l3_dev
, f
);
3622 mlxsw_sp
->rifs
[rif
] = r
;
3624 netdev_dbg(l3_dev
, "RIF=%d created\n", rif
);
3629 mlxsw_sp_rif_fdb_op(mlxsw_sp
, l3_dev
->dev_addr
, f
->fid
, false);
3631 mlxsw_sp_rif_bridge_op(mlxsw_sp
, l3_dev
, f
->fid
, rif
, false);
3633 mlxsw_sp_router_port_flood_set(mlxsw_sp
, f
->fid
, false);
3637 void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp
*mlxsw_sp
,
3638 struct mlxsw_sp_rif
*r
)
3640 struct net_device
*l3_dev
= r
->dev
;
3641 struct mlxsw_sp_fid
*f
= r
->f
;
3644 mlxsw_sp
->rifs
[rif
] = NULL
;
3649 mlxsw_sp_rif_fdb_op(mlxsw_sp
, l3_dev
->dev_addr
, f
->fid
, false);
3651 mlxsw_sp_rif_bridge_op(mlxsw_sp
, l3_dev
, f
->fid
, rif
, false);
3653 mlxsw_sp_router_port_flood_set(mlxsw_sp
, f
->fid
, false);
3655 netdev_dbg(l3_dev
, "RIF=%d destroyed\n", rif
);
3658 static int mlxsw_sp_inetaddr_bridge_event(struct net_device
*l3_dev
,
3659 struct net_device
*br_dev
,
3660 unsigned long event
)
3662 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_lower_get(l3_dev
);
3663 struct mlxsw_sp_fid
*f
;
3665 /* FID can either be an actual FID if the L3 device is the
3666 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
3667 * L3 device is a VLAN-unaware bridge and we get a vFID.
3669 f
= mlxsw_sp_bridge_fid_get(mlxsw_sp
, l3_dev
);
3675 return mlxsw_sp_rif_bridge_create(mlxsw_sp
, l3_dev
, f
);
3677 mlxsw_sp_rif_bridge_destroy(mlxsw_sp
, f
->r
);
3684 static int mlxsw_sp_inetaddr_vlan_event(struct net_device
*vlan_dev
,
3685 unsigned long event
)
3687 struct net_device
*real_dev
= vlan_dev_real_dev(vlan_dev
);
3688 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_lower_get(vlan_dev
);
3689 u16 vid
= vlan_dev_vlan_id(vlan_dev
);
3691 if (mlxsw_sp_port_dev_check(real_dev
))
3692 return mlxsw_sp_inetaddr_vport_event(vlan_dev
, real_dev
, event
,
3694 else if (netif_is_lag_master(real_dev
))
3695 return __mlxsw_sp_inetaddr_lag_event(vlan_dev
, real_dev
, event
,
3697 else if (netif_is_bridge_master(real_dev
) &&
3698 mlxsw_sp
->master_bridge
.dev
== real_dev
)
3699 return mlxsw_sp_inetaddr_bridge_event(vlan_dev
, real_dev
,
3705 static int mlxsw_sp_inetaddr_event(struct notifier_block
*unused
,
3706 unsigned long event
, void *ptr
)
3708 struct in_ifaddr
*ifa
= (struct in_ifaddr
*) ptr
;
3709 struct net_device
*dev
= ifa
->ifa_dev
->dev
;
3710 struct mlxsw_sp
*mlxsw_sp
;
3711 struct mlxsw_sp_rif
*r
;
3714 mlxsw_sp
= mlxsw_sp_lower_get(dev
);
3718 r
= mlxsw_sp_rif_find_by_dev(mlxsw_sp
, dev
);
3719 if (!mlxsw_sp_rif_should_config(r
, event
))
3722 if (mlxsw_sp_port_dev_check(dev
))
3723 err
= mlxsw_sp_inetaddr_port_event(dev
, event
);
3724 else if (netif_is_lag_master(dev
))
3725 err
= mlxsw_sp_inetaddr_lag_event(dev
, event
);
3726 else if (netif_is_bridge_master(dev
))
3727 err
= mlxsw_sp_inetaddr_bridge_event(dev
, dev
, event
);
3728 else if (is_vlan_dev(dev
))
3729 err
= mlxsw_sp_inetaddr_vlan_event(dev
, event
);
3732 return notifier_from_errno(err
);
3735 static int mlxsw_sp_rif_edit(struct mlxsw_sp
*mlxsw_sp
, u16 rif
,
3736 const char *mac
, int mtu
)
3738 char ritr_pl
[MLXSW_REG_RITR_LEN
];
3741 mlxsw_reg_ritr_rif_pack(ritr_pl
, rif
);
3742 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(ritr
), ritr_pl
);
3746 mlxsw_reg_ritr_mtu_set(ritr_pl
, mtu
);
3747 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl
, mac
);
3748 mlxsw_reg_ritr_op_set(ritr_pl
, MLXSW_REG_RITR_RIF_CREATE
);
3749 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(ritr
), ritr_pl
);
3752 static int mlxsw_sp_netdevice_router_port_event(struct net_device
*dev
)
3754 struct mlxsw_sp
*mlxsw_sp
;
3755 struct mlxsw_sp_rif
*r
;
3758 mlxsw_sp
= mlxsw_sp_lower_get(dev
);
3762 r
= mlxsw_sp_rif_find_by_dev(mlxsw_sp
, dev
);
3766 err
= mlxsw_sp_rif_fdb_op(mlxsw_sp
, r
->addr
, r
->f
->fid
, false);
3770 err
= mlxsw_sp_rif_edit(mlxsw_sp
, r
->rif
, dev
->dev_addr
, dev
->mtu
);
3774 err
= mlxsw_sp_rif_fdb_op(mlxsw_sp
, dev
->dev_addr
, r
->f
->fid
, true);
3776 goto err_rif_fdb_op
;
3778 ether_addr_copy(r
->addr
, dev
->dev_addr
);
3781 netdev_dbg(dev
, "Updated RIF=%d\n", r
->rif
);
3786 mlxsw_sp_rif_edit(mlxsw_sp
, r
->rif
, r
->addr
, r
->mtu
);
3788 mlxsw_sp_rif_fdb_op(mlxsw_sp
, r
->addr
, r
->f
->fid
, true);
3792 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port
*lag_port
,
3795 if (mlxsw_sp_fid_is_vfid(fid
))
3796 return mlxsw_sp_port_vport_find_by_fid(lag_port
, fid
);
3798 return test_bit(fid
, lag_port
->active_vlans
);
3801 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port
*mlxsw_sp_port
,
3804 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3805 u8 local_port
= mlxsw_sp_port
->local_port
;
3806 u16 lag_id
= mlxsw_sp_port
->lag_id
;
3807 u64 max_lag_members
;
3810 if (!mlxsw_sp_port
->lagged
)
3813 max_lag_members
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
3815 for (i
= 0; i
< max_lag_members
; i
++) {
3816 struct mlxsw_sp_port
*lag_port
;
3818 lag_port
= mlxsw_sp_port_lagged_get(mlxsw_sp
, lag_id
, i
);
3819 if (!lag_port
|| lag_port
->local_port
== local_port
)
3821 if (mlxsw_sp_lag_port_fid_member(lag_port
, fid
))
static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
		return 0;

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
							     fid);

	return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}

3875 static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp
*mlxsw_sp
)
3877 struct mlxsw_sp_fid
*f
, *tmp
;
3879 list_for_each_entry_safe(f
, tmp
, &mlxsw_sp
->fids
, list
)
3880 if (--f
->ref_count
== 0)
3881 mlxsw_sp_fid_destroy(mlxsw_sp
, f
);
3886 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp
*mlxsw_sp
,
3887 struct net_device
*br_dev
)
3889 return !mlxsw_sp
->master_bridge
.dev
||
3890 mlxsw_sp
->master_bridge
.dev
== br_dev
;
3893 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp
*mlxsw_sp
,
3894 struct net_device
*br_dev
)
3896 mlxsw_sp
->master_bridge
.dev
= br_dev
;
3897 mlxsw_sp
->master_bridge
.ref_count
++;
3900 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp
*mlxsw_sp
)
3902 if (--mlxsw_sp
->master_bridge
.ref_count
== 0) {
3903 mlxsw_sp
->master_bridge
.dev
= NULL
;
3904 /* It's possible upper VLAN devices are still holding
3905 * references to underlying FIDs. Drop the reference
3906 * and release the resources if it was the last one.
3907 * If it wasn't, then something bad happened.
3909 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp
);
3913 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port
*mlxsw_sp_port
,
3914 struct net_device
*br_dev
)
3916 struct net_device
*dev
= mlxsw_sp_port
->dev
;
3919 /* When port is not bridged untagged packets are tagged with
3920 * PVID=VID=1, thereby creating an implicit VLAN interface in
3921 * the device. Remove it and let bridge code take care of its
3924 err
= mlxsw_sp_port_kill_vid(dev
, 0, 1);
3928 mlxsw_sp_master_bridge_inc(mlxsw_sp_port
->mlxsw_sp
, br_dev
);
3930 mlxsw_sp_port
->learning
= 1;
3931 mlxsw_sp_port
->learning_sync
= 1;
3932 mlxsw_sp_port
->uc_flood
= 1;
3933 mlxsw_sp_port
->bridged
= 1;
3938 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port
*mlxsw_sp_port
)
3940 struct net_device
*dev
= mlxsw_sp_port
->dev
;
3942 mlxsw_sp_port_pvid_set(mlxsw_sp_port
, 1);
3944 mlxsw_sp_master_bridge_dec(mlxsw_sp_port
->mlxsw_sp
);
3946 mlxsw_sp_port
->learning
= 0;
3947 mlxsw_sp_port
->learning_sync
= 0;
3948 mlxsw_sp_port
->uc_flood
= 0;
3949 mlxsw_sp_port
->bridged
= 0;
3951 /* Add implicit VLAN interface in the device, so that untagged
3952 * packets will be classified to the default vFID.
3954 mlxsw_sp_port_add_vid(dev
, 0, 1);
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

3984 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port
*mlxsw_sp_port
,
3987 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3988 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
3990 mlxsw_reg_slcor_port_remove_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
3992 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
3995 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port
*mlxsw_sp_port
,
3998 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
3999 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
4001 mlxsw_reg_slcor_col_enable_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
4003 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
4006 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port
*mlxsw_sp_port
,
4009 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
4010 char slcor_pl
[MLXSW_REG_SLCOR_LEN
];
4012 mlxsw_reg_slcor_col_disable_pack(slcor_pl
, mlxsw_sp_port
->local_port
,
4014 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(slcor
), slcor_pl
);
4017 static int mlxsw_sp_lag_index_get(struct mlxsw_sp
*mlxsw_sp
,
4018 struct net_device
*lag_dev
,
4021 struct mlxsw_sp_upper
*lag
;
4022 int free_lag_id
= -1;
4026 max_lag
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
, MAX_LAG
);
4027 for (i
= 0; i
< max_lag
; i
++) {
4028 lag
= mlxsw_sp_lag_get(mlxsw_sp
, i
);
4029 if (lag
->ref_count
) {
4030 if (lag
->dev
== lag_dev
) {
4034 } else if (free_lag_id
< 0) {
4038 if (free_lag_id
< 0)
4040 *p_lag_id
= free_lag_id
;
4045 mlxsw_sp_master_lag_check(struct mlxsw_sp
*mlxsw_sp
,
4046 struct net_device
*lag_dev
,
4047 struct netdev_lag_upper_info
*lag_upper_info
)
4051 if (mlxsw_sp_lag_index_get(mlxsw_sp
, lag_dev
, &lag_id
) != 0)
4053 if (lag_upper_info
->tx_type
!= NETDEV_LAG_TX_TYPE_HASH
)
4058 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp
*mlxsw_sp
,
4059 u16 lag_id
, u8
*p_port_index
)
4061 u64 max_lag_members
;
4064 max_lag_members
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
4066 for (i
= 0; i
< max_lag_members
; i
++) {
4067 if (!mlxsw_sp_port_lagged_get(mlxsw_sp
, lag_id
, i
)) {
4076 mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port
*mlxsw_sp_port
,
4079 struct mlxsw_sp_port
*mlxsw_sp_vport
;
4080 struct mlxsw_sp_fid
*f
;
4082 mlxsw_sp_vport
= mlxsw_sp_port_vport_find(mlxsw_sp_port
, 1);
4083 if (WARN_ON(!mlxsw_sp_vport
))
4086 /* If vPort is assigned a RIF, then leave it since it's no
4089 f
= mlxsw_sp_vport_fid_get(mlxsw_sp_vport
);
4091 f
->leave(mlxsw_sp_vport
);
4093 mlxsw_sp_vport
->lag_id
= lag_id
;
4094 mlxsw_sp_vport
->lagged
= 1;
4098 mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port
*mlxsw_sp_port
)
4100 struct mlxsw_sp_port
*mlxsw_sp_vport
;
4101 struct mlxsw_sp_fid
*f
;
4103 mlxsw_sp_vport
= mlxsw_sp_port_vport_find(mlxsw_sp_port
, 1);
4104 if (WARN_ON(!mlxsw_sp_vport
))
4107 f
= mlxsw_sp_vport_fid_get(mlxsw_sp_vport
);
4109 f
->leave(mlxsw_sp_vport
);
4111 mlxsw_sp_vport
->lagged
= 0;
4114 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port
*mlxsw_sp_port
,
4115 struct net_device
*lag_dev
)
4117 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
4118 struct mlxsw_sp_upper
*lag
;
4123 err
= mlxsw_sp_lag_index_get(mlxsw_sp
, lag_dev
, &lag_id
);
4126 lag
= mlxsw_sp_lag_get(mlxsw_sp
, lag_id
);
4127 if (!lag
->ref_count
) {
4128 err
= mlxsw_sp_lag_create(mlxsw_sp
, lag_id
);
4134 err
= mlxsw_sp_port_lag_index_get(mlxsw_sp
, lag_id
, &port_index
);
4137 err
= mlxsw_sp_lag_col_port_add(mlxsw_sp_port
, lag_id
, port_index
);
4139 goto err_col_port_add
;
4140 err
= mlxsw_sp_lag_col_port_enable(mlxsw_sp_port
, lag_id
);
4142 goto err_col_port_enable
;
4144 mlxsw_core_lag_mapping_set(mlxsw_sp
->core
, lag_id
, port_index
,
4145 mlxsw_sp_port
->local_port
);
4146 mlxsw_sp_port
->lag_id
= lag_id
;
4147 mlxsw_sp_port
->lagged
= 1;
4150 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port
, lag_id
);
4154 err_col_port_enable
:
4155 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port
, lag_id
);
4157 if (!lag
->ref_count
)
4158 mlxsw_sp_lag_destroy(mlxsw_sp
, lag_id
);
4162 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port
*mlxsw_sp_port
,
4163 struct net_device
*lag_dev
)
4165 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
4166 u16 lag_id
= mlxsw_sp_port
->lag_id
;
4167 struct mlxsw_sp_upper
*lag
;
4169 if (!mlxsw_sp_port
->lagged
)
4171 lag
= mlxsw_sp_lag_get(mlxsw_sp
, lag_id
);
4172 WARN_ON(lag
->ref_count
== 0);
4174 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port
, lag_id
);
4175 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port
, lag_id
);
4177 if (mlxsw_sp_port
->bridged
) {
4178 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port
);
4179 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
);
4182 if (lag
->ref_count
== 1)
4183 mlxsw_sp_lag_destroy(mlxsw_sp
, lag_id
);
4185 mlxsw_core_lag_mapping_clear(mlxsw_sp
->core
, lag_id
,
4186 mlxsw_sp_port
->local_port
);
4187 mlxsw_sp_port
->lagged
= 0;
4190 mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port
);
4193 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
4196 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
4197 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
4199 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl
, lag_id
,
4200 mlxsw_sp_port
->local_port
);
4201 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
4204 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port
*mlxsw_sp_port
,
4207 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
4208 char sldr_pl
[MLXSW_REG_SLDR_LEN
];
4210 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl
, lag_id
,
4211 mlxsw_sp_port
->local_port
);
4212 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sldr
), sldr_pl
);
4215 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
4216 bool lag_tx_enabled
)
4219 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port
,
4220 mlxsw_sp_port
->lag_id
);
4222 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port
,
4223 mlxsw_sp_port
->lag_id
);
4226 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port
*mlxsw_sp_port
,
4227 struct netdev_lag_lower_state_info
*info
)
4229 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port
, info
->tx_enabled
);
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}

4260 static int mlxsw_sp_netdevice_port_upper_event(struct net_device
*dev
,
4261 unsigned long event
, void *ptr
)
4263 struct netdev_notifier_changeupper_info
*info
;
4264 struct mlxsw_sp_port
*mlxsw_sp_port
;
4265 struct net_device
*upper_dev
;
4266 struct mlxsw_sp
*mlxsw_sp
;
4269 mlxsw_sp_port
= netdev_priv(dev
);
4270 mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
4274 case NETDEV_PRECHANGEUPPER
:
4275 upper_dev
= info
->upper_dev
;
4276 if (!is_vlan_dev(upper_dev
) &&
4277 !netif_is_lag_master(upper_dev
) &&
4278 !netif_is_bridge_master(upper_dev
))
4282 /* HW limitation forbids to put ports to multiple bridges. */
4283 if (netif_is_bridge_master(upper_dev
) &&
4284 !mlxsw_sp_master_bridge_check(mlxsw_sp
, upper_dev
))
4286 if (netif_is_lag_master(upper_dev
) &&
4287 !mlxsw_sp_master_lag_check(mlxsw_sp
, upper_dev
,
4290 if (netif_is_lag_master(upper_dev
) && vlan_uses_dev(dev
))
4292 if (netif_is_lag_port(dev
) && is_vlan_dev(upper_dev
) &&
4293 !netif_is_lag_master(vlan_dev_real_dev(upper_dev
)))
4296 case NETDEV_CHANGEUPPER
:
4297 upper_dev
= info
->upper_dev
;
4298 if (is_vlan_dev(upper_dev
)) {
4300 err
= mlxsw_sp_port_vlan_link(mlxsw_sp_port
,
4303 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port
,
4305 } else if (netif_is_bridge_master(upper_dev
)) {
4307 err
= mlxsw_sp_port_bridge_join(mlxsw_sp_port
,
4310 mlxsw_sp_port_bridge_leave(mlxsw_sp_port
);
4311 } else if (netif_is_lag_master(upper_dev
)) {
4313 err
= mlxsw_sp_port_lag_join(mlxsw_sp_port
,
4316 mlxsw_sp_port_lag_leave(mlxsw_sp_port
,
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}
static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}
static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}
static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;
	if (br_dev != mlxsw_sp->master_bridge.dev)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			break;
		if (info->linking) {
			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
							       upper_dev);
			if (err)
				return err;
		} else {
			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
		}
		break;
	}

	return 0;
}
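/* vFIDs back VLAN upper devices (vPorts) that are enslaved to a bridge:
 * each bridge device gets its own vFID, and member vPorts have their
 * {Port, VID} mapped to it so that they are flooded and forwarded together.
 */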
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}
static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
		break;
	}

	return err;
}
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}
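/* Top-level netdevice notifier handler: address and MTU changes are
 * handed to the router code, while topology events are dispatched by
 * device type (physical port, LAG, bridge or VLAN upper).
 */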
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};
static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);