/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

/* Driver-local headers (register, port, trap and Tx header definitions) that
 * the code below relies on.
 */
#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
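/* mlxsw_sp_txhdr_construct() below fills this header for control packets sent
 * from the CPU; the header is pushed in front of the skb data with skb_push().
 */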
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
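/* The switch base MAC is read once from the SPAD register; per-port MAC
 * addresses are derived from it (see mlxsw_sp_port_dev_addr_init()).
 */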
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
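/* The MTU programmed into the device accounts for the Tx header and the
 * Ethernet header on top of the payload the stack sees.
 */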
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
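/* In Virtual mode the device forwards based on explicit {Port, VID} to FID
 * mappings programmed through the SVFA register; the helpers below manage
 * those mappings and per-VID learning.
 */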
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
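/* Intentionally an empty handler; the driver does not program RX filters from
 * this hook.
 */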
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}
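/* Headroom buffers are sized per priority group: lossless PGs get 2 * MTU
 * worth of cells plus the pause/PFC delay allowance, lossy PGs get 2 * MTU
 * only (see mlxsw_sp_pg_buf_pack() above).
 */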
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will stay as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		return -ENOMEM;
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	return 0;

err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
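/* Netdev callbacks; FDB and bridge operations are delegated to the common
 * switchdev helpers.
 */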
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_neigh_construct	= mlxsw_sp_router_neigh_construct,
	.ndo_neigh_destroy	= mlxsw_sp_router_neigh_destroy,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
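/* The per-priority and per-TC counters below are reported once for each of
 * the IEEE_8021QAZ_MAX_TCS traffic classes when strings and statistics are
 * exposed through ethtool.
 */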
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl)
{
	u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);

	return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
}

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}
*dev
,
1051 u32 stringset
, u8
*data
)
1056 switch (stringset
) {
1058 for (i
= 0; i
< MLXSW_SP_PORT_HW_STATS_LEN
; i
++) {
1059 memcpy(p
, mlxsw_sp_port_hw_stats
[i
].str
,
1061 p
+= ETH_GSTRING_LEN
;
1064 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
1065 mlxsw_sp_port_get_prio_strings(&p
, i
);
1067 for (i
= 0; i
< IEEE_8021QAZ_MAX_TCS
; i
++)
1068 mlxsw_sp_port_get_tc_strings(&p
, i
);
1074 static int mlxsw_sp_port_set_phys_id(struct net_device
*dev
,
1075 enum ethtool_phys_id_state state
)
1077 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1078 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1079 char mlcr_pl
[MLXSW_REG_MLCR_LEN
];
1083 case ETHTOOL_ID_ACTIVE
:
1086 case ETHTOOL_ID_INACTIVE
:
1093 mlxsw_reg_mlcr_pack(mlcr_pl
, mlxsw_sp_port
->local_port
, active
);
1094 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(mlcr
), mlcr_pl
);
static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}
static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < len; i++)
		data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
}
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
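/* The table above maps PTYS register speed bits to the legacy ethtool
 * SUPPORTED_* and ADVERTISED_* link mode bits; the helpers below translate
 * between the two representations in both directions.
 */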
static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}
static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}
static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause |
			 SUPPORTED_Autoneg;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	if (!netif_running(dev))
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarcy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarcies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
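/* Port creation below wires everything together: netdev allocation, address
 * and MTU setup, buffers, ETS and DCB initialization, switchdev init and
 * finally netdev registration. Error paths unwind in reverse order.
 */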
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
				   mlxsw_sp_port->local_port, dev,
				   mlxsw_sp_port->split, module);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			mlxsw_sp_port->local_port);
		goto err_core_port_init;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
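/* Control packet types that must reach the CPU. Each entry traps one packet
 * type on any local port using the common RX handler above.
 */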
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_ARPBC,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_ARPUC,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IP2ME,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
	},
};
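/* Trap initialization: create the RX and control trap groups, then register
 * a listener and set the TRAP_TO_CPU action for every entry in the table
 * above. On error, the entries already set up are unwound in reverse order.
 */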
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}
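/* Flooding setup: unknown unicast traffic uses the UC flood table while
 * broadcast/multicast traffic uses the BM table, configured once per bridge
 * type (802.1Q FID vs. vFID) for every SFGC packet type.
 */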
static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
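/* Main init sequence for the ASIC: base MAC, PUDE events, traps, flood
 * tables, shared buffers, LAG hashing, switchdev, router and finally the
 * front panel ports. Teardown is performed in reverse order.
 */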
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->fids);
	INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		return err;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int i;

	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
	WARN_ON(!list_empty(&mlxsw_sp->fids));
	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
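/* Resource profile handed to the core/bus driver during device
 * initialization; it dimensions LAGs, MIDs, flood tables and KVD memory.
 */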
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_kvd_sizes			= 1,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.kvd_hash_single_size		= MLXSW_SP_KVD_HASH_SINGLE_SIZE,
	.kvd_hash_double_size		= MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};

static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
		if (mlxsw_sp_port_dev_check(lower_dev))
			return netdev_priv(lower_dev);
	}

	return NULL;
}

static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
		if (mlxsw_sp_port_dev_check(lower_dev))
			return netdev_priv(lower_dev);
	}

	return NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	rcu_read_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
	if (mlxsw_sp_port)
		dev_hold(mlxsw_sp_port->dev);
	rcu_read_unlock();

	return mlxsw_sp_port;
}

void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
{
	dev_put(mlxsw_sp_port->dev);
}
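/* Router interfaces (RIFs): the helpers below decide when an inetaddr event
 * should (un)configure a RIF and manage the RIF table and its associated
 * rFIDs.
 */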
static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
				       unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		if (!r)
			return true;
		r->ref_count++;
		return false;
	case NETDEV_DOWN:
		if (r && --r->ref_count == 0)
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}

static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
		if (!mlxsw_sp->rifs[i])
			return i;

	return MLXSW_SP_RIF_MAX;
}

static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
					   bool *p_lagged, u16 *p_system_port)
{
	u8 local_port = mlxsw_sp_vport->local_port;

	*p_lagged = mlxsw_sp_vport->lagged;
	*p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
}

static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *l3_dev, u16 rif,
				    bool create)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	bool lagged = mlxsw_sp_vport->lagged;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 system_port;

	mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
			    l3_dev->mtu, l3_dev->dev_addr);

	mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
				  mlxsw_sp_vport_vid_get(mlxsw_sp_vport));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *
mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->leave = mlxsw_sp_vport_rif_sp_leave;
	f->ref_count = 0;
	f->dev = l3_dev;
	f->fid = fid;

	return f;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	ether_addr_copy(r->addr, l3_dev->dev_addr);
	r->mtu = l3_dev->mtu;
	r->ref_count = 1;
	r->dev = l3_dev;
	r->rif = rif;
	r->f = f;

	return r;
}
static struct mlxsw_sp_rif *
mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
			     struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_fid *f;
	struct mlxsw_sp_rif *r;
	u16 fid, rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return ERR_PTR(-ERANGE);

	err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
	if (err)
		return ERR_PTR(err);

	fid = mlxsw_sp_rif_sp_to_fid(rif);
	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
	if (err)
		goto err_rif_fdb_op;

	f = mlxsw_sp_rfid_alloc(fid, l3_dev);
	if (!f) {
		err = -ENOMEM;
		goto err_rfid_alloc;
	}

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	return r;

err_rif_alloc:
	kfree(f);
err_rfid_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
err_rif_fdb_op:
	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
	return ERR_PTR(err);
}

static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
					  struct mlxsw_sp_rif *r)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 fid = f->fid;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	kfree(f);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);

	mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
}

static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *l3_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	struct mlxsw_sp_rif *r;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!r) {
		r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
		if (IS_ERR(r))
			return PTR_ERR(r);
	}

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
	r->f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);

	return 0;
}

static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
}
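/* inetaddr notifier dispatch: the L3 device can be a physical port, a LAG,
 * a bridge or a VLAN device on top of one of those; each case is resolved to
 * the vPorts that should join or leave the corresponding RIF.
 */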
static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
					 struct net_device *port_dev,
					 unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
							    event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}

static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
						    struct net_device *l3_dev)
{
	u16 fid;

	if (is_vlan_dev(l3_dev))
		fid = vlan_dev_vlan_id(l3_dev);
	else if (mlxsw_sp->master_bridge.dev == l3_dev)
		fid = 1;
	else
		return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);

	return mlxsw_sp_fid_find(mlxsw_sp, fid);
}

static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return MLXSW_REG_RITR_FID_IF;
	else
		return MLXSW_REG_RITR_VLAN_IF;
}

static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  u16 fid, u16 rif,
				  bool create)
{
	enum mlxsw_reg_ritr_if_type rif_type;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_type = mlxsw_sp_rif_type_get(fid);
	mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
			    l3_dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *l3_dev,
				      struct mlxsw_sp_fid *f)
{
	struct mlxsw_sp_rif *r;
	u16 rif;
	int err;

	rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
	if (rif == MLXSW_SP_RIF_MAX)
		return -ERANGE;

	err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
	if (err)
		return err;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
	if (!r) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	f->r = r;
	mlxsw_sp->rifs[rif] = r;

	netdev_dbg(l3_dev, "RIF=%d created\n", rif);

	return 0;

err_rif_alloc:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
	return err;
}

void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_rif *r)
{
	struct net_device *l3_dev = r->dev;
	struct mlxsw_sp_fid *f = r->f;
	u16 rif = r->rif;

	mlxsw_sp->rifs[rif] = NULL;
	f->r = NULL;

	kfree(r);

	mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);

	mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);

	netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  struct net_device *br_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_fid *f;

	/* FID can either be an actual FID if the L3 device is the
	 * VLAN-aware bridge or a VLAN device on top. Otherwise, the
	 * L3 device is a VLAN-unaware bridge and we get a vFID.
	 */
	f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
	if (WARN_ON(!f))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
	case NETDEV_DOWN:
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) &&
		 mlxsw_sp->master_bridge.dev == real_dev)
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
						      event);

	return 0;
}

static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(r, event))
		goto out;

	if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_inetaddr_vlan_event(dev, event);

out:
	return notifier_from_errno(err);
}
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *r;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!r)
		return 0;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(r->addr, dev->dev_addr);
	r->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", r->rif);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
	return err;
}
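/* FDB flushing: when a port leaves a FID its learned entries are flushed,
 * unless the port is a LAG slave and another slave is still a member of the
 * same FID.
 */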
static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
					 u16 fid)
{
	if (mlxsw_sp_fid_is_vfid(fid))
		return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
	else
		return test_bit(fid, lag_port->active_vlans);
}

static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (!lag_port || lag_port->local_port == local_port)
			continue;
		if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
			count++;
	}

	return !count;
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
		   mlxsw_sp_port->local_port, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
		   mlxsw_sp_port->lag_id, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
		return 0;

	if (mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}
static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_fid *f, *tmp;

	list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
		if (--f->ref_count == 0)
			mlxsw_sp_fid_destroy(mlxsw_sp, f);
		else
			WARN_ON_ONCE(1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0) {
		mlxsw_sp->master_bridge.dev = NULL;
		/* It's possible upper VLAN devices are still holding
		 * references to underlying FIDs. Drop the reference
		 * and release the resources if it was the last one.
		 * If it wasn't, then something bad happened.
		 */
		mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
	}
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *br_dev)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	mlxsw_sp_port_add_vid(dev, 0, 1);
}
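/* LAG handling: SLDR creates/destroys LAGs and adds/removes distributor
 * ports, while SLCOR manages collector membership of individual ports.
 */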
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static void
mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	/* If vPort is assigned a RIF, then leave it since it's no
	 * longer valid.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lag_id = lag_id;
	mlxsw_sp_vport->lagged = 1;
}

static void
mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f)
		f->leave(mlxsw_sp_vport);

	mlxsw_sp_vport->lagged = 0;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);

	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
	}

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return -EINVAL;

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_vport))
		return;

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
}
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
			return -EINVAL;
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
			else
				mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
							  upper_dev);
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								upper_dev);
			else
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
			else
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return 0;
}
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	return 0;
}

static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
					       struct net_device *vlan_dev)
{
	u16 fid = vlan_dev_vlan_id(vlan_dev);
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp, fid);
	if (f && f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
	if (f && --f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp, f);
}

static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
					   unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;
	if (br_dev != mlxsw_sp->master_bridge.dev)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev))
			break;
		if (info->linking) {
			err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
							       upper_dev);
			if (err)
				return err;
		} else {
			mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
		}
		break;
	}

	return 0;
}
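/* vFIDs back VLAN-unaware bridges. A vFID is allocated from a bitmap,
 * created in hardware via SFMR and reference counted by the vPorts mapped
 * to it.
 */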
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->vfids.mapped,
				   MLXSW_SP_VFID_MAX);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);

static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_fid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->leave = mlxsw_sp_vport_vfid_leave;
	f->fid = fid;
	f->dev = br_dev;

	list_add(&f->list, &mlxsw_sp->vfids.list);
	set_bit(vfid, mlxsw_sp->vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}

static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}

static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10, /* Must be called before FIB notifier block */
};
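/* Module entry points: the netdevice and inetaddr notifiers are registered
 * before the driver itself so that no events are missed once the driver
 * starts creating ports.
 */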
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);