/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
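/* Prepend and fill the Tx header. All packets injected by the driver are
 * sent as control packets, directed at a specific local port and using the
 * control traffic class.
 */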
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
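/* Set the per-VLAN STP state of the port via the SPMS register. The
 * register payload is too large for the stack, so it is allocated
 * dynamically.
 */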
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
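/* The MTU programmed to the device must account for the Tx header and the
 * Ethernet header, and may not exceed the maximum MTU reported by the
 * PMTU register.
 */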
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
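/* Query the PMLP register for the module (cage), width and first lane
 * behind a given local port.
 */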
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
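/* Transmit through the CPU port: ensure headroom for the Tx header, pad to
 * the minimum Ethernet frame size, construct the Tx header and hand the
 * skb to the core for transmission.
 */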
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}
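/* Resize the port's priority group (PG) buffers for a new MTU. A PG that
 * some priority is mapped to is packed as lossless (with extra delay
 * headroom) when PAUSE or PFC is enabled, and as lossy otherwise.
 */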
int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
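/* Transition the port to Virtual mode: install an explicit {Port, VID} to
 * FID mapping for every active VLAN and then enable virtual-port mode,
 * rolling the mappings back on failure.
 */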
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
static struct mlxsw_sp_vfid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
		if (vfid->vid == vid)
			return vfid;
	}

	return NULL;
}
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}
static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
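/* .ndo_vlan_rx_add_vid handler: back the new VLAN with a vFID and a vPort,
 * map {Port, VID} to the vFID, disable learning and set the VLAN to
 * forwarding. Every step is unwound in reverse order on failure.
 */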
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}
static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}
static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
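/* Helpers translating between the PTYS register's Ethernet protocol
 * bitmasks and the legacy ethtool SUPPORTED_* / ADVERTISED_* bitmasks.
 */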
static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}
static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}
static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}
static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	if (!netif_running(dev))
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
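/* Create one front-panel port: allocate the netdev and its VLAN bitmaps,
 * program the hardware (system port mapping, SWID, speeds, MTU, buffers,
 * ETS, DCB) and only then register the netdev and the core port.
 */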
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
				   mlxsw_sp_port->local_port, dev,
				   mlxsw_sp_port->split, module);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			mlxsw_sp_port->local_port);
		goto err_core_port_init;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
				 &mlxsw_sp_port->vports_list, vport.list) {
		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

		/* vPorts created for VLAN devices should already be gone
		 * by now, since we unregistered the port netdev.
		 */
		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	}
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}
static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}
static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}
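/* Create 'count' split ports starting at 'base_port', each mapped to a
 * subset of the module's lanes.
 */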
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);

	return err;
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}
static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}

static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

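/* Every entry in the table below reuses the same handler; only the trap
 * ID differs. The listed traps punt L2 control traffic (STP, LACP, EAPOL,
 * LLDP, MMRP/MVRP, RPVST, DHCP and the IGMP message types) to the CPU so
 * the kernel's own protocol handling can see it instead of the switch
 * forwarding it purely in hardware.
 */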
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

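/* Teardown mirrors the setup loop: the FORWARD action is restored for
 * each trap before its listener is unregistered, so no packets are left
 * being punted to a listener that is about to disappear.
 */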
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}

static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
				 enum mlxsw_reg_sfgc_type type,
				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
{
	enum mlxsw_flood_table_type table_type;
	enum mlxsw_sp_flood_table flood_table;
	char sfgc_pl[MLXSW_REG_SFGC_LEN];

	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
	else
		flood_table = MLXSW_SP_FLOOD_TABLE_BM;

	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
			    flood_table);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
}

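/* The init below runs the helper above for every SFGC packet type and
 * for both bridge types, so unknown-unicast traffic floods via the UC
 * table while the remaining types go via the BM table; per-VLAN (vFID)
 * bridges use the plain FID table type, 802.1Q bridges the FID-offset
 * one.
 */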
static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
		if (err)
			return err;

		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}

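/* Enabling every hash field above gives the broadest LAG distribution
 * the register exposes: L2 (MACs, EtherType, VLAN ID), L3 (IP source and
 * destination) and L4 (ports, IP protocol) all feed the hash.
 */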
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}

static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

static struct mlxsw_driver mlxsw_sp_driver = {
	.kind				= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner				= THIS_MODULE,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp_config_profile,
};

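/* FDB flushing comes in four flavours, all driven through the SFDF
 * register: per port, per {port, FID}, per LAG and per {LAG, FID}. The
 * helpers below pack the matching flush type and identifier.
 */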
static int
mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
	mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
						mlxsw_sp_port->local_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
	mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
	mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}

static int
__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

static int
__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, last_err = 0;
	u16 vid;

	for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
		if (err)
			last_err = err;
	}

	return last_err;
}

static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
	if (!list_empty(&mlxsw_sp_port->vports_list))
		if (mlxsw_sp_port->lagged)
			return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
		else
			return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
	else
		if (mlxsw_sp_port->lagged)
			return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
		else
			return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
}

static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);

	if (mlxsw_sp_vport->lagged)
		return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
							     fid);
	else
		return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
}

static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When port is not bridged untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let bridge code take care of its
	 * own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		return err;

	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->bridged = 1;

	return 0;
}

static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool flush_fdb)
{
	struct net_device *dev = mlxsw_sp_port->dev;

	if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
		netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");

	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);

	mlxsw_sp_port->learning = 0;
	mlxsw_sp_port->learning_sync = 0;
	mlxsw_sp_port->uc_flood = 0;
	mlxsw_sp_port->bridged = 0;

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	return mlxsw_sp_port_add_vid(dev, 0, 1);
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}

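/* The device supports only one master bridge (see the "HW limitation"
 * note in the netdev event handler below), so it is tracked as a
 * ref-counted singleton: the first port to join records the bridge
 * device and the last one to leave clears it.
 */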
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb);

static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
		mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	if (lag->ref_count == 1) {
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	mlxsw_sp_vport->dev = vlan_dev;

	return 0;
}

static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct net_device *vlan_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		WARN_ON(!mlxsw_sp_vport);
		return -EINVAL;
	}

	/* When removing a VLAN device while still bridged we should first
	 * remove it from the bridge, as we receive the bridge's notification
	 * when the vPort is already gone.
	 */
	if (mlxsw_sp_vport->bridged) {
		struct net_device *br_dev;

		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
	}

	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;

	return 0;
}

static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids putting ports into multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return NOTIFY_DONE;
}

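/* Dispatch: topology changes (bridge, LAG and VLAN uppers) go through the
 * upper-event handler above, while LAG member tx-state changes arrive as
 * lower-state events and only need to be reflected into the LAG
 * distributor configuration.
 */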
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}

static struct mlxsw_sp_vfid *
mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
		if (vfid->br_dev == br_dev)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}

static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}

static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}

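/* vFID numbering is partitioned: the first MLXSW_SP_VFID_PORT_MAX entries
 * back per-port VLAN devices and bridge vFIDs live above them, e.g.
 * br_vfid 0 corresponds to vfid MLXSW_SP_VFID_PORT_MAX. The two
 * conversion helpers above are just this fixed-offset mapping, and
 * allocation walks the br_vfids.mapped bitmap for a free slot.
 */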
static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_vfid *vfid)
{
	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);

	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}

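/* mlxsw_sp_vport_bridge_leave() below reverses what
 * mlxsw_sp_vport_bridge_join() (further down) sets up: the vPort is
 * remapped from the bridge's vFID back to a per-VID vFID (created on
 * demand), learning and flooding are turned off, STP state is forced back
 * to forwarding and the FDB is optionally flushed.
 */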
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
				       struct net_device *br_dev,
				       bool flush_fdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid, *new_vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		WARN_ON(!vfid);
		return -EINVAL;
	}

	/* We need a vFID to go back to after leaving the bridge's vFID. */
	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!new_vfid) {
		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(new_vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(new_vfid);
		}
	}

	/* Invalidate existing {Port, VID} to vFID mapping and create a new
	 * one for the new vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   new_vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning\n");
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
				       false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		goto err_vport_flood_set;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
		netdev_err(dev, "Failed to flush FDB\n");

	/* Switch between the vFIDs and destroy the old one if needed. */
	new_vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = new_vfid;
	vfid->nr_vports--;
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;

	return 0;

err_port_stp_state_set:
err_vport_flood_set:
err_port_vid_learning_set:
err_port_vid_to_fid_validate:
err_port_vid_to_fid_invalidate:
	/* Rollback vFID only if new. */
	if (!new_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
	return err;
}

static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	struct mlxsw_sp_vfid *vfid;
	int err;

	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
	if (!vfid) {
		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create bridge vFID\n");
			return PTR_ERR(vfid);
		}
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
	if (err) {
		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
			   vfid->vfid);
		goto err_port_flood_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	/* We need to invalidate existing {Port, VID} to vFID mapping and
	 * create a new one for the bridge's vFID.
	 */
	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
			   old_vfid->vfid);
		goto err_port_vid_to_fid_invalidate;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
			   vfid->vfid);
		goto err_port_vid_to_fid_validate;
	}

	/* Switch between the vFIDs and destroy the old one if needed. */
	vfid->nr_vports++;
	mlxsw_sp_vport->vport.vfid = vfid;
	old_vfid->nr_vports--;
	if (!old_vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_to_fid_validate:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
err_port_vid_to_fid_invalidate:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
err_port_flood_set:
	if (!vfid->nr_vports)
		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
	return err;
}

static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
			return false;
	}

	return true;
}

static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		if (!netif_is_bridge_master(upper_dev))
			return NOTIFY_BAD;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port and being members in the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			break;
		if (info->linking) {
			if (!mlxsw_sp_vport) {
				WARN_ON(!mlxsw_sp_vport);
				return NOTIFY_BAD;
			}
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
			if (err) {
				netdev_err(dev, "Failed to join bridge\n");
				return NOTIFY_BAD;
			}
		} else {
			/* We ignore bridge's unlinking notifications if vPort
			 * is gone, since we already left the bridge when the
			 * VLAN device was unlinked from the real device.
			 */
			if (!mlxsw_sp_vport)
				return NOTIFY_DONE;
			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
							  upper_dev, true);
			if (err) {
				netdev_err(dev, "Failed to leave bridge\n");
				return NOTIFY_BAD;
			}
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_netdevice_port_event(dev, event, ptr);

	if (netif_is_lag_master(dev))
		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);

	if (is_vlan_dev(dev))
		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return NOTIFY_DONE;
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);