/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

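/* Prepend the Tx header to an skb before handing it to the core for
 * transmission. The header is consumed by the device on the way out,
 * so it never appears on the wire.
 */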
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

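/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last octet, giving each port a unique,
 * deterministic address.
 */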
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

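/* The MTU programmed to the device must also cover the Tx header and
 * the Ethernet header, which are not part of the MTU seen by the
 * stack.
 */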
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

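/* Size a priority group (PG) buffer in the port's headroom. A lossless
 * PG (PAUSE or PFC enabled) is packed with an extra 'delay' allowance
 * on top of the 2 * MTU base and a threshold at the base size; a lossy
 * PG only gets the base size.
 */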
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

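/* Fold the per-CPU counters into the netdev stats. The 64-bit counters
 * are read under the u64_stats seqcount so that 32-bit readers observe
 * consistent values.
 */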
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

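/* Transition the port to Virtual mode: give every active VLAN an
 * explicit {Port, VID} to FID mapping instead of the global VID to FID
 * mapping. Mappings created before a failure are rolled back.
 */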
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

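/* A vPort is a shadow port structure instantiated per {Port, VID}
 * pair, used to represent a VLAN upper device on top of the physical
 * port.
 */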
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vid = vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	bool untagged = vid == 1;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		return -ENOMEM;
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	return 0;

err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
	return err;
}

static int mlxsw_sp_port_kill_vid(struct net_device *dev,
				  __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *f;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	/* Drop FID reference. If this was the last reference the
	 * resources will be freed.
	 */
	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_neigh_construct	= mlxsw_sp_router_neigh_construct,
	.ndo_neigh_destroy	= mlxsw_sp_router_neigh_destroy,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
	{
		.str = "rx_octets_prio",
		.getter = mlxsw_reg_ppcnt_rx_octets_get,
	},
	{
		.str = "rx_frames_prio",
		.getter = mlxsw_reg_ppcnt_rx_frames_get,
	},
	{
		.str = "tx_octets_prio",
		.getter = mlxsw_reg_ppcnt_tx_octets_get,
	},
	{
		.str = "tx_frames_prio",
		.getter = mlxsw_reg_ppcnt_tx_frames_get,
	},
	{
		.str = "rx_pause_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_get,
	},
	{
		.str = "rx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
	},
	{
		.str = "tx_pause_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_get,
	},
	{
		.str = "tx_pause_duration_prio",
		.getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
	},
};

#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)

static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl)
{
	u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);

	return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
}

static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
	{
		.str = "tc_transmit_queue_tc",
		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
	},
	{
		.str = "tc_no_buffer_discard_uc_tc",
		.getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
	},
};

#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)

#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
					 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
					  MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
					 IEEE_8021QAZ_MAX_TCS)

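/* The per-priority and per-TC counter groups are replicated once per
 * priority / traffic class, IEEE_8021QAZ_MAX_TCS times in total, which
 * is what MLXSW_SP_PORT_ETHTOOL_STATS_LEN above accounts for.
 */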
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_prio_stats[i].str, prio);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
			 mlxsw_sp_port_hw_tc_stats[i].str, tc);
		*p += ETH_GSTRING_LEN;
	}
}

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_prio_strings(&p, i);

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
			mlxsw_sp_port_get_tc_strings(&p, i);

		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static int
mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
			       int *p_len, enum mlxsw_reg_ppcnt_grp grp)
{
	switch (grp) {
	case MLXSW_REG_PPCNT_IEEE_8023_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_stats;
		*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_PRIO_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
		*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
		break;
	case MLXSW_REG_PPCNT_TC_CNT:
		*p_hw_stats = mlxsw_sp_port_hw_tc_stats;
		*p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}
	return 0;
}

static void __mlxsw_sp_port_get_stats(struct net_device *dev,
				      enum mlxsw_reg_ppcnt_grp grp, int prio,
				      u64 *data, int data_index)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_hw_stats *hw_stats;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i, len;
	int err;

	err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
	if (err)
		return;
	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < len; i++)
		data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
}

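/* ethtool hands us a flat array; the counters must be laid out in the
 * exact order the strings were emitted: IEEE 802.3 counters first,
 * then the per-priority groups, then the per-TC groups.
 */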
static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	int i, data_index = 0;

	/* IEEE 802.3 Counters */
	__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
				  data, data_index);
	data_index = MLXSW_SP_PORT_HW_STATS_LEN;

	/* Per-Priority Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	}

	/* Per-TC Counters */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
					  data, data_index);
		data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
	}
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

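/* Translate the PTYS Ethernet protocol bitmask reported by the device
 * into the legacy ethtool supported/advertised link mode bitmasks.
 */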
static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto\n");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause |
			 SUPPORTED_Autoneg;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

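/* Collect the union of all link modes whose speed does not exceed the
 * given upper bound. Used to enable every speed a port of a given
 * width can support.
 */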
static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto\n");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested\n");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin\n");
		return err;
	}

	if (!netif_running(dev))
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status\n");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status\n");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the element hierarchy, so that each TC is linked to
	 * one subgroup, and all subgroups are members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
				   mlxsw_sp_port->local_port, dev,
				   mlxsw_sp_port->split, module);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			mlxsw_sp_port->local_port);
		goto err_core_port_init;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}

1855 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1856 {
1857 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1858
1859 return local_port - offset;
1860 }
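
/* Worked example (assuming MLXSW_SP_PORTS_PER_CLUSTER_MAX is 4): local
 * ports are 1-based, so for local_port 7 the offset is (7 - 1) % 4 = 2
 * and the base port of the cluster {5, 6, 7, 8} is 7 - 2 = 5.
 */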
1861
1862 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
1863 u8 module, unsigned int count)
1864 {
1865 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
1866 int err, i;
1867
1868 for (i = 0; i < count; i++) {
1869 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
1870 width, i * width);
1871 if (err)
1872 goto err_port_module_map;
1873 }
1874
1875 for (i = 0; i < count; i++) {
1876 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
1877 if (err)
1878 goto err_port_swid_set;
1879 }
1880
1881 for (i = 0; i < count; i++) {
1882 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
1883 module, width, i * width);
1884 if (err)
1885 goto err_port_create;
1886 }
1887
1888 return 0;
1889
1890 err_port_create:
1891 for (i--; i >= 0; i--)
1892 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
1893 i = count;
1894 err_port_swid_set:
1895 for (i--; i >= 0; i--)
1896 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
1897 MLXSW_PORT_SWID_DISABLED_PORT);
1898 i = count;
1899 err_port_module_map:
1900 for (i--; i >= 0; i--)
1901 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
1902 return err;
1903 }
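
/* Note on the unwind above: each stage (module map, SWID set, port
 * create) is rolled back in reverse. The "i = count" assignments reset
 * the loop counter so that a failure in a later stage also fully
 * unwinds every iteration of the earlier stages, not just the ones up
 * to the failing index.
 */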
1904
1905 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
1906 u8 base_port, unsigned int count)
1907 {
1908 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
1909 int i;
1910
1911 	/* A split by four means we need to re-create two ports at full
1912 	 * width, whereas a split by two means only one.
1913 	 */
1914 count = count / 2;
1915
1916 for (i = 0; i < count; i++) {
1917 local_port = base_port + i * 2;
1918 module = mlxsw_sp->port_to_module[local_port];
1919
1920 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1921 0);
1922 }
1923
1924 for (i = 0; i < count; i++)
1925 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
1926
1927 for (i = 0; i < count; i++) {
1928 local_port = base_port + i * 2;
1929 module = mlxsw_sp->port_to_module[local_port];
1930
1931 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
1932 width, 0);
1933 }
1934 }
1935
1936 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
1937 unsigned int count)
1938 {
1939 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1940 struct mlxsw_sp_port *mlxsw_sp_port;
1941 u8 module, cur_width, base_port;
1942 int i;
1943 int err;
1944
1945 mlxsw_sp_port = mlxsw_sp->ports[local_port];
1946 if (!mlxsw_sp_port) {
1947 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
1948 local_port);
1949 return -EINVAL;
1950 }
1951
1952 module = mlxsw_sp_port->mapping.module;
1953 cur_width = mlxsw_sp_port->mapping.width;
1954
1955 if (count != 2 && count != 4) {
1956 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
1957 return -EINVAL;
1958 }
1959
1960 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
1961 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
1962 return -EINVAL;
1963 }
1964
1965 	/* Make sure the slave (even-numbered) ports needed for the split are free. */
1966 if (count == 2) {
1967 base_port = local_port;
1968 if (mlxsw_sp->ports[base_port + 1]) {
1969 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
1970 return -EINVAL;
1971 }
1972 } else {
1973 base_port = mlxsw_sp_cluster_base_port_get(local_port);
1974 if (mlxsw_sp->ports[base_port + 1] ||
1975 mlxsw_sp->ports[base_port + 3]) {
1976 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
1977 return -EINVAL;
1978 }
1979 }
1980
1981 for (i = 0; i < count; i++)
1982 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
1983
1984 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
1985 if (err) {
1986 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
1987 goto err_port_split_create;
1988 }
1989
1990 return 0;
1991
1992 err_port_split_create:
1993 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
1994 return err;
1995 }
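
/* Port split is driven from user space via devlink. A usage sketch
 * (the device handle is illustrative):
 *
 *   devlink port split pci/0000:03:00.0/1 count 4
 *
 * should reach mlxsw_sp_port_split() with local_port 1 and count 4,
 * provided the port is currently at full width.
 */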
1996
1997 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
1998 {
1999 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2000 struct mlxsw_sp_port *mlxsw_sp_port;
2001 u8 cur_width, base_port;
2002 unsigned int count;
2003 int i;
2004
2005 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2006 if (!mlxsw_sp_port) {
2007 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2008 local_port);
2009 return -EINVAL;
2010 }
2011
2012 if (!mlxsw_sp_port->split) {
2013 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2014 return -EINVAL;
2015 }
2016
2017 cur_width = mlxsw_sp_port->mapping.width;
2018 count = cur_width == 1 ? 4 : 2;
2019
2020 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2021
2022 /* Determine which ports to remove. */
2023 if (count == 2 && local_port >= base_port + 2)
2024 base_port = base_port + 2;
2025
2026 for (i = 0; i < count; i++)
2027 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2028
2029 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2030
2031 return 0;
2032 }
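
/* The original split count is recovered from the current width: a
 * width of 1 means the port came from a split by four, otherwise it
 * came from a split by two. The base port adjustment handles the
 * by-two case, where a cluster holds two independent pairs and only
 * the pair containing local_port is re-created. A usage sketch,
 * mirroring the split example above:
 *
 *   devlink port unsplit pci/0000:03:00.0/1
 */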
2033
2034 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2035 char *pude_pl, void *priv)
2036 {
2037 struct mlxsw_sp *mlxsw_sp = priv;
2038 struct mlxsw_sp_port *mlxsw_sp_port;
2039 enum mlxsw_reg_pude_oper_status status;
2040 u8 local_port;
2041
2042 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2043 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2044 if (!mlxsw_sp_port)
2045 return;
2046
2047 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2048 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2049 netdev_info(mlxsw_sp_port->dev, "link up\n");
2050 netif_carrier_on(mlxsw_sp_port->dev);
2051 } else {
2052 netdev_info(mlxsw_sp_port->dev, "link down\n");
2053 netif_carrier_off(mlxsw_sp_port->dev);
2054 }
2055 }
2056
2057 static struct mlxsw_event_listener mlxsw_sp_pude_event = {
2058 .func = mlxsw_sp_pude_event_func,
2059 .trap_id = MLXSW_TRAP_ID_PUDE,
2060 };
2061
2062 static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
2063 enum mlxsw_event_trap_id trap_id)
2064 {
2065 struct mlxsw_event_listener *el;
2066 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2067 int err;
2068
2069 switch (trap_id) {
2070 case MLXSW_TRAP_ID_PUDE:
2071 el = &mlxsw_sp_pude_event;
2072 break;
2073 }
2074 err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
2075 if (err)
2076 return err;
2077
2078 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
2079 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2080 if (err)
2081 goto err_event_trap_set;
2082
2083 return 0;
2084
2085 err_event_trap_set:
2086 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2087 return err;
2088 }
2089
2090 static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
2091 enum mlxsw_event_trap_id trap_id)
2092 {
2093 struct mlxsw_event_listener *el;
2094
2095 switch (trap_id) {
2096 case MLXSW_TRAP_ID_PUDE:
2097 el = &mlxsw_sp_pude_event;
2098 break;
2099 }
2100 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
2101 }
2102
2103 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2104 void *priv)
2105 {
2106 struct mlxsw_sp *mlxsw_sp = priv;
2107 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2108 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2109
2110 if (unlikely(!mlxsw_sp_port)) {
2111 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2112 local_port);
2113 return;
2114 }
2115
2116 skb->dev = mlxsw_sp_port->dev;
2117
2118 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2119 u64_stats_update_begin(&pcpu_stats->syncp);
2120 pcpu_stats->rx_packets++;
2121 pcpu_stats->rx_bytes += skb->len;
2122 u64_stats_update_end(&pcpu_stats->syncp);
2123
2124 skb->protocol = eth_type_trans(skb, skb->dev);
2125 netif_receive_skb(skb);
2126 }
2127
2128 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2129 {
2130 .func = mlxsw_sp_rx_listener_func,
2131 .local_port = MLXSW_PORT_DONT_CARE,
2132 .trap_id = MLXSW_TRAP_ID_FDB_MC,
2133 },
2134 /* Traps for specific L2 packet types, not trapped as FDB MC */
2135 {
2136 .func = mlxsw_sp_rx_listener_func,
2137 .local_port = MLXSW_PORT_DONT_CARE,
2138 .trap_id = MLXSW_TRAP_ID_STP,
2139 },
2140 {
2141 .func = mlxsw_sp_rx_listener_func,
2142 .local_port = MLXSW_PORT_DONT_CARE,
2143 .trap_id = MLXSW_TRAP_ID_LACP,
2144 },
2145 {
2146 .func = mlxsw_sp_rx_listener_func,
2147 .local_port = MLXSW_PORT_DONT_CARE,
2148 .trap_id = MLXSW_TRAP_ID_EAPOL,
2149 },
2150 {
2151 .func = mlxsw_sp_rx_listener_func,
2152 .local_port = MLXSW_PORT_DONT_CARE,
2153 .trap_id = MLXSW_TRAP_ID_LLDP,
2154 },
2155 {
2156 .func = mlxsw_sp_rx_listener_func,
2157 .local_port = MLXSW_PORT_DONT_CARE,
2158 .trap_id = MLXSW_TRAP_ID_MMRP,
2159 },
2160 {
2161 .func = mlxsw_sp_rx_listener_func,
2162 .local_port = MLXSW_PORT_DONT_CARE,
2163 .trap_id = MLXSW_TRAP_ID_MVRP,
2164 },
2165 {
2166 .func = mlxsw_sp_rx_listener_func,
2167 .local_port = MLXSW_PORT_DONT_CARE,
2168 .trap_id = MLXSW_TRAP_ID_RPVST,
2169 },
2170 {
2171 .func = mlxsw_sp_rx_listener_func,
2172 .local_port = MLXSW_PORT_DONT_CARE,
2173 .trap_id = MLXSW_TRAP_ID_DHCP,
2174 },
2175 {
2176 .func = mlxsw_sp_rx_listener_func,
2177 .local_port = MLXSW_PORT_DONT_CARE,
2178 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
2179 },
2180 {
2181 .func = mlxsw_sp_rx_listener_func,
2182 .local_port = MLXSW_PORT_DONT_CARE,
2183 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2184 },
2185 {
2186 .func = mlxsw_sp_rx_listener_func,
2187 .local_port = MLXSW_PORT_DONT_CARE,
2188 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2189 },
2190 {
2191 .func = mlxsw_sp_rx_listener_func,
2192 .local_port = MLXSW_PORT_DONT_CARE,
2193 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2194 },
2195 {
2196 .func = mlxsw_sp_rx_listener_func,
2197 .local_port = MLXSW_PORT_DONT_CARE,
2198 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
2199 },
2200 {
2201 .func = mlxsw_sp_rx_listener_func,
2202 .local_port = MLXSW_PORT_DONT_CARE,
2203 .trap_id = MLXSW_TRAP_ID_ARPBC,
2204 },
2205 {
2206 .func = mlxsw_sp_rx_listener_func,
2207 .local_port = MLXSW_PORT_DONT_CARE,
2208 .trap_id = MLXSW_TRAP_ID_ARPUC,
2209 },
2210 {
2211 .func = mlxsw_sp_rx_listener_func,
2212 .local_port = MLXSW_PORT_DONT_CARE,
2213 .trap_id = MLXSW_TRAP_ID_IP2ME,
2214 },
2215 {
2216 .func = mlxsw_sp_rx_listener_func,
2217 .local_port = MLXSW_PORT_DONT_CARE,
2218 .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
2219 },
2220 {
2221 .func = mlxsw_sp_rx_listener_func,
2222 .local_port = MLXSW_PORT_DONT_CARE,
2223 .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
2224 },
2225 };
2226
2227 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2228 {
2229 char htgt_pl[MLXSW_REG_HTGT_LEN];
2230 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2231 int i;
2232 int err;
2233
2234 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
2235 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2236 if (err)
2237 return err;
2238
2239 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
2240 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2241 if (err)
2242 return err;
2243
2244 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2245 err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
2246 &mlxsw_sp_rx_listener[i],
2247 mlxsw_sp);
2248 if (err)
2249 goto err_rx_listener_register;
2250
2251 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
2252 mlxsw_sp_rx_listener[i].trap_id);
2253 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2254 if (err)
2255 goto err_rx_trap_set;
2256 }
2257 return 0;
2258
2259 err_rx_trap_set:
2260 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2261 &mlxsw_sp_rx_listener[i],
2262 mlxsw_sp);
2263 err_rx_listener_register:
2264 for (i--; i >= 0; i--) {
2265 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
2266 mlxsw_sp_rx_listener[i].trap_id);
2267 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2268
2269 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2270 &mlxsw_sp_rx_listener[i],
2271 mlxsw_sp);
2272 }
2273 return err;
2274 }
2275
2276 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2277 {
2278 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2279 int i;
2280
2281 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2282 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
2283 mlxsw_sp_rx_listener[i].trap_id);
2284 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2285
2286 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2287 &mlxsw_sp_rx_listener[i],
2288 mlxsw_sp);
2289 }
2290 }
2291
2292 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2293 enum mlxsw_reg_sfgc_type type,
2294 enum mlxsw_reg_sfgc_bridge_type bridge_type)
2295 {
2296 enum mlxsw_flood_table_type table_type;
2297 enum mlxsw_sp_flood_table flood_table;
2298 char sfgc_pl[MLXSW_REG_SFGC_LEN];
2299
2300 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2301 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2302 else
2303 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2304
2305 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2306 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2307 else
2308 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2309
2310 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2311 flood_table);
2312 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2313 }
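
/* In short: unknown unicast floods through the UC table and all other
 * types (broadcast, unknown multicast, etc.) through the BM table,
 * while the table addressing depends on the bridge type: VLAN-unaware
 * (vFID) bridges use plain FID tables and VLAN-aware bridges use
 * FID-offset tables.
 */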
2314
2315 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2316 {
2317 int type, err;
2318
2319 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2320 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2321 continue;
2322
2323 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2324 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2325 if (err)
2326 return err;
2327
2328 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2329 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2330 if (err)
2331 return err;
2332 }
2333
2334 return 0;
2335 }
2336
2337 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2338 {
2339 char slcr_pl[MLXSW_REG_SLCR_LEN];
2340
2341 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2342 MLXSW_REG_SLCR_LAG_HASH_DMAC |
2343 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2344 MLXSW_REG_SLCR_LAG_HASH_VLANID |
2345 MLXSW_REG_SLCR_LAG_HASH_SIP |
2346 MLXSW_REG_SLCR_LAG_HASH_DIP |
2347 MLXSW_REG_SLCR_LAG_HASH_SPORT |
2348 MLXSW_REG_SLCR_LAG_HASH_DPORT |
2349 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2350 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2351 }
2352
2353 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2354 const struct mlxsw_bus_info *mlxsw_bus_info)
2355 {
2356 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2357 int err;
2358
2359 mlxsw_sp->core = mlxsw_core;
2360 mlxsw_sp->bus_info = mlxsw_bus_info;
2361 INIT_LIST_HEAD(&mlxsw_sp->fids);
2362 INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
2363 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
2364
2365 err = mlxsw_sp_base_mac_get(mlxsw_sp);
2366 if (err) {
2367 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
2368 return err;
2369 }
2370
2371 err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2372 if (err) {
2373 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
2374 return err;
2375 }
2376
2377 err = mlxsw_sp_traps_init(mlxsw_sp);
2378 if (err) {
2379 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
2380 goto err_rx_listener_register;
2381 }
2382
2383 err = mlxsw_sp_flood_init(mlxsw_sp);
2384 if (err) {
2385 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
2386 goto err_flood_init;
2387 }
2388
2389 err = mlxsw_sp_buffers_init(mlxsw_sp);
2390 if (err) {
2391 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
2392 goto err_buffers_init;
2393 }
2394
2395 err = mlxsw_sp_lag_init(mlxsw_sp);
2396 if (err) {
2397 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
2398 goto err_lag_init;
2399 }
2400
2401 err = mlxsw_sp_switchdev_init(mlxsw_sp);
2402 if (err) {
2403 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2404 goto err_switchdev_init;
2405 }
2406
2407 err = mlxsw_sp_router_init(mlxsw_sp);
2408 if (err) {
2409 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
2410 goto err_router_init;
2411 }
2412
2413 err = mlxsw_sp_ports_create(mlxsw_sp);
2414 if (err) {
2415 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
2416 goto err_ports_create;
2417 }
2418
2419 return 0;
2420
2421 err_ports_create:
2422 mlxsw_sp_router_fini(mlxsw_sp);
2423 err_router_init:
2424 mlxsw_sp_switchdev_fini(mlxsw_sp);
2425 err_switchdev_init:
2426 err_lag_init:
2427 mlxsw_sp_buffers_fini(mlxsw_sp);
2428 err_buffers_init:
2429 err_flood_init:
2430 mlxsw_sp_traps_fini(mlxsw_sp);
2431 err_rx_listener_register:
2432 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2433 return err;
2434 }
2435
2436 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
2437 {
2438 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2439 int i;
2440
2441 mlxsw_sp_ports_remove(mlxsw_sp);
2442 mlxsw_sp_router_fini(mlxsw_sp);
2443 mlxsw_sp_switchdev_fini(mlxsw_sp);
2444 mlxsw_sp_buffers_fini(mlxsw_sp);
2445 mlxsw_sp_traps_fini(mlxsw_sp);
2446 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2447 WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
2448 WARN_ON(!list_empty(&mlxsw_sp->fids));
2449 for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
2450 WARN_ON_ONCE(mlxsw_sp->rifs[i]);
2451 }
2452
2453 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
2454 .used_max_vepa_channels = 1,
2455 .max_vepa_channels = 0,
2456 .used_max_lag = 1,
2457 .max_lag = MLXSW_SP_LAG_MAX,
2458 .used_max_port_per_lag = 1,
2459 .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
2460 .used_max_mid = 1,
2461 .max_mid = MLXSW_SP_MID_MAX,
2462 .used_max_pgt = 1,
2463 .max_pgt = 0,
2464 .used_max_system_port = 1,
2465 .max_system_port = 64,
2466 .used_max_vlan_groups = 1,
2467 .max_vlan_groups = 127,
2468 .used_max_regions = 1,
2469 .max_regions = 400,
2470 .used_flood_tables = 1,
2471 .used_flood_mode = 1,
2472 .flood_mode = 3,
2473 .max_fid_offset_flood_tables = 2,
2474 .fid_offset_flood_table_size = VLAN_N_VID - 1,
2475 .max_fid_flood_tables = 2,
2476 .fid_flood_table_size = MLXSW_SP_VFID_MAX,
2477 .used_max_ib_mc = 1,
2478 .max_ib_mc = 0,
2479 .used_max_pkey = 1,
2480 .max_pkey = 0,
2481 .used_kvd_sizes = 1,
2482 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
2483 .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
2484 .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
2485 .swid_config = {
2486 {
2487 .used_type = 1,
2488 .type = MLXSW_PORT_SWID_TYPE_ETH,
2489 }
2490 },
2491 };
2492
2493 static struct mlxsw_driver mlxsw_sp_driver = {
2494 .kind = MLXSW_DEVICE_KIND_SPECTRUM,
2495 .owner = THIS_MODULE,
2496 .priv_size = sizeof(struct mlxsw_sp),
2497 .init = mlxsw_sp_init,
2498 .fini = mlxsw_sp_fini,
2499 .port_split = mlxsw_sp_port_split,
2500 .port_unsplit = mlxsw_sp_port_unsplit,
2501 .sb_pool_get = mlxsw_sp_sb_pool_get,
2502 .sb_pool_set = mlxsw_sp_sb_pool_set,
2503 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
2504 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
2505 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
2506 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
2507 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
2508 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
2509 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
2510 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
2511 .txhdr_construct = mlxsw_sp_txhdr_construct,
2512 .txhdr_len = MLXSW_TXHDR_LEN,
2513 .profile = &mlxsw_sp_config_profile,
2514 };
2515
2516 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2517 {
2518 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2519 }
2520
2521 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
2522 {
2523 struct net_device *lower_dev;
2524 struct list_head *iter;
2525
2526 if (mlxsw_sp_port_dev_check(dev))
2527 return netdev_priv(dev);
2528
2529 netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
2530 if (mlxsw_sp_port_dev_check(lower_dev))
2531 return netdev_priv(lower_dev);
2532 }
2533 return NULL;
2534 }
2535
2536 static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
2537 {
2538 struct mlxsw_sp_port *mlxsw_sp_port;
2539
2540 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2541 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
2542 }
2543
2544 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
2545 {
2546 struct net_device *lower_dev;
2547 struct list_head *iter;
2548
2549 if (mlxsw_sp_port_dev_check(dev))
2550 return netdev_priv(dev);
2551
2552 netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
2553 if (mlxsw_sp_port_dev_check(lower_dev))
2554 return netdev_priv(lower_dev);
2555 }
2556 return NULL;
2557 }
2558
2559 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
2560 {
2561 struct mlxsw_sp_port *mlxsw_sp_port;
2562
2563 rcu_read_lock();
2564 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
2565 if (mlxsw_sp_port)
2566 dev_hold(mlxsw_sp_port->dev);
2567 rcu_read_unlock();
2568 return mlxsw_sp_port;
2569 }
2570
2571 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
2572 {
2573 dev_put(mlxsw_sp_port->dev);
2574 }
2575
2576 static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
2577 unsigned long event)
2578 {
2579 switch (event) {
2580 case NETDEV_UP:
2581 if (!r)
2582 return true;
2583 r->ref_count++;
2584 return false;
2585 case NETDEV_DOWN:
2586 if (r && --r->ref_count == 0)
2587 return true;
2588 /* It is possible we already removed the RIF ourselves
2589 * if it was assigned to a netdev that is now a bridge
2590 * or LAG slave.
2591 */
2592 return false;
2593 }
2594
2595 return false;
2596 }
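
/* Example of the reference counting above: the first IP address added
 * to a netdev (NETDEV_UP with no existing RIF) triggers RIF creation,
 * while additional addresses only bump the reference count.
 * Symmetrically, the RIF is only torn down when the last address is
 * removed.
 */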
2597
2598 static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
2599 {
2600 int i;
2601
2602 for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
2603 if (!mlxsw_sp->rifs[i])
2604 return i;
2605
2606 return MLXSW_SP_RIF_MAX;
2607 }
2608
2609 static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
2610 bool *p_lagged, u16 *p_system_port)
2611 {
2612 u8 local_port = mlxsw_sp_vport->local_port;
2613
2614 *p_lagged = mlxsw_sp_vport->lagged;
2615 *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
2616 }
2617
2618 static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
2619 struct net_device *l3_dev, u16 rif,
2620 bool create)
2621 {
2622 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2623 bool lagged = mlxsw_sp_vport->lagged;
2624 char ritr_pl[MLXSW_REG_RITR_LEN];
2625 u16 system_port;
2626
2627 mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
2628 l3_dev->mtu, l3_dev->dev_addr);
2629
2630 mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
2631 mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
2632 mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
2633
2634 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2635 }
2636
2637 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
2638
2639 static struct mlxsw_sp_fid *
2640 mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
2641 {
2642 struct mlxsw_sp_fid *f;
2643
2644 f = kzalloc(sizeof(*f), GFP_KERNEL);
2645 if (!f)
2646 return NULL;
2647
2648 f->leave = mlxsw_sp_vport_rif_sp_leave;
2649 f->ref_count = 0;
2650 f->dev = l3_dev;
2651 f->fid = fid;
2652
2653 return f;
2654 }
2655
2656 static struct mlxsw_sp_rif *
2657 mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
2658 {
2659 struct mlxsw_sp_rif *r;
2660
2661 r = kzalloc(sizeof(*r), GFP_KERNEL);
2662 if (!r)
2663 return NULL;
2664
2665 ether_addr_copy(r->addr, l3_dev->dev_addr);
2666 r->mtu = l3_dev->mtu;
2667 r->ref_count = 1;
2668 r->dev = l3_dev;
2669 r->rif = rif;
2670 r->f = f;
2671
2672 return r;
2673 }
2674
2675 static struct mlxsw_sp_rif *
2676 mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
2677 struct net_device *l3_dev)
2678 {
2679 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2680 struct mlxsw_sp_fid *f;
2681 struct mlxsw_sp_rif *r;
2682 u16 fid, rif;
2683 int err;
2684
2685 rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
2686 if (rif == MLXSW_SP_RIF_MAX)
2687 return ERR_PTR(-ERANGE);
2688
2689 err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
2690 if (err)
2691 return ERR_PTR(err);
2692
2693 fid = mlxsw_sp_rif_sp_to_fid(rif);
2694 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
2695 if (err)
2696 goto err_rif_fdb_op;
2697
2698 f = mlxsw_sp_rfid_alloc(fid, l3_dev);
2699 if (!f) {
2700 err = -ENOMEM;
2701 goto err_rfid_alloc;
2702 }
2703
2704 r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
2705 if (!r) {
2706 err = -ENOMEM;
2707 goto err_rif_alloc;
2708 }
2709
2710 f->r = r;
2711 mlxsw_sp->rifs[rif] = r;
2712
2713 return r;
2714
2715 err_rif_alloc:
2716 kfree(f);
2717 err_rfid_alloc:
2718 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
2719 err_rif_fdb_op:
2720 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
2721 return ERR_PTR(err);
2722 }
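
/* Creation sequence above: reserve a free RIF index, program the
 * router interface via RITR, install an FDB entry directing packets
 * with the interface MAC to the router, and only then allocate the
 * software rFID and RIF objects. The error path unwinds these steps
 * in reverse.
 */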
2723
2724 static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
2725 struct mlxsw_sp_rif *r)
2726 {
2727 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2728 struct net_device *l3_dev = r->dev;
2729 struct mlxsw_sp_fid *f = r->f;
2730 u16 fid = f->fid;
2731 u16 rif = r->rif;
2732
2733 mlxsw_sp->rifs[rif] = NULL;
2734 f->r = NULL;
2735
2736 kfree(r);
2737
2738 kfree(f);
2739
2740 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
2741
2742 mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
2743 }
2744
2745 static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
2746 struct net_device *l3_dev)
2747 {
2748 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2749 struct mlxsw_sp_rif *r;
2750
2751 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
2752 if (!r) {
2753 r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
2754 if (IS_ERR(r))
2755 return PTR_ERR(r);
2756 }
2757
2758 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
2759 r->f->ref_count++;
2760
2761 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
2762
2763 return 0;
2764 }
2765
2766 static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
2767 {
2768 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
2769
2770 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
2771
2772 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
2773 if (--f->ref_count == 0)
2774 mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
2775 }
2776
2777 static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
2778 struct net_device *port_dev,
2779 unsigned long event, u16 vid)
2780 {
2781 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
2782 struct mlxsw_sp_port *mlxsw_sp_vport;
2783
2784 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2785 if (WARN_ON(!mlxsw_sp_vport))
2786 return -EINVAL;
2787
2788 switch (event) {
2789 case NETDEV_UP:
2790 return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
2791 case NETDEV_DOWN:
2792 mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
2793 break;
2794 }
2795
2796 return 0;
2797 }
2798
2799 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
2800 unsigned long event)
2801 {
2802 if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
2803 return 0;
2804
2805 return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
2806 }
2807
2808 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
2809 struct net_device *lag_dev,
2810 unsigned long event, u16 vid)
2811 {
2812 struct net_device *port_dev;
2813 struct list_head *iter;
2814 int err;
2815
2816 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
2817 if (mlxsw_sp_port_dev_check(port_dev)) {
2818 err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
2819 event, vid);
2820 if (err)
2821 return err;
2822 }
2823 }
2824
2825 return 0;
2826 }
2827
2828 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
2829 unsigned long event)
2830 {
2831 if (netif_is_bridge_port(lag_dev))
2832 return 0;
2833
2834 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
2835 }
2836
2837 static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
2838 struct net_device *l3_dev)
2839 {
2840 u16 fid;
2841
2842 if (is_vlan_dev(l3_dev))
2843 fid = vlan_dev_vlan_id(l3_dev);
2844 else if (mlxsw_sp->master_bridge.dev == l3_dev)
2845 fid = 1;
2846 else
2847 return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
2848
2849 return mlxsw_sp_fid_find(mlxsw_sp, fid);
2850 }
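
/* Examples: for a VLAN device such as br0.100 on top of the VLAN-aware
 * bridge the FID is the VLAN ID (100); for the VLAN-aware bridge
 * itself it is the default VLAN (FID 1); any other bridge is
 * VLAN-unaware and is looked up among the vFIDs.
 */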
2851
2852 static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
2853 {
2854 if (mlxsw_sp_fid_is_vfid(fid))
2855 return MLXSW_REG_RITR_FID_IF;
2856 else
2857 return MLXSW_REG_RITR_VLAN_IF;
2858 }
2859
2860 static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
2861 struct net_device *l3_dev,
2862 u16 fid, u16 rif,
2863 bool create)
2864 {
2865 enum mlxsw_reg_ritr_if_type rif_type;
2866 char ritr_pl[MLXSW_REG_RITR_LEN];
2867
2868 rif_type = mlxsw_sp_rif_type_get(fid);
2869 mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
2870 l3_dev->dev_addr);
2871 mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
2872
2873 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
2874 }
2875
2876 static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
2877 struct net_device *l3_dev,
2878 struct mlxsw_sp_fid *f)
2879 {
2880 struct mlxsw_sp_rif *r;
2881 u16 rif;
2882 int err;
2883
2884 rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
2885 if (rif == MLXSW_SP_RIF_MAX)
2886 return -ERANGE;
2887
2888 err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
2889 if (err)
2890 return err;
2891
2892 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
2893 if (err)
2894 goto err_rif_fdb_op;
2895
2896 r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
2897 if (!r) {
2898 err = -ENOMEM;
2899 goto err_rif_alloc;
2900 }
2901
2902 f->r = r;
2903 mlxsw_sp->rifs[rif] = r;
2904
2905 netdev_dbg(l3_dev, "RIF=%d created\n", rif);
2906
2907 return 0;
2908
2909 err_rif_alloc:
2910 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
2911 err_rif_fdb_op:
2912 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
2913 return err;
2914 }
2915
2916 void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
2917 struct mlxsw_sp_rif *r)
2918 {
2919 struct net_device *l3_dev = r->dev;
2920 struct mlxsw_sp_fid *f = r->f;
2921 u16 rif = r->rif;
2922
2923 mlxsw_sp->rifs[rif] = NULL;
2924 f->r = NULL;
2925
2926 kfree(r);
2927
2928 mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
2929
2930 mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
2931
2932 netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
2933 }
2934
2935 static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
2936 struct net_device *br_dev,
2937 unsigned long event)
2938 {
2939 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
2940 struct mlxsw_sp_fid *f;
2941
2942 	/* The FID is an actual FID if the L3 device is the VLAN-aware
2943 	 * bridge or a VLAN device on top of it. Otherwise, the L3 device
2944 	 * is a VLAN-unaware bridge and we get a vFID.
2945 */
2946 f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
2947 if (WARN_ON(!f))
2948 return -EINVAL;
2949
2950 switch (event) {
2951 case NETDEV_UP:
2952 return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
2953 case NETDEV_DOWN:
2954 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
2955 break;
2956 }
2957
2958 return 0;
2959 }
2960
2961 static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
2962 unsigned long event)
2963 {
2964 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
2965 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
2966 u16 vid = vlan_dev_vlan_id(vlan_dev);
2967
2968 if (mlxsw_sp_port_dev_check(real_dev))
2969 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
2970 vid);
2971 else if (netif_is_lag_master(real_dev))
2972 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
2973 vid);
2974 else if (netif_is_bridge_master(real_dev) &&
2975 mlxsw_sp->master_bridge.dev == real_dev)
2976 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
2977 event);
2978
2979 return 0;
2980 }
2981
2982 static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
2983 unsigned long event, void *ptr)
2984 {
2985 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
2986 struct net_device *dev = ifa->ifa_dev->dev;
2987 struct mlxsw_sp *mlxsw_sp;
2988 struct mlxsw_sp_rif *r;
2989 int err = 0;
2990
2991 mlxsw_sp = mlxsw_sp_lower_get(dev);
2992 if (!mlxsw_sp)
2993 goto out;
2994
2995 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
2996 if (!mlxsw_sp_rif_should_config(r, event))
2997 goto out;
2998
2999 if (mlxsw_sp_port_dev_check(dev))
3000 err = mlxsw_sp_inetaddr_port_event(dev, event);
3001 else if (netif_is_lag_master(dev))
3002 err = mlxsw_sp_inetaddr_lag_event(dev, event);
3003 else if (netif_is_bridge_master(dev))
3004 err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
3005 else if (is_vlan_dev(dev))
3006 err = mlxsw_sp_inetaddr_vlan_event(dev, event);
3007
3008 out:
3009 return notifier_from_errno(err);
3010 }
3011
3012 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
3013 const char *mac, int mtu)
3014 {
3015 char ritr_pl[MLXSW_REG_RITR_LEN];
3016 int err;
3017
3018 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
3019 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3020 if (err)
3021 return err;
3022
3023 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
3024 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
3025 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
3026 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3027 }
3028
3029 static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
3030 {
3031 struct mlxsw_sp *mlxsw_sp;
3032 struct mlxsw_sp_rif *r;
3033 int err;
3034
3035 mlxsw_sp = mlxsw_sp_lower_get(dev);
3036 if (!mlxsw_sp)
3037 return 0;
3038
3039 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3040 if (!r)
3041 return 0;
3042
3043 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
3044 if (err)
3045 return err;
3046
3047 err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
3048 if (err)
3049 goto err_rif_edit;
3050
3051 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
3052 if (err)
3053 goto err_rif_fdb_op;
3054
3055 ether_addr_copy(r->addr, dev->dev_addr);
3056 r->mtu = dev->mtu;
3057
3058 netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
3059
3060 return 0;
3061
3062 err_rif_fdb_op:
3063 mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
3064 err_rif_edit:
3065 mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
3066 return err;
3067 }
3068
3069 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3070 u16 fid)
3071 {
3072 if (mlxsw_sp_fid_is_vfid(fid))
3073 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3074 else
3075 return test_bit(fid, lag_port->active_vlans);
3076 }
3077
3078 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
3079 u16 fid)
3080 {
3081 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3082 u8 local_port = mlxsw_sp_port->local_port;
3083 u16 lag_id = mlxsw_sp_port->lag_id;
3084 int i, count = 0;
3085
3086 if (!mlxsw_sp_port->lagged)
3087 return true;
3088
3089 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
3090 struct mlxsw_sp_port *lag_port;
3091
3092 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
3093 if (!lag_port || lag_port->local_port == local_port)
3094 continue;
3095 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
3096 count++;
3097 }
3098
3099 return !count;
3100 }
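
/* Rationale: FDB entries learned on a LAG port are keyed by LAG ID
 * rather than by the member port, so flushing them while another
 * member is still active in the same FID would discard entries that
 * member still needs. The flush is therefore performed only by the
 * last member leaving the FID.
 */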
3101
3102 static int
3103 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3104 u16 fid)
3105 {
3106 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3107 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3108
3109 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
3110 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3111 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
3112 mlxsw_sp_port->local_port);
3113
3114 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
3115 mlxsw_sp_port->local_port, fid);
3116
3117 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3118 }
3119
3120 static int
3121 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3122 u16 fid)
3123 {
3124 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3125 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3126
3127 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
3128 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3129 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
3130
3131 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
3132 mlxsw_sp_port->lag_id, fid);
3133
3134 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3135 }
3136
3137 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
3138 {
3139 if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3140 return 0;
3141
3142 if (mlxsw_sp_port->lagged)
3143 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
3144 fid);
3145 else
3146 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
3147 }
3148
3149 static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
3150 {
3151 struct mlxsw_sp_fid *f, *tmp;
3152
3153 list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
3154 if (--f->ref_count == 0)
3155 mlxsw_sp_fid_destroy(mlxsw_sp, f);
3156 else
3157 WARN_ON_ONCE(1);
3158 }
3159
3160 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3161 struct net_device *br_dev)
3162 {
3163 return !mlxsw_sp->master_bridge.dev ||
3164 mlxsw_sp->master_bridge.dev == br_dev;
3165 }
3166
3167 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
3168 struct net_device *br_dev)
3169 {
3170 mlxsw_sp->master_bridge.dev = br_dev;
3171 mlxsw_sp->master_bridge.ref_count++;
3172 }
3173
3174 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
3175 {
3176 if (--mlxsw_sp->master_bridge.ref_count == 0) {
3177 mlxsw_sp->master_bridge.dev = NULL;
3178 /* It's possible upper VLAN devices are still holding
3179 * references to underlying FIDs. Drop the reference
3180 * and release the resources if it was the last one.
3181 * If it wasn't, then something bad happened.
3182 */
3183 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
3184 }
3185 }
3186
3187 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
3188 struct net_device *br_dev)
3189 {
3190 struct net_device *dev = mlxsw_sp_port->dev;
3191 int err;
3192
3193 	/* When a port is not bridged, untagged packets are tagged with
3194 * PVID=VID=1, thereby creating an implicit VLAN interface in
3195 * the device. Remove it and let bridge code take care of its
3196 * own VLANs.
3197 */
3198 err = mlxsw_sp_port_kill_vid(dev, 0, 1);
3199 if (err)
3200 return err;
3201
3202 mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
3203
3204 mlxsw_sp_port->learning = 1;
3205 mlxsw_sp_port->learning_sync = 1;
3206 mlxsw_sp_port->uc_flood = 1;
3207 mlxsw_sp_port->bridged = 1;
3208
3209 return 0;
3210 }
3211
3212 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3213 {
3214 struct net_device *dev = mlxsw_sp_port->dev;
3215
3216 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
3217
3218 mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
3219
3220 mlxsw_sp_port->learning = 0;
3221 mlxsw_sp_port->learning_sync = 0;
3222 mlxsw_sp_port->uc_flood = 0;
3223 mlxsw_sp_port->bridged = 0;
3224
3225 /* Add implicit VLAN interface in the device, so that untagged
3226 * packets will be classified to the default vFID.
3227 */
3228 mlxsw_sp_port_add_vid(dev, 0, 1);
3229 }
3230
3231 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3232 {
3233 char sldr_pl[MLXSW_REG_SLDR_LEN];
3234
3235 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3236 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3237 }
3238
3239 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3240 {
3241 char sldr_pl[MLXSW_REG_SLDR_LEN];
3242
3243 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3244 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3245 }
3246
3247 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3248 u16 lag_id, u8 port_index)
3249 {
3250 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3251 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3252
3253 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3254 lag_id, port_index);
3255 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3256 }
3257
3258 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3259 u16 lag_id)
3260 {
3261 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3262 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3263
3264 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3265 lag_id);
3266 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3267 }
3268
3269 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3270 u16 lag_id)
3271 {
3272 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3273 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3274
3275 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3276 lag_id);
3277 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3278 }
3279
3280 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3281 u16 lag_id)
3282 {
3283 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3284 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3285
3286 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3287 lag_id);
3288 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3289 }
3290
3291 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3292 struct net_device *lag_dev,
3293 u16 *p_lag_id)
3294 {
3295 struct mlxsw_sp_upper *lag;
3296 int free_lag_id = -1;
3297 int i;
3298
3299 for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
3300 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3301 if (lag->ref_count) {
3302 if (lag->dev == lag_dev) {
3303 *p_lag_id = i;
3304 return 0;
3305 }
3306 } else if (free_lag_id < 0) {
3307 free_lag_id = i;
3308 }
3309 }
3310 if (free_lag_id < 0)
3311 return -EBUSY;
3312 *p_lag_id = free_lag_id;
3313 return 0;
3314 }
3315
3316 static bool
3317 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3318 struct net_device *lag_dev,
3319 struct netdev_lag_upper_info *lag_upper_info)
3320 {
3321 u16 lag_id;
3322
3323 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3324 return false;
3325 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3326 return false;
3327 return true;
3328 }
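
/* Offload criteria: a free (or already mapped) LAG ID must exist and
 * the bond must use a hash-based egress policy. Modes that transmit
 * through a single active slave (e.g. active-backup) report a
 * different TX type and are left unoffloaded.
 */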
3329
3330 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3331 u16 lag_id, u8 *p_port_index)
3332 {
3333 int i;
3334
3335 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
3336 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3337 *p_port_index = i;
3338 return 0;
3339 }
3340 }
3341 return -EBUSY;
3342 }
3343
3344 static void
3345 mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3346 u16 lag_id)
3347 {
3348 struct mlxsw_sp_port *mlxsw_sp_vport;
3349 struct mlxsw_sp_fid *f;
3350
3351 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3352 if (WARN_ON(!mlxsw_sp_vport))
3353 return;
3354
3355 	/* If the vPort is assigned a RIF, then leave it, since the RIF
3356 	 * is no longer valid.
3357 */
3358 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3359 if (f)
3360 f->leave(mlxsw_sp_vport);
3361
3362 mlxsw_sp_vport->lag_id = lag_id;
3363 mlxsw_sp_vport->lagged = 1;
3364 }
3365
3366 static void
3367 mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3368 {
3369 struct mlxsw_sp_port *mlxsw_sp_vport;
3370 struct mlxsw_sp_fid *f;
3371
3372 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3373 if (WARN_ON(!mlxsw_sp_vport))
3374 return;
3375
3376 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3377 if (f)
3378 f->leave(mlxsw_sp_vport);
3379
3380 mlxsw_sp_vport->lagged = 0;
3381 }
3382
3383 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3384 struct net_device *lag_dev)
3385 {
3386 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3387 struct mlxsw_sp_upper *lag;
3388 u16 lag_id;
3389 u8 port_index;
3390 int err;
3391
3392 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3393 if (err)
3394 return err;
3395 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3396 if (!lag->ref_count) {
3397 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3398 if (err)
3399 return err;
3400 lag->dev = lag_dev;
3401 }
3402
3403 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3404 if (err)
3405 return err;
3406 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3407 if (err)
3408 goto err_col_port_add;
3409 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
3410 if (err)
3411 goto err_col_port_enable;
3412
3413 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3414 mlxsw_sp_port->local_port);
3415 mlxsw_sp_port->lag_id = lag_id;
3416 mlxsw_sp_port->lagged = 1;
3417 lag->ref_count++;
3418
3419 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
3420
3421 return 0;
3422
3423 err_col_port_enable:
3424 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3425 err_col_port_add:
3426 if (!lag->ref_count)
3427 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3428 return err;
3429 }
3430
3431 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
3432 struct net_device *lag_dev)
3433 {
3434 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3435 u16 lag_id = mlxsw_sp_port->lag_id;
3436 struct mlxsw_sp_upper *lag;
3437
3438 if (!mlxsw_sp_port->lagged)
3439 return;
3440 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3441 WARN_ON(lag->ref_count == 0);
3442
3443 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
3444 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3445
3446 if (mlxsw_sp_port->bridged) {
3447 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
3448 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
3449 }
3450
3451 if (lag->ref_count == 1)
3452 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3453
3454 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3455 mlxsw_sp_port->local_port);
3456 mlxsw_sp_port->lagged = 0;
3457 lag->ref_count--;
3458
3459 mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
3460 }
3461
3462 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3463 u16 lag_id)
3464 {
3465 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3466 char sldr_pl[MLXSW_REG_SLDR_LEN];
3467
3468 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3469 mlxsw_sp_port->local_port);
3470 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3471 }
3472
3473 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3474 u16 lag_id)
3475 {
3476 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3477 char sldr_pl[MLXSW_REG_SLDR_LEN];
3478
3479 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3480 mlxsw_sp_port->local_port);
3481 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3482 }
3483
3484 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3485 bool lag_tx_enabled)
3486 {
3487 if (lag_tx_enabled)
3488 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3489 mlxsw_sp_port->lag_id);
3490 else
3491 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3492 mlxsw_sp_port->lag_id);
3493 }
3494
3495 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3496 struct netdev_lag_lower_state_info *info)
3497 {
3498 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
3499 }
3500
3501 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3502 struct net_device *vlan_dev)
3503 {
3504 struct mlxsw_sp_port *mlxsw_sp_vport;
3505 u16 vid = vlan_dev_vlan_id(vlan_dev);
3506
3507 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3508 if (WARN_ON(!mlxsw_sp_vport))
3509 return -EINVAL;
3510
3511 mlxsw_sp_vport->dev = vlan_dev;
3512
3513 return 0;
3514 }
3515
3516 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
3517 struct net_device *vlan_dev)
3518 {
3519 struct mlxsw_sp_port *mlxsw_sp_vport;
3520 u16 vid = vlan_dev_vlan_id(vlan_dev);
3521
3522 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3523 if (WARN_ON(!mlxsw_sp_vport))
3524 return;
3525
3526 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3527 }
3528
3529 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3530 unsigned long event, void *ptr)
3531 {
3532 struct netdev_notifier_changeupper_info *info;
3533 struct mlxsw_sp_port *mlxsw_sp_port;
3534 struct net_device *upper_dev;
3535 struct mlxsw_sp *mlxsw_sp;
3536 int err = 0;
3537
3538 mlxsw_sp_port = netdev_priv(dev);
3539 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3540 info = ptr;
3541
3542 switch (event) {
3543 case NETDEV_PRECHANGEUPPER:
3544 upper_dev = info->upper_dev;
3545 if (!is_vlan_dev(upper_dev) &&
3546 !netif_is_lag_master(upper_dev) &&
3547 !netif_is_bridge_master(upper_dev))
3548 return -EINVAL;
3549 if (!info->linking)
3550 break;
3551 		/* A HW limitation forbids enslaving ports to multiple bridges. */
3552 if (netif_is_bridge_master(upper_dev) &&
3553 !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
3554 return -EINVAL;
3555 if (netif_is_lag_master(upper_dev) &&
3556 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
3557 info->upper_info))
3558 return -EINVAL;
3559 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
3560 return -EINVAL;
3561 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
3562 !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
3563 return -EINVAL;
3564 break;
3565 case NETDEV_CHANGEUPPER:
3566 upper_dev = info->upper_dev;
3567 if (is_vlan_dev(upper_dev)) {
3568 if (info->linking)
3569 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
3570 upper_dev);
3571 else
3572 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
3573 upper_dev);
3574 } else if (netif_is_bridge_master(upper_dev)) {
3575 if (info->linking)
3576 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
3577 upper_dev);
3578 else
3579 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
3580 } else if (netif_is_lag_master(upper_dev)) {
3581 if (info->linking)
3582 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
3583 upper_dev);
3584 else
3585 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
3586 upper_dev);
3587 } else {
3588 err = -EINVAL;
3589 WARN_ON(1);
3590 }
3591 break;
3592 }
3593
3594 return err;
3595 }
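
/* The two events implement a prepare/commit pattern:
 * NETDEV_PRECHANGEUPPER can veto an upcoming topology change by
 * returning an error before any state has changed, while
 * NETDEV_CHANGEUPPER performs the actual join/leave once the stacking
 * has been committed by the networking core.
 */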
3596
3597 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3598 unsigned long event, void *ptr)
3599 {
3600 struct netdev_notifier_changelowerstate_info *info;
3601 struct mlxsw_sp_port *mlxsw_sp_port;
3602 int err;
3603
3604 mlxsw_sp_port = netdev_priv(dev);
3605 info = ptr;
3606
3607 switch (event) {
3608 case NETDEV_CHANGELOWERSTATE:
3609 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3610 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3611 info->lower_state_info);
3612 if (err)
3613 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3614 }
3615 break;
3616 }
3617
3618 return 0;
3619 }
3620
3621 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3622 unsigned long event, void *ptr)
3623 {
3624 switch (event) {
3625 case NETDEV_PRECHANGEUPPER:
3626 case NETDEV_CHANGEUPPER:
3627 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3628 case NETDEV_CHANGELOWERSTATE:
3629 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3630 }
3631
3632 return 0;
3633 }
3634
3635 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3636 unsigned long event, void *ptr)
3637 {
3638 struct net_device *dev;
3639 struct list_head *iter;
3640 int ret;
3641
3642 netdev_for_each_lower_dev(lag_dev, dev, iter) {
3643 if (mlxsw_sp_port_dev_check(dev)) {
3644 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3645 if (ret)
3646 return ret;
3647 }
3648 }
3649
3650 return 0;
3651 }
3652
3653 static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
3654 struct net_device *vlan_dev)
3655 {
3656 u16 fid = vlan_dev_vlan_id(vlan_dev);
3657 struct mlxsw_sp_fid *f;
3658
3659 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
3660 if (!f) {
3661 f = mlxsw_sp_fid_create(mlxsw_sp, fid);
3662 if (IS_ERR(f))
3663 return PTR_ERR(f);
3664 }
3665
3666 f->ref_count++;
3667
3668 return 0;
3669 }
3670
3671 static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
3672 struct net_device *vlan_dev)
3673 {
3674 u16 fid = vlan_dev_vlan_id(vlan_dev);
3675 struct mlxsw_sp_fid *f;
3676
3677 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
3678 if (f && f->r)
3679 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
3680 if (f && --f->ref_count == 0)
3681 mlxsw_sp_fid_destroy(mlxsw_sp, f);
3682 }
3683
3684 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
3685 unsigned long event, void *ptr)
3686 {
3687 struct netdev_notifier_changeupper_info *info;
3688 struct net_device *upper_dev;
3689 struct mlxsw_sp *mlxsw_sp;
3690 int err;
3691
3692 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3693 if (!mlxsw_sp)
3694 return 0;
3695 if (br_dev != mlxsw_sp->master_bridge.dev)
3696 return 0;
3697
3698 info = ptr;
3699
3700 switch (event) {
3701 case NETDEV_CHANGEUPPER:
3702 upper_dev = info->upper_dev;
3703 if (!is_vlan_dev(upper_dev))
3704 break;
3705 if (info->linking) {
3706 err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
3707 upper_dev);
3708 if (err)
3709 return err;
3710 } else {
3711 mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
3712 }
3713 break;
3714 }
3715
3716 return 0;
3717 }
3718
3719 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
3720 {
3721 return find_first_zero_bit(mlxsw_sp->vfids.mapped,
3722 MLXSW_SP_VFID_MAX);
3723 }
3724
3725 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
3726 {
3727 char sfmr_pl[MLXSW_REG_SFMR_LEN];
3728
3729 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
3730 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
3731 }
3732
3733 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
3734
3735 static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
3736 struct net_device *br_dev)
3737 {
3738 struct device *dev = mlxsw_sp->bus_info->dev;
3739 struct mlxsw_sp_fid *f;
3740 u16 vfid, fid;
3741 int err;
3742
3743 vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
3744 if (vfid == MLXSW_SP_VFID_MAX) {
3745 dev_err(dev, "No available vFIDs\n");
3746 return ERR_PTR(-ERANGE);
3747 }
3748
3749 fid = mlxsw_sp_vfid_to_fid(vfid);
3750 err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
3751 if (err) {
3752 dev_err(dev, "Failed to create FID=%d\n", fid);
3753 return ERR_PTR(err);
3754 }
3755
3756 f = kzalloc(sizeof(*f), GFP_KERNEL);
3757 if (!f)
3758 goto err_allocate_vfid;
3759
3760 f->leave = mlxsw_sp_vport_vfid_leave;
3761 f->fid = fid;
3762 f->dev = br_dev;
3763
3764 list_add(&f->list, &mlxsw_sp->vfids.list);
3765 set_bit(vfid, mlxsw_sp->vfids.mapped);
3766
3767 return f;
3768
3769 err_allocate_vfid:
3770 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
3771 return ERR_PTR(-ENOMEM);
3772 }
3773
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fid *f)
{
	u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
	u16 fid = f->fid;

	clear_bit(vfid, mlxsw_sp->vfids.mapped);
	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}

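/* Set or clear the mapping from the vPort's {Port, VID} to the given
 * FID in the device.
 */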
static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}

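/* Bind a vPort to the vFID of the bridge device, creating the vFID on
 * first use, and set up flooding and the {Port, VID} to FID mapping.
 */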
static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				    struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f;
	int err;

	f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
	if (!f) {
		f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_flood_set;

	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
	if (err)
		goto err_vport_fid_map;

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
	f->ref_count++;

	netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);

	return 0;

err_vport_fid_map:
	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
	if (!f->ref_count)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
	return err;
}

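/* Undo mlxsw_sp_vport_vfid_join(): clear the FID mapping and flooding,
 * flush FDB entries learned on the FID and destroy it when the last
 * vPort leaves.
 */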
static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);

	netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);

	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);

	mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
	if (--f->ref_count == 0)
		mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
}

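/* Enslave a vPort to a bridge by migrating it to the bridge's vFID and
 * enabling learning. A vPort that is already a member of a FID first
 * leaves it.
 */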
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
				      struct net_device *br_dev)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
	struct net_device *dev = mlxsw_sp_vport->dev;
	int err;

	if (f && !WARN_ON(!f->leave))
		f->leave(mlxsw_sp_vport);

	err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
	if (err) {
		netdev_err(dev, "Failed to join vFID\n");
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning\n");
		goto err_port_vid_learning_set;
	}

	mlxsw_sp_vport->learning = 1;
	mlxsw_sp_vport->learning_sync = 1;
	mlxsw_sp_vport->uc_flood = 1;
	mlxsw_sp_vport->bridged = 1;

	return 0;

err_port_vid_learning_set:
	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
	return err;
}

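/* Undo mlxsw_sp_vport_bridge_join(): disable learning for the VID and
 * release the vPort from the bridge's vFID.
 */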
static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);

	mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);

	mlxsw_sp_vport->learning = 0;
	mlxsw_sp_vport->learning_sync = 0;
	mlxsw_sp_vport->uc_flood = 0;
	mlxsw_sp_vport->bridged = 0;
}

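/* Return true if none of the port's vPorts is already a member of the
 * given bridge, false otherwise.
 */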
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct net_device *br_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);

		if (dev && dev == br_dev)
			return false;
	}

	return true;
}

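/* Handle bridge enslavement of a VLAN device configured on top of a
 * switch port: validate the request on PRECHANGEUPPER and join/leave
 * the bridge with the matching vPort on CHANGEUPPER.
 */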
static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
					  unsigned long event, void *ptr,
					  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct net_device *upper_dev;
	int err = 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return -EINVAL;
		if (!info->linking)
			break;
		/* We can't have multiple VLAN interfaces configured on
		 * the same port that are members of the same bridge.
		 */
		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
						       upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking) {
			if (WARN_ON(!mlxsw_sp_vport))
				return -EINVAL;
			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
							 upper_dev);
		} else {
			if (!mlxsw_sp_vport)
				return 0;
			mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
		}
	}

	return err;
}

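/* Propagate the event for a VLAN device configured on top of a LAG to
 * every switch port member of the LAG.
 */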
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}

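/* Dispatch the event for a VLAN device according to its real device,
 * which is either a switch port or a LAG device.
 */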
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);

	return 0;
}

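/* Top-level netdevice notifier: dispatch the event to the handler that
 * matches the device type.
 */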
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

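/* Register the notifier blocks before registering the driver with the
 * core, so that they are already in place when ports are created. The
 * error path must unregister both notifiers.
 */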
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	/* Unwind both notifier registrations; the original error path
	 * leaked the inetaddr notifier.
	 */
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);