/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
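
/* This file implements the switchdev ops for Spectrum ports: getting and
 * setting bridge port attributes (STP state, bridge port flags, ageing
 * time, VLAN filtering), adding, deleting and dumping switchdev objects
 * (VLANs, static FDB entries, MDB entries, IPv4 FIB entries), and the
 * delayed work that polls the device for learned and aged-out MACs and
 * syncs them to the bridge FDB.
 */
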
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	u16 fid = vid;

	fid = f ? f->fid : fid;

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}
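
/* orig_dev may be a VLAN upper of the port. In that case operations must
 * be performed on the vPort backing that VLAN device rather than on the
 * physical port itself, so resolve it here.
 */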
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}
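
/* All ports of a switch report the same parent ID, derived from the
 * switch base MAC; this is how upper layers recognize them as belonging
 * to one offload domain.
 */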
static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
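
/* switchdev attribute setting is transactional: each op is called once
 * with a prepare-phase transaction to validate and reserve resources, and
 * then again to commit. Helpers below that need no preparation simply
 * return early when switchdev_trans_ph_prepare() is true.
 */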
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool uc_set,
				     bool bm_set)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, uc_set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, bm_set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto err_flood_bm_set;

	goto buffer_out;

err_flood_bm_set:
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, !uc_set);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
	kfree(sftr_pl);
	return err;
}
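
/* Toggle unicast flooding for all of the port's current VLANs; a vPort
 * has a single {Port, VID} binding, so only its vFID entry is updated.
 */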
static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set)
{
	u16 vfid;

	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	vfid = mlxsw_sp_fid_to_vfid(fid);
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
}
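
/* The bridge port flags below are driven from user space, for example
 * (iproute2, device name given purely for illustration):
 *
 *	bridge link set dev sw1p1 flood off
 *	bridge link set dev sw1p1 learning on
 *
 * Only BR_FLOOD requires reprogramming the device; the learning flags
 * are cached and consulted when FDB notifications are processed.
 */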
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}
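
/* Spectrum only supports VLAN-aware bridges: a request to clear VLAN
 * filtering on the master bridge is rejected in the prepare phase.
 */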
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
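
/* FIDs (filtering identifiers) are the device's bridging domains. SFMR
 * above creates or destroys a FID; SVFA below maps a VID to a FID so
 * that tagged packets are classified to the right domain.
 */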
static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->fid = fid;

	return f;
}
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	struct mlxsw_sp_fid *f;
	int err;

	err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
	if (err)
		return ERR_PTR(err);

	/* Although all the ports member in the FID might be using a
	 * {Port, VID} to FID mapping, we create a global VID-to-FID
	 * mapping. This allows a port to transition to VLAN mode,
	 * knowing the global mapping exists.
	 */
	err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
	if (err)
		goto err_fid_map;

	f = mlxsw_sp_fid_alloc(fid);
	if (!f) {
		err = -ENOMEM;
		goto err_allocate_fid;
	}

	list_add(&f->list, &mlxsw_sp->fids);

	return f;

err_allocate_fid:
	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
	return ERR_PTR(err);
}
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
{
	u16 fid = f->fid;

	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_fid_map(mlxsw_sp, fid, false);

	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}
static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp_fid *f;

	if (test_bit(fid, mlxsw_sp_port->active_vlans))
		return 0;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);

	return 0;
}

static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (WARN_ON(!f))
		return;

	netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);

	if (--f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
}
static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
				 bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

	/* If port doesn't have vPorts, then it can use the global
	 * VID-to-FID mapping.
	 */
	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
}
static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 fid_begin, u16 fid_end)
{
	int fid, err;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
		if (err)
			goto err_port_fid_join;
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
					mlxsw_sp_port->uc_flood, true);
	if (err)
		goto err_port_flood_set;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
		if (err)
			goto err_port_fid_map;
	}

	return 0;

err_port_fid_map:
	for (fid--; fid >= fid_begin; fid--)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);
err_port_flood_set:
	fid = fid_end;
err_port_fid_join:
	for (fid--; fid >= fid_begin; fid--)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
	return err;
}
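
/* Leaving a FID range mirrors the join sequence in reverse: undo the
 * {Port, VID} mappings, disable flooding, and finally drop the FID
 * reference counts.
 */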
static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid_begin, u16 fid_end)
{
	int fid;

	for (fid = fid_begin; fid <= fid_end; fid++)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);

	for (fid = fid_begin; fid <= fid_end; fid++)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}
static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}
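
/* VLAN addition is ordered so that a failure at any step can be rolled
 * back: join the FIDs first, then set VLAN membership, then the PVID,
 * and only mark VLANs active (and refresh STP state) once the device
 * has accepted everything; see the error labels at the bottom.
 */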
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, old_pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
	if (err) {
		netdev_err(dev, "Failed to join FIDs\n");
		return err;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
	return err;
}
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}
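
/* The helpers below program unicast FDB records through the SFD
 * register, either against a local port or a LAG. Records are written
 * with an edit op when adding and a remove op when deleting.
 */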
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}

int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}
static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}
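
/* SMID holds the per-MID port bitmap. When a group is first created,
 * clear_all_ports is used to mask in every other port's membership bit
 * so the new group starts with only the requesting port as a member.
 */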
static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 vid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
			return mid;
	}
	return NULL;
}
static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}
static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}
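
/* Object add/del requests are dispatched by switchdev object ID. From
 * user space these typically originate from commands such as (iproute2,
 * device names given purely for illustration):
 *
 *	bridge vlan add dev sw1p1 vid 10 pvid untagged
 *	bridge fdb add de:ad:be:ef:00:01 dev sw1p1 master static
 *	bridge mdb add dev br0 port sw1p1 grp 239.1.1.1
 */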
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
					       SWITCHDEV_OBJ_IPV4_FIB(obj),
					       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
	}

	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
					 vlan->vid_end);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}
static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}
static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
					       SWITCHDEV_OBJ_IPV4_FIB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	struct mlxsw_sp_fid *f;
	u16 vport_fid;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	vport_fid = f ? f->fid : 0;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}
static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}
static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};
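
/* These ops are attached to each port netdev in
 * mlxsw_sp_port_switchdev_init() at the bottom of this file. The rest
 * of the file handles the FDB notification path, which keeps the
 * bridge's software FDB in sync with MACs learned by the device.
 */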
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);
	rtnl_unlock();

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
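
/* FDB initialization sets the default ageing time, then arms the
 * notification worker with MLXSW_SP_DEFAULT_LEARNING_INTERVAL. The
 * worker re-arms itself after every pass, so it runs for the lifetime
 * of the switch driver until cancelled in mlxsw_sp_fdb_fini().
 */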
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}