/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	u16 fid = vid;

	fid = f ? f->fid : fid;

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

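/* The switchdev attribute handlers in this file follow the two-phase
 * transaction model: while switchdev_trans_ph_prepare() is true only
 * validation (or nothing at all) is done, and the hardware and driver
 * state are only touched in the commit phase.
 */
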
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto err_flood_bm_set;

	goto buffer_out;

err_flood_bm_set:
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, !set);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
	kfree(sftr_pl);
	return err;
}

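/* __mlxsw_sp_port_flood_set() above updates two flood tables for the
 * given index range: the unicast (UC) table for unknown unicast and,
 * unless only_uc is set, the broadcast/multicast (BM) table. If the BM
 * write fails, the UC entry is rolled back so both tables stay in sync.
 */
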
static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set)
{
	u16 vfid;

	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	vfid = mlxsw_sp_fid_to_vfid(fid);
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
					 false);
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->fid = fid;

	return f;
}

struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	struct mlxsw_sp_fid *f;
	int err;

	err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
	if (err)
		return ERR_PTR(err);

	/* Although all the ports member in the FID might be using a
	 * {Port, VID} to FID mapping, we create a global VID-to-FID
	 * mapping. This allows a port to transition to VLAN mode,
	 * knowing the global mapping exists.
	 */
	err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
	if (err)
		goto err_fid_map;

	f = mlxsw_sp_fid_alloc(fid);
	if (!f) {
		err = -ENOMEM;
		goto err_allocate_fid;
	}

	list_add(&f->list, &mlxsw_sp->fids);

	return f;

err_allocate_fid:
	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
	return ERR_PTR(err);
}

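/* FIDs created above are reference counted by the per-port join/leave
 * helpers further down in this file; mlxsw_sp_fid_destroy() is only
 * invoked once the last port has left the FID.
 */
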
void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
{
	u16 fid = f->fid;

	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}

static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);

	return 0;
}

static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (WARN_ON(!f))
		return;

	netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);

	if (--f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
				 bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

	/* If port doesn't have vPorts, then it can use the global
	 * VID-to-FID mapping.
	 */
	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
}

static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 fid_begin, u16 fid_end)
{
	int fid, err;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
		if (err)
			goto err_port_fid_join;
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
					true, false);
	if (err)
		goto err_port_flood_set;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
		if (err)
			goto err_port_fid_map;
	}

	return 0;

err_port_fid_map:
	for (fid--; fid >= fid_begin; fid--)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);
err_port_flood_set:
	fid = fid_end + 1;
err_port_fid_join:
	for (fid--; fid >= fid_begin; fid--)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
	return err;
}

static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid_begin, u16 fid_end)
{
	int fid;

	for (fid = fid_begin; fid <= fid_end; fid++)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);

	for (fid = fid_begin; fid <= fid_end; fid++)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

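/* A PVID of zero disallows untagged traffic on the port, while a
 * non-zero PVID programs the SPVID register and, if untagged traffic
 * was previously disallowed, re-enables it through the SPAFT register.
 */
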
static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, old_pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
	if (err) {
		netdev_err(dev, "Failed to join FIDs\n");
		return err;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}

int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 vid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 vid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->vid = vid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

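/* MC groups are tracked in the bridge MID list above: the first
 * reference allocates a MID index and writes the multicast FDB record,
 * while later references only add the port to the already programmed
 * MID.
 */
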
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
					       SWITCHDEV_OBJ_IPV4_FIB(obj),
					       trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
	}

	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
					 vlan->vid_end);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_IPV4_FIB:
		err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
					       SWITCHDEV_OBJ_IPV4_FIB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	struct mlxsw_sp_fid *f;
	u16 vport_fid;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	vport_fid = f ? f->fid : 0;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

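/* The FDB dump above is paged: the SFD register is queried repeatedly
 * until a partial page is returned. Even if the dump callback fails,
 * the loop keeps querying so the dump session opened in firmware is
 * finished; the first callback error is stored and returned at the end.
 */
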
static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

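/* These switchdev ops are attached to each port netdev in
 * mlxsw_sp_port_switchdev_init() at the bottom of this file, so bridge
 * and FIB configuration done in the kernel is offloaded to the device
 * through the handlers above.
 */
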
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

	adding = adding && mlxsw_sp_port->learning;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

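/* FDB learning notifications are picked up by polling: the delayed work
 * above queries the SFN register, processes the returned records and
 * then re-arms itself after fdb_notify.interval milliseconds.
 */
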
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}