2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/slab.h>
42 #include <linux/device.h>
43 #include <linux/skbuff.h>
44 #include <linux/if_vlan.h>
45 #include <linux/if_bridge.h>
46 #include <linux/workqueue.h>
47 #include <linux/jiffies.h>
48 #include <net/switchdev.h>
54 static int mlxsw_sp_port_attr_get(struct net_device
*dev
,
55 struct switchdev_attr
*attr
)
57 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
58 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
61 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID
:
62 attr
->u
.ppid
.id_len
= sizeof(mlxsw_sp
->base_mac
);
63 memcpy(&attr
->u
.ppid
.id
, &mlxsw_sp
->base_mac
,
66 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS
:
67 attr
->u
.brport_flags
=
68 (mlxsw_sp_port
->learning
? BR_LEARNING
: 0) |
69 (mlxsw_sp_port
->learning_sync
? BR_LEARNING_SYNC
: 0) |
70 (mlxsw_sp_port
->uc_flood
? BR_FLOOD
: 0);
79 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
82 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
83 enum mlxsw_reg_spms_state spms_state
;
89 case BR_STATE_DISABLED
: /* fall-through */
90 case BR_STATE_FORWARDING
:
91 spms_state
= MLXSW_REG_SPMS_STATE_FORWARDING
;
93 case BR_STATE_LISTENING
: /* fall-through */
94 case BR_STATE_LEARNING
:
95 spms_state
= MLXSW_REG_SPMS_STATE_LEARNING
;
97 case BR_STATE_BLOCKING
:
98 spms_state
= MLXSW_REG_SPMS_STATE_DISCARDING
;
104 spms_pl
= kmalloc(MLXSW_REG_SPMS_LEN
, GFP_KERNEL
);
107 mlxsw_reg_spms_pack(spms_pl
, mlxsw_sp_port
->local_port
);
108 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, VLAN_N_VID
)
109 mlxsw_reg_spms_vid_pack(spms_pl
, vid
, spms_state
);
111 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spms
), spms_pl
);
116 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
117 struct switchdev_trans
*trans
,
120 if (switchdev_trans_ph_prepare(trans
))
123 mlxsw_sp_port
->stp_state
= state
;
124 return mlxsw_sp_port_stp_state_set(mlxsw_sp_port
, state
);
127 static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
128 u16 fid_begin
, u16 fid_end
, bool set
,
131 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
132 u16 local_port
= mlxsw_sp_port
->local_port
;
133 enum mlxsw_flood_table_type table_type
;
134 u16 range
= fid_end
- fid_begin
+ 1;
138 if (mlxsw_sp_port_is_vport(mlxsw_sp_port
)) {
139 table_type
= MLXSW_REG_SFGC_TABLE_TYPE_FID
;
140 local_port
= MLXSW_PORT_CPU_PORT
;
142 table_type
= MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST
;
145 sftr_pl
= kmalloc(MLXSW_REG_SFTR_LEN
, GFP_KERNEL
);
149 mlxsw_reg_sftr_pack(sftr_pl
, MLXSW_SP_FLOOD_TABLE_UC
, fid_begin
,
150 table_type
, range
, local_port
, set
);
151 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sftr
), sftr_pl
);
155 /* Flooding control allows one to decide whether a given port will
156 * flood unicast traffic for which there is no FDB entry.
161 mlxsw_reg_sftr_pack(sftr_pl
, MLXSW_SP_FLOOD_TABLE_BM
, fid_begin
,
162 table_type
, range
, local_port
, set
);
163 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sftr
), sftr_pl
);
170 static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
173 struct net_device
*dev
= mlxsw_sp_port
->dev
;
174 u16 vid
, last_visited_vid
;
177 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, VLAN_N_VID
) {
178 err
= __mlxsw_sp_port_flood_set(mlxsw_sp_port
, vid
, vid
, set
,
181 last_visited_vid
= vid
;
182 goto err_port_flood_set
;
189 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, last_visited_vid
)
190 __mlxsw_sp_port_flood_set(mlxsw_sp_port
, vid
, vid
, !set
, true);
191 netdev_err(dev
, "Failed to configure unicast flooding\n");
195 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port
*mlxsw_sp_vport
, u16 vfid
,
198 /* In case of vFIDs, index into the flooding table is relative to
199 * the start of the vFIDs range.
201 return __mlxsw_sp_port_flood_set(mlxsw_sp_vport
, vfid
, vfid
, set
, true);
204 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
205 struct switchdev_trans
*trans
,
206 unsigned long brport_flags
)
208 unsigned long uc_flood
= mlxsw_sp_port
->uc_flood
? BR_FLOOD
: 0;
212 if (switchdev_trans_ph_prepare(trans
))
215 if ((uc_flood
^ brport_flags
) & BR_FLOOD
) {
216 set
= mlxsw_sp_port
->uc_flood
? false : true;
217 err
= mlxsw_sp_port_uc_flood_set(mlxsw_sp_port
, set
);
222 mlxsw_sp_port
->uc_flood
= brport_flags
& BR_FLOOD
? 1 : 0;
223 mlxsw_sp_port
->learning
= brport_flags
& BR_LEARNING
? 1 : 0;
224 mlxsw_sp_port
->learning_sync
= brport_flags
& BR_LEARNING_SYNC
? 1 : 0;
229 static int mlxsw_sp_ageing_set(struct mlxsw_sp
*mlxsw_sp
, u32 ageing_time
)
231 char sfdat_pl
[MLXSW_REG_SFDAT_LEN
];
234 mlxsw_reg_sfdat_pack(sfdat_pl
, ageing_time
);
235 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfdat
), sfdat_pl
);
238 mlxsw_sp
->ageing_time
= ageing_time
;
242 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
243 struct switchdev_trans
*trans
,
244 unsigned long ageing_clock_t
)
246 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
247 unsigned long ageing_jiffies
= clock_t_to_jiffies(ageing_clock_t
);
248 u32 ageing_time
= jiffies_to_msecs(ageing_jiffies
) / 1000;
250 if (switchdev_trans_ph_prepare(trans
))
253 return mlxsw_sp_ageing_set(mlxsw_sp
, ageing_time
);
256 static int mlxsw_sp_port_attr_set(struct net_device
*dev
,
257 const struct switchdev_attr
*attr
,
258 struct switchdev_trans
*trans
)
260 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
264 case SWITCHDEV_ATTR_ID_PORT_STP_STATE
:
265 err
= mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port
, trans
,
268 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS
:
269 err
= mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port
, trans
,
270 attr
->u
.brport_flags
);
272 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME
:
273 err
= mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port
, trans
,
274 attr
->u
.ageing_time
);
284 static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 vid
)
286 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
287 char spvid_pl
[MLXSW_REG_SPVID_LEN
];
289 mlxsw_reg_spvid_pack(spvid_pl
, mlxsw_sp_port
->local_port
, vid
);
290 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(spvid
), spvid_pl
);
293 static int mlxsw_sp_fid_create(struct mlxsw_sp
*mlxsw_sp
, u16 fid
)
295 char sfmr_pl
[MLXSW_REG_SFMR_LEN
];
298 mlxsw_reg_sfmr_pack(sfmr_pl
, MLXSW_REG_SFMR_OP_CREATE_FID
, fid
, fid
);
299 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfmr
), sfmr_pl
);
304 set_bit(fid
, mlxsw_sp
->active_fids
);
308 static void mlxsw_sp_fid_destroy(struct mlxsw_sp
*mlxsw_sp
, u16 fid
)
310 char sfmr_pl
[MLXSW_REG_SFMR_LEN
];
312 clear_bit(fid
, mlxsw_sp
->active_fids
);
314 mlxsw_reg_sfmr_pack(sfmr_pl
, MLXSW_REG_SFMR_OP_DESTROY_FID
,
316 mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfmr
), sfmr_pl
);
319 static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 fid
)
321 enum mlxsw_reg_svfa_mt mt
;
323 if (!list_empty(&mlxsw_sp_port
->vports_list
))
324 mt
= MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
;
326 mt
= MLXSW_REG_SVFA_MT_VID_TO_FID
;
328 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, true, fid
, fid
);
331 static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 fid
)
333 enum mlxsw_reg_svfa_mt mt
;
335 if (list_empty(&mlxsw_sp_port
->vports_list
))
338 mt
= MLXSW_REG_SVFA_MT_PORT_VID_TO_FID
;
339 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
, false, fid
, fid
);
342 static int mlxsw_sp_port_add_vids(struct net_device
*dev
, u16 vid_begin
,
348 for (vid
= vid_begin
; vid
<= vid_end
; vid
++) {
349 err
= mlxsw_sp_port_add_vid(dev
, 0, vid
);
351 goto err_port_add_vid
;
356 for (vid
--; vid
>= vid_begin
; vid
--)
357 mlxsw_sp_port_kill_vid(dev
, 0, vid
);
361 static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
362 u16 vid_begin
, u16 vid_end
, bool is_member
,
368 for (vid
= vid_begin
; vid
<= vid_end
;
369 vid
+= MLXSW_REG_SPVM_REC_MAX_COUNT
) {
370 vid_e
= min((u16
) (vid
+ MLXSW_REG_SPVM_REC_MAX_COUNT
- 1),
373 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid_e
,
374 is_member
, untagged
);
382 static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
383 u16 vid_begin
, u16 vid_end
,
384 bool flag_untagged
, bool flag_pvid
)
386 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
387 struct net_device
*dev
= mlxsw_sp_port
->dev
;
388 u16 vid
, last_visited_vid
, old_pvid
;
389 enum mlxsw_reg_svfa_mt mt
;
392 /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
393 * not bridged, then packets ingressing through the port with
394 * the specified VIDs will be directed to CPU.
396 if (!mlxsw_sp_port
->bridged
)
397 return mlxsw_sp_port_add_vids(dev
, vid_begin
, vid_end
);
399 for (vid
= vid_begin
; vid
<= vid_end
; vid
++) {
400 if (!test_bit(vid
, mlxsw_sp
->active_fids
)) {
401 err
= mlxsw_sp_fid_create(mlxsw_sp
, vid
);
403 netdev_err(dev
, "Failed to create FID=%d\n",
408 /* When creating a FID, we set a VID to FID mapping
409 * regardless of the port's mode.
411 mt
= MLXSW_REG_SVFA_MT_VID_TO_FID
;
412 err
= mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port
, mt
,
415 netdev_err(dev
, "Failed to create FID=VID=%d mapping\n",
417 goto err_port_vid_to_fid_set
;
422 /* Set FID mapping according to port's mode */
423 for (vid
= vid_begin
; vid
<= vid_end
; vid
++) {
424 err
= mlxsw_sp_port_fid_map(mlxsw_sp_port
, vid
);
426 netdev_err(dev
, "Failed to map FID=%d", vid
);
427 last_visited_vid
= --vid
;
428 goto err_port_fid_map
;
432 err
= __mlxsw_sp_port_flood_set(mlxsw_sp_port
, vid_begin
, vid_end
,
435 netdev_err(dev
, "Failed to configure flooding\n");
436 goto err_port_flood_set
;
439 err
= __mlxsw_sp_port_vlans_set(mlxsw_sp_port
, vid_begin
, vid_end
,
440 true, flag_untagged
);
442 netdev_err(dev
, "Unable to add VIDs %d-%d\n", vid_begin
,
444 goto err_port_vlans_set
;
447 old_pvid
= mlxsw_sp_port
->pvid
;
448 if (flag_pvid
&& old_pvid
!= vid_begin
) {
449 err
= mlxsw_sp_port_pvid_set(mlxsw_sp_port
, vid_begin
);
451 netdev_err(dev
, "Unable to add PVID %d\n", vid_begin
);
452 goto err_port_pvid_set
;
454 mlxsw_sp_port
->pvid
= vid_begin
;
457 /* Changing activity bits only if HW operation succeded */
458 for (vid
= vid_begin
; vid
<= vid_end
; vid
++)
459 set_bit(vid
, mlxsw_sp_port
->active_vlans
);
461 /* STP state change must be done after we set active VLANs */
462 err
= mlxsw_sp_port_stp_state_set(mlxsw_sp_port
,
463 mlxsw_sp_port
->stp_state
);
465 netdev_err(dev
, "Failed to set STP state\n");
466 goto err_port_stp_state_set
;
471 err_port_vid_to_fid_set
:
472 mlxsw_sp_fid_destroy(mlxsw_sp
, vid
);
475 err_port_stp_state_set
:
476 for (vid
= vid_begin
; vid
<= vid_end
; vid
++)
477 clear_bit(vid
, mlxsw_sp_port
->active_vlans
);
478 if (old_pvid
!= mlxsw_sp_port
->pvid
)
479 mlxsw_sp_port_pvid_set(mlxsw_sp_port
, old_pvid
);
481 __mlxsw_sp_port_vlans_set(mlxsw_sp_port
, vid_begin
, vid_end
, false,
484 __mlxsw_sp_port_flood_set(mlxsw_sp_port
, vid_begin
, vid_end
, false,
487 last_visited_vid
= vid_end
;
489 for (vid
= last_visited_vid
; vid
>= vid_begin
; vid
--)
490 mlxsw_sp_port_fid_unmap(mlxsw_sp_port
, vid
);
494 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
495 const struct switchdev_obj_port_vlan
*vlan
,
496 struct switchdev_trans
*trans
)
498 bool untagged_flag
= vlan
->flags
& BRIDGE_VLAN_INFO_UNTAGGED
;
499 bool pvid_flag
= vlan
->flags
& BRIDGE_VLAN_INFO_PVID
;
501 if (switchdev_trans_ph_prepare(trans
))
504 return __mlxsw_sp_port_vlans_add(mlxsw_sp_port
,
505 vlan
->vid_begin
, vlan
->vid_end
,
506 untagged_flag
, pvid_flag
);
509 static enum mlxsw_reg_sfd_rec_policy
mlxsw_sp_sfd_rec_policy(bool dynamic
)
511 return dynamic
? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS
:
512 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY
;
515 static enum mlxsw_reg_sfd_op
mlxsw_sp_sfd_op(bool adding
)
517 return adding
? MLXSW_REG_SFD_OP_WRITE_EDIT
:
518 MLXSW_REG_SFD_OP_WRITE_REMOVE
;
521 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp_port
*mlxsw_sp_port
,
522 const char *mac
, u16 vid
, bool adding
,
525 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
529 sfd_pl
= kmalloc(MLXSW_REG_SFD_LEN
, GFP_KERNEL
);
533 mlxsw_reg_sfd_pack(sfd_pl
, mlxsw_sp_sfd_op(adding
), 0);
534 mlxsw_reg_sfd_uc_pack(sfd_pl
, 0, mlxsw_sp_sfd_rec_policy(dynamic
),
535 mac
, vid
, MLXSW_REG_SFD_REC_ACTION_NOP
,
536 mlxsw_sp_port
->local_port
);
537 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfd
), sfd_pl
);
543 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp
*mlxsw_sp
, u16 lag_id
,
544 const char *mac
, u16 vid
, bool adding
,
550 sfd_pl
= kmalloc(MLXSW_REG_SFD_LEN
, GFP_KERNEL
);
554 mlxsw_reg_sfd_pack(sfd_pl
, mlxsw_sp_sfd_op(adding
), 0);
555 mlxsw_reg_sfd_uc_lag_pack(sfd_pl
, 0, mlxsw_sp_sfd_rec_policy(dynamic
),
556 mac
, vid
, MLXSW_REG_SFD_REC_ACTION_NOP
,
558 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfd
), sfd_pl
);
565 mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
566 const struct switchdev_obj_port_fdb
*fdb
,
567 struct switchdev_trans
*trans
)
571 if (switchdev_trans_ph_prepare(trans
))
575 vid
= mlxsw_sp_port
->pvid
;
577 if (!mlxsw_sp_port
->lagged
)
578 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port
,
579 fdb
->addr
, vid
, true, false);
581 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port
->mlxsw_sp
,
582 mlxsw_sp_port
->lag_id
,
583 fdb
->addr
, vid
, true, false);
586 static int mlxsw_sp_port_obj_add(struct net_device
*dev
,
587 const struct switchdev_obj
*obj
,
588 struct switchdev_trans
*trans
)
590 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
594 case SWITCHDEV_OBJ_ID_PORT_VLAN
:
595 err
= mlxsw_sp_port_vlans_add(mlxsw_sp_port
,
596 SWITCHDEV_OBJ_PORT_VLAN(obj
),
599 case SWITCHDEV_OBJ_ID_PORT_FDB
:
600 err
= mlxsw_sp_port_fdb_static_add(mlxsw_sp_port
,
601 SWITCHDEV_OBJ_PORT_FDB(obj
),
612 static int mlxsw_sp_port_kill_vids(struct net_device
*dev
, u16 vid_begin
,
618 for (vid
= vid_begin
; vid
<= vid_end
; vid
++) {
619 err
= mlxsw_sp_port_kill_vid(dev
, 0, vid
);
627 static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port
*mlxsw_sp_port
,
628 u16 vid_begin
, u16 vid_end
, bool init
)
630 struct net_device
*dev
= mlxsw_sp_port
->dev
;
634 /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
635 * not bridged, then prevent packets ingressing through the
636 * port with the specified VIDs from being trapped to CPU.
638 if (!init
&& !mlxsw_sp_port
->bridged
)
639 return mlxsw_sp_port_kill_vids(dev
, vid_begin
, vid_end
);
641 err
= __mlxsw_sp_port_vlans_set(mlxsw_sp_port
, vid_begin
, vid_end
,
644 netdev_err(dev
, "Unable to del VIDs %d-%d\n", vid_begin
,
649 pvid
= mlxsw_sp_port
->pvid
;
650 if (pvid
>= vid_begin
&& pvid
<= vid_end
&& pvid
!= 1) {
651 /* Default VLAN is always 1 */
652 err
= mlxsw_sp_port_pvid_set(mlxsw_sp_port
, 1);
654 netdev_err(dev
, "Unable to del PVID %d\n", pvid
);
657 mlxsw_sp_port
->pvid
= 1;
663 err
= __mlxsw_sp_port_flood_set(mlxsw_sp_port
, vid_begin
, vid_end
,
666 netdev_err(dev
, "Failed to clear flooding\n");
670 for (vid
= vid_begin
; vid
<= vid_end
; vid
++) {
671 /* Remove FID mapping in case of Virtual mode */
672 err
= mlxsw_sp_port_fid_unmap(mlxsw_sp_port
, vid
);
674 netdev_err(dev
, "Failed to unmap FID=%d", vid
);
680 /* Changing activity bits only if HW operation succeded */
681 for (vid
= vid_begin
; vid
<= vid_end
; vid
++)
682 clear_bit(vid
, mlxsw_sp_port
->active_vlans
);
687 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port
*mlxsw_sp_port
,
688 const struct switchdev_obj_port_vlan
*vlan
)
690 return __mlxsw_sp_port_vlans_del(mlxsw_sp_port
,
691 vlan
->vid_begin
, vlan
->vid_end
, false);
695 mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port
*mlxsw_sp_port
,
696 const struct switchdev_obj_port_fdb
*fdb
)
698 if (!mlxsw_sp_port
->lagged
)
699 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port
,
703 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port
->mlxsw_sp
,
704 mlxsw_sp_port
->lag_id
,
709 static int mlxsw_sp_port_obj_del(struct net_device
*dev
,
710 const struct switchdev_obj
*obj
)
712 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
716 case SWITCHDEV_OBJ_ID_PORT_VLAN
:
717 err
= mlxsw_sp_port_vlans_del(mlxsw_sp_port
,
718 SWITCHDEV_OBJ_PORT_VLAN(obj
));
720 case SWITCHDEV_OBJ_ID_PORT_FDB
:
721 err
= mlxsw_sp_port_fdb_static_del(mlxsw_sp_port
,
722 SWITCHDEV_OBJ_PORT_FDB(obj
));
732 static struct mlxsw_sp_port
*mlxsw_sp_lag_rep_port(struct mlxsw_sp
*mlxsw_sp
,
735 struct mlxsw_sp_port
*mlxsw_sp_port
;
738 for (i
= 0; i
< MLXSW_SP_PORT_PER_LAG_MAX
; i
++) {
739 mlxsw_sp_port
= mlxsw_sp_port_lagged_get(mlxsw_sp
, lag_id
, i
);
741 return mlxsw_sp_port
;
746 static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port
*mlxsw_sp_port
,
747 struct switchdev_obj_port_fdb
*fdb
,
748 switchdev_obj_dump_cb_t
*cb
)
750 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
761 sfd_pl
= kmalloc(MLXSW_REG_SFD_LEN
, GFP_KERNEL
);
765 mlxsw_reg_sfd_pack(sfd_pl
, MLXSW_REG_SFD_OP_QUERY_DUMP
, 0);
767 mlxsw_reg_sfd_num_rec_set(sfd_pl
, MLXSW_REG_SFD_REC_MAX_COUNT
);
768 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(sfd
), sfd_pl
);
772 num_rec
= mlxsw_reg_sfd_num_rec_get(sfd_pl
);
774 /* Even in case of error, we have to run the dump to the end
775 * so the session in firmware is finished.
780 for (i
= 0; i
< num_rec
; i
++) {
781 switch (mlxsw_reg_sfd_rec_type_get(sfd_pl
, i
)) {
782 case MLXSW_REG_SFD_REC_TYPE_UNICAST
:
783 mlxsw_reg_sfd_uc_unpack(sfd_pl
, i
, mac
, &vid
,
785 if (local_port
== mlxsw_sp_port
->local_port
) {
786 ether_addr_copy(fdb
->addr
, mac
);
787 fdb
->ndm_state
= NUD_REACHABLE
;
794 case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG
:
795 mlxsw_reg_sfd_uc_lag_unpack(sfd_pl
, i
,
798 mlxsw_sp_lag_rep_port(mlxsw_sp
, lag_id
)) {
799 ether_addr_copy(fdb
->addr
, mac
);
800 fdb
->ndm_state
= NUD_REACHABLE
;
809 } while (num_rec
== MLXSW_REG_SFD_REC_MAX_COUNT
);
813 return stored_err
? stored_err
: err
;
816 static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port
*mlxsw_sp_port
,
817 struct switchdev_obj_port_vlan
*vlan
,
818 switchdev_obj_dump_cb_t
*cb
)
823 for_each_set_bit(vid
, mlxsw_sp_port
->active_vlans
, VLAN_N_VID
) {
825 if (vid
== mlxsw_sp_port
->pvid
)
826 vlan
->flags
|= BRIDGE_VLAN_INFO_PVID
;
827 vlan
->vid_begin
= vid
;
829 err
= cb(&vlan
->obj
);
836 static int mlxsw_sp_port_obj_dump(struct net_device
*dev
,
837 struct switchdev_obj
*obj
,
838 switchdev_obj_dump_cb_t
*cb
)
840 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
844 case SWITCHDEV_OBJ_ID_PORT_VLAN
:
845 err
= mlxsw_sp_port_vlan_dump(mlxsw_sp_port
,
846 SWITCHDEV_OBJ_PORT_VLAN(obj
), cb
);
848 case SWITCHDEV_OBJ_ID_PORT_FDB
:
849 err
= mlxsw_sp_port_fdb_dump(mlxsw_sp_port
,
850 SWITCHDEV_OBJ_PORT_FDB(obj
), cb
);
860 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops
= {
861 .switchdev_port_attr_get
= mlxsw_sp_port_attr_get
,
862 .switchdev_port_attr_set
= mlxsw_sp_port_attr_set
,
863 .switchdev_port_obj_add
= mlxsw_sp_port_obj_add
,
864 .switchdev_port_obj_del
= mlxsw_sp_port_obj_del
,
865 .switchdev_port_obj_dump
= mlxsw_sp_port_obj_dump
,
868 static void mlxsw_sp_fdb_call_notifiers(bool learning
, bool learning_sync
,
869 bool adding
, char *mac
, u16 vid
,
870 struct net_device
*dev
)
872 struct switchdev_notifier_fdb_info info
;
873 unsigned long notifier_type
;
875 if (learning
&& learning_sync
) {
878 notifier_type
= adding
? SWITCHDEV_FDB_ADD
: SWITCHDEV_FDB_DEL
;
879 call_switchdev_notifiers(notifier_type
, dev
, &info
.info
);
883 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp
*mlxsw_sp
,
884 char *sfn_pl
, int rec_index
,
887 struct mlxsw_sp_port
*mlxsw_sp_port
;
893 mlxsw_reg_sfn_mac_unpack(sfn_pl
, rec_index
, mac
, &vid
, &local_port
);
894 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
895 if (!mlxsw_sp_port
) {
896 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Incorrect local port in FDB notification\n");
900 err
= mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port
, mac
, vid
,
901 adding
&& mlxsw_sp_port
->learning
, true);
904 netdev_err(mlxsw_sp_port
->dev
, "Failed to set FDB entry\n");
908 mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port
->learning
,
909 mlxsw_sp_port
->learning_sync
,
910 adding
, mac
, vid
, mlxsw_sp_port
->dev
);
913 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp
*mlxsw_sp
,
914 char *sfn_pl
, int rec_index
,
917 struct mlxsw_sp_port
*mlxsw_sp_port
;
923 mlxsw_reg_sfn_mac_lag_unpack(sfn_pl
, rec_index
, mac
, &vid
, &lag_id
);
924 mlxsw_sp_port
= mlxsw_sp_lag_rep_port(mlxsw_sp
, lag_id
);
925 if (!mlxsw_sp_port
) {
926 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Cannot find port representor for LAG\n");
930 err
= mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp
, lag_id
, mac
, vid
,
931 adding
&& mlxsw_sp_port
->learning
,
935 netdev_err(mlxsw_sp_port
->dev
, "Failed to set FDB entry\n");
939 mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port
->learning
,
940 mlxsw_sp_port
->learning_sync
,
942 mlxsw_sp_lag_get(mlxsw_sp
, lag_id
)->dev
);
945 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp
*mlxsw_sp
,
946 char *sfn_pl
, int rec_index
)
948 switch (mlxsw_reg_sfn_rec_type_get(sfn_pl
, rec_index
)) {
949 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC
:
950 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp
, sfn_pl
,
953 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC
:
954 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp
, sfn_pl
,
957 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG
:
958 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp
, sfn_pl
,
961 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG
:
962 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp
, sfn_pl
,
968 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp
*mlxsw_sp
)
970 schedule_delayed_work(&mlxsw_sp
->fdb_notify
.dw
,
971 msecs_to_jiffies(mlxsw_sp
->fdb_notify
.interval
));
974 static void mlxsw_sp_fdb_notify_work(struct work_struct
*work
)
976 struct mlxsw_sp
*mlxsw_sp
;
982 sfn_pl
= kmalloc(MLXSW_REG_SFN_LEN
, GFP_KERNEL
);
986 mlxsw_sp
= container_of(work
, struct mlxsw_sp
, fdb_notify
.dw
.work
);
989 mlxsw_reg_sfn_pack(sfn_pl
);
990 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(sfn
), sfn_pl
);
992 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Failed to get FDB notifications\n");
995 num_rec
= mlxsw_reg_sfn_num_rec_get(sfn_pl
);
996 for (i
= 0; i
< num_rec
; i
++)
997 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp
, sfn_pl
, i
);
1002 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp
);
1005 static int mlxsw_sp_fdb_init(struct mlxsw_sp
*mlxsw_sp
)
1009 err
= mlxsw_sp_ageing_set(mlxsw_sp
, MLXSW_SP_DEFAULT_AGEING_TIME
);
1011 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to set default ageing time\n");
1014 INIT_DELAYED_WORK(&mlxsw_sp
->fdb_notify
.dw
, mlxsw_sp_fdb_notify_work
);
1015 mlxsw_sp
->fdb_notify
.interval
= MLXSW_SP_DEFAULT_LEARNING_INTERVAL
;
1016 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp
);
1020 static void mlxsw_sp_fdb_fini(struct mlxsw_sp
*mlxsw_sp
)
1022 cancel_delayed_work_sync(&mlxsw_sp
->fdb_notify
.dw
);
1025 static void mlxsw_sp_fids_fini(struct mlxsw_sp
*mlxsw_sp
)
1029 for_each_set_bit(fid
, mlxsw_sp
->active_fids
, VLAN_N_VID
)
1030 mlxsw_sp_fid_destroy(mlxsw_sp
, fid
);
/* Initialize switchdev support for the whole device. */
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}
/* Tear down switchdev support: stop FDB polling, then destroy FIDs. */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}
1044 int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
1046 struct net_device
*dev
= mlxsw_sp_port
->dev
;
1049 /* Allow only untagged packets to ingress and tag them internally
1052 mlxsw_sp_port
->pvid
= 1;
1053 err
= __mlxsw_sp_port_vlans_del(mlxsw_sp_port
, 0, VLAN_N_VID
, true);
1055 netdev_err(dev
, "Unable to init VLANs\n");
1059 /* Add implicit VLAN interface in the device, so that untagged
1060 * packets will be classified to the default vFID.
1062 err
= mlxsw_sp_port_add_vid(dev
, 0, 1);
1064 netdev_err(dev
, "Failed to configure default vFID\n");
1069 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
1071 mlxsw_sp_port
->dev
->switchdev_ops
= &mlxsw_sp_port_switchdev_ops
;
1074 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port
*mlxsw_sp_port
)