2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/slab.h>
42 #include <linux/device.h>
43 #include <linux/skbuff.h>
44 #include <linux/if_vlan.h>
45 #include <linux/if_bridge.h>
46 #include <linux/workqueue.h>
47 #include <linux/jiffies.h>
48 #include <linux/rtnetlink.h>
49 #include <net/switchdev.h>
/* Per-bridge-model (802.1Q vs. 802.1D) operations; defined further below. */
struct mlxsw_sp_bridge_ops;
57 struct mlxsw_sp_bridge
{
58 struct mlxsw_sp
*mlxsw_sp
;
60 struct delayed_work dw
;
61 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
62 unsigned int interval
; /* ms */
64 #define MLXSW_SP_MIN_AGEING_TIME 10
65 #define MLXSW_SP_MAX_AGEING_TIME 1000000
66 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
68 bool vlan_enabled_exists
;
69 struct list_head bridges_list
;
70 struct list_head mids_list
;
71 DECLARE_BITMAP(mids_bitmap
, MLXSW_SP_MID_MAX
);
72 const struct mlxsw_sp_bridge_ops
*bridge_8021q_ops
;
73 const struct mlxsw_sp_bridge_ops
*bridge_8021d_ops
;
76 struct mlxsw_sp_bridge_device
{
77 struct net_device
*dev
;
78 struct list_head list
;
79 struct list_head ports_list
;
82 const struct mlxsw_sp_bridge_ops
*ops
;
85 struct mlxsw_sp_bridge_port
{
86 struct net_device
*dev
;
87 struct mlxsw_sp_bridge_device
*bridge_device
;
88 struct list_head list
;
89 struct list_head vlans_list
;
90 unsigned int ref_count
;
101 struct mlxsw_sp_bridge_vlan
{
102 struct list_head list
;
103 struct list_head port_vlan_list
;
107 struct mlxsw_sp_bridge_ops
{
108 int (*port_join
)(struct mlxsw_sp_bridge_device
*bridge_device
,
109 struct mlxsw_sp_bridge_port
*bridge_port
,
110 struct mlxsw_sp_port
*mlxsw_sp_port
);
111 void (*port_leave
)(struct mlxsw_sp_bridge_device
*bridge_device
,
112 struct mlxsw_sp_bridge_port
*bridge_port
,
113 struct mlxsw_sp_port
*mlxsw_sp_port
);
114 struct mlxsw_sp_fid
*
115 (*fid_get
)(struct mlxsw_sp_bridge_device
*bridge_device
,
120 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp
*mlxsw_sp
,
121 struct mlxsw_sp_bridge_port
*bridge_port
,
124 static struct mlxsw_sp_bridge_device
*
125 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge
*bridge
,
126 const struct net_device
*br_dev
)
128 struct mlxsw_sp_bridge_device
*bridge_device
;
130 list_for_each_entry(bridge_device
, &bridge
->bridges_list
, list
)
131 if (bridge_device
->dev
== br_dev
)
132 return bridge_device
;
137 static struct mlxsw_sp_bridge_device
*
138 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge
*bridge
,
139 struct net_device
*br_dev
)
141 struct device
*dev
= bridge
->mlxsw_sp
->bus_info
->dev
;
142 struct mlxsw_sp_bridge_device
*bridge_device
;
143 bool vlan_enabled
= br_vlan_enabled(br_dev
);
145 if (vlan_enabled
&& bridge
->vlan_enabled_exists
) {
146 dev_err(dev
, "Only one VLAN-aware bridge is supported\n");
147 return ERR_PTR(-EINVAL
);
150 bridge_device
= kzalloc(sizeof(*bridge_device
), GFP_KERNEL
);
152 return ERR_PTR(-ENOMEM
);
154 bridge_device
->dev
= br_dev
;
155 bridge_device
->vlan_enabled
= vlan_enabled
;
156 bridge_device
->multicast_enabled
= br_multicast_enabled(br_dev
);
157 INIT_LIST_HEAD(&bridge_device
->ports_list
);
159 bridge
->vlan_enabled_exists
= true;
160 bridge_device
->ops
= bridge
->bridge_8021q_ops
;
162 bridge_device
->ops
= bridge
->bridge_8021d_ops
;
164 list_add(&bridge_device
->list
, &bridge
->bridges_list
);
166 return bridge_device
;
170 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge
*bridge
,
171 struct mlxsw_sp_bridge_device
*bridge_device
)
173 list_del(&bridge_device
->list
);
174 if (bridge_device
->vlan_enabled
)
175 bridge
->vlan_enabled_exists
= false;
176 WARN_ON(!list_empty(&bridge_device
->ports_list
));
177 kfree(bridge_device
);
/* Find-or-create semantics for a bridge offload record. */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (bridge_device)
		return bridge_device;

	return mlxsw_sp_bridge_device_create(bridge, br_dev);
}
194 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge
*bridge
,
195 struct mlxsw_sp_bridge_device
*bridge_device
)
197 if (list_empty(&bridge_device
->ports_list
))
198 mlxsw_sp_bridge_device_destroy(bridge
, bridge_device
);
201 static struct mlxsw_sp_bridge_port
*
202 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device
*bridge_device
,
203 const struct net_device
*brport_dev
)
205 struct mlxsw_sp_bridge_port
*bridge_port
;
207 list_for_each_entry(bridge_port
, &bridge_device
->ports_list
, list
) {
208 if (bridge_port
->dev
== brport_dev
)
215 static struct mlxsw_sp_bridge_port
*
216 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge
*bridge
,
217 struct net_device
*brport_dev
)
219 struct net_device
*br_dev
= netdev_master_upper_dev_get(brport_dev
);
220 struct mlxsw_sp_bridge_device
*bridge_device
;
225 bridge_device
= mlxsw_sp_bridge_device_find(bridge
, br_dev
);
229 return __mlxsw_sp_bridge_port_find(bridge_device
, brport_dev
);
232 static struct mlxsw_sp_bridge_port
*
233 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device
*bridge_device
,
234 struct net_device
*brport_dev
)
236 struct mlxsw_sp_bridge_port
*bridge_port
;
237 struct mlxsw_sp_port
*mlxsw_sp_port
;
239 bridge_port
= kzalloc(sizeof(*bridge_port
), GFP_KERNEL
);
243 mlxsw_sp_port
= mlxsw_sp_port_dev_lower_find(brport_dev
);
244 bridge_port
->lagged
= mlxsw_sp_port
->lagged
;
245 if (bridge_port
->lagged
)
246 bridge_port
->lag_id
= mlxsw_sp_port
->lag_id
;
248 bridge_port
->system_port
= mlxsw_sp_port
->local_port
;
249 bridge_port
->dev
= brport_dev
;
250 bridge_port
->bridge_device
= bridge_device
;
251 bridge_port
->stp_state
= BR_STATE_DISABLED
;
252 bridge_port
->flags
= BR_LEARNING
| BR_FLOOD
| BR_LEARNING_SYNC
;
253 INIT_LIST_HEAD(&bridge_port
->vlans_list
);
254 list_add(&bridge_port
->list
, &bridge_device
->ports_list
);
255 bridge_port
->ref_count
= 1;
261 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port
*bridge_port
)
263 list_del(&bridge_port
->list
);
264 WARN_ON(!list_empty(&bridge_port
->vlans_list
));
269 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port
*
272 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_lower_get(bridge_port
->dev
);
274 /* In case ports were pulled from out of a bridged LAG, then
275 * it's possible the reference count isn't zero, yet the bridge
276 * port should be destroyed, as it's no longer an upper of ours.
278 if (!mlxsw_sp
&& list_empty(&bridge_port
->vlans_list
))
280 else if (bridge_port
->ref_count
== 0)
286 static struct mlxsw_sp_bridge_port
*
287 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge
*bridge
,
288 struct net_device
*brport_dev
)
290 struct net_device
*br_dev
= netdev_master_upper_dev_get(brport_dev
);
291 struct mlxsw_sp_bridge_device
*bridge_device
;
292 struct mlxsw_sp_bridge_port
*bridge_port
;
295 bridge_port
= mlxsw_sp_bridge_port_find(bridge
, brport_dev
);
297 bridge_port
->ref_count
++;
301 bridge_device
= mlxsw_sp_bridge_device_get(bridge
, br_dev
);
302 if (IS_ERR(bridge_device
))
303 return ERR_CAST(bridge_device
);
305 bridge_port
= mlxsw_sp_bridge_port_create(bridge_device
, brport_dev
);
308 goto err_bridge_port_create
;
313 err_bridge_port_create
:
314 mlxsw_sp_bridge_device_put(bridge
, bridge_device
);
318 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge
*bridge
,
319 struct mlxsw_sp_bridge_port
*bridge_port
)
321 struct mlxsw_sp_bridge_device
*bridge_device
;
323 bridge_port
->ref_count
--;
324 if (!mlxsw_sp_bridge_port_should_destroy(bridge_port
))
326 bridge_device
= bridge_port
->bridge_device
;
327 mlxsw_sp_bridge_port_destroy(bridge_port
);
328 mlxsw_sp_bridge_device_put(bridge
, bridge_device
);
331 static struct mlxsw_sp_port_vlan
*
332 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port
*mlxsw_sp_port
,
333 const struct mlxsw_sp_bridge_device
*
337 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
339 list_for_each_entry(mlxsw_sp_port_vlan
, &mlxsw_sp_port
->vlans_list
,
341 if (!mlxsw_sp_port_vlan
->bridge_port
)
343 if (mlxsw_sp_port_vlan
->bridge_port
->bridge_device
!=
346 if (bridge_device
->vlan_enabled
&&
347 mlxsw_sp_port_vlan
->vid
!= vid
)
349 return mlxsw_sp_port_vlan
;
355 static struct mlxsw_sp_port_vlan
*
356 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port
*mlxsw_sp_port
,
359 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
361 list_for_each_entry(mlxsw_sp_port_vlan
, &mlxsw_sp_port
->vlans_list
,
363 struct mlxsw_sp_fid
*fid
= mlxsw_sp_port_vlan
->fid
;
365 if (fid
&& mlxsw_sp_fid_index(fid
) == fid_index
)
366 return mlxsw_sp_port_vlan
;
372 static struct mlxsw_sp_bridge_vlan
*
373 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port
*bridge_port
,
376 struct mlxsw_sp_bridge_vlan
*bridge_vlan
;
378 list_for_each_entry(bridge_vlan
, &bridge_port
->vlans_list
, list
) {
379 if (bridge_vlan
->vid
== vid
)
386 static struct mlxsw_sp_bridge_vlan
*
387 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port
*bridge_port
, u16 vid
)
389 struct mlxsw_sp_bridge_vlan
*bridge_vlan
;
391 bridge_vlan
= kzalloc(sizeof(*bridge_vlan
), GFP_KERNEL
);
395 INIT_LIST_HEAD(&bridge_vlan
->port_vlan_list
);
396 bridge_vlan
->vid
= vid
;
397 list_add(&bridge_vlan
->list
, &bridge_port
->vlans_list
);
403 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan
*bridge_vlan
)
405 list_del(&bridge_vlan
->list
);
406 WARN_ON(!list_empty(&bridge_vlan
->port_vlan_list
));
410 static struct mlxsw_sp_bridge_vlan
*
411 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port
*bridge_port
, u16 vid
)
413 struct mlxsw_sp_bridge_vlan
*bridge_vlan
;
415 bridge_vlan
= mlxsw_sp_bridge_vlan_find(bridge_port
, vid
);
419 return mlxsw_sp_bridge_vlan_create(bridge_port
, vid
);
422 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan
*bridge_vlan
)
424 if (list_empty(&bridge_vlan
->port_vlan_list
))
425 mlxsw_sp_bridge_vlan_destroy(bridge_vlan
);
428 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge
*bridge
,
429 struct net_device
*dev
,
430 unsigned long *brport_flags
)
432 struct mlxsw_sp_bridge_port
*bridge_port
;
434 bridge_port
= mlxsw_sp_bridge_port_find(bridge
, dev
);
435 if (WARN_ON(!bridge_port
))
438 memcpy(brport_flags
, &bridge_port
->flags
, sizeof(*brport_flags
));
441 static int mlxsw_sp_port_attr_get(struct net_device
*dev
,
442 struct switchdev_attr
*attr
)
444 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
445 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
448 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID
:
449 attr
->u
.ppid
.id_len
= sizeof(mlxsw_sp
->base_mac
);
450 memcpy(&attr
->u
.ppid
.id
, &mlxsw_sp
->base_mac
,
451 attr
->u
.ppid
.id_len
);
453 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS
:
454 mlxsw_sp_port_bridge_flags_get(mlxsw_sp
->bridge
, attr
->orig_dev
,
455 &attr
->u
.brport_flags
);
457 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT
:
458 attr
->u
.brport_flags_support
= BR_LEARNING
| BR_FLOOD
;
468 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
469 struct mlxsw_sp_bridge_vlan
*bridge_vlan
,
472 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
474 list_for_each_entry(mlxsw_sp_port_vlan
, &bridge_vlan
->port_vlan_list
,
476 if (mlxsw_sp_port_vlan
->mlxsw_sp_port
!= mlxsw_sp_port
)
478 return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port
,
479 bridge_vlan
->vid
, state
);
485 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
486 struct switchdev_trans
*trans
,
487 struct net_device
*orig_dev
,
490 struct mlxsw_sp_bridge_port
*bridge_port
;
491 struct mlxsw_sp_bridge_vlan
*bridge_vlan
;
494 if (switchdev_trans_ph_prepare(trans
))
497 /* It's possible we failed to enslave the port, yet this
498 * operation is executed due to it being deferred.
500 bridge_port
= mlxsw_sp_bridge_port_find(mlxsw_sp_port
->mlxsw_sp
->bridge
,
505 list_for_each_entry(bridge_vlan
, &bridge_port
->vlans_list
, list
) {
506 err
= mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port
,
509 goto err_port_bridge_vlan_stp_set
;
512 bridge_port
->stp_state
= state
;
516 err_port_bridge_vlan_stp_set
:
517 list_for_each_entry_continue_reverse(bridge_vlan
,
518 &bridge_port
->vlans_list
, list
)
519 mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port
, bridge_vlan
,
520 bridge_port
->stp_state
);
525 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
526 struct mlxsw_sp_bridge_vlan
*bridge_vlan
,
527 enum mlxsw_sp_flood_type packet_type
,
530 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
532 list_for_each_entry(mlxsw_sp_port_vlan
, &bridge_vlan
->port_vlan_list
,
534 if (mlxsw_sp_port_vlan
->mlxsw_sp_port
!= mlxsw_sp_port
)
536 return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan
->fid
,
538 mlxsw_sp_port
->local_port
,
546 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
547 struct mlxsw_sp_bridge_port
*bridge_port
,
548 enum mlxsw_sp_flood_type packet_type
,
551 struct mlxsw_sp_bridge_vlan
*bridge_vlan
;
554 list_for_each_entry(bridge_vlan
, &bridge_port
->vlans_list
, list
) {
555 err
= mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port
,
560 goto err_port_bridge_vlan_flood_set
;
565 err_port_bridge_vlan_flood_set
:
566 list_for_each_entry_continue_reverse(bridge_vlan
,
567 &bridge_port
->vlans_list
, list
)
568 mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port
, bridge_vlan
,
569 packet_type
, !member
);
574 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
575 struct mlxsw_sp_bridge_vlan
*bridge_vlan
,
578 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
579 u16 vid
= bridge_vlan
->vid
;
581 list_for_each_entry(mlxsw_sp_port_vlan
, &bridge_vlan
->port_vlan_list
,
583 if (mlxsw_sp_port_vlan
->mlxsw_sp_port
!= mlxsw_sp_port
)
585 return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
, vid
, set
);
592 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
593 struct mlxsw_sp_bridge_port
*bridge_port
,
596 struct mlxsw_sp_bridge_vlan
*bridge_vlan
;
599 list_for_each_entry(bridge_vlan
, &bridge_port
->vlans_list
, list
) {
600 err
= mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port
,
603 goto err_port_bridge_vlan_learning_set
;
608 err_port_bridge_vlan_learning_set
:
609 list_for_each_entry_continue_reverse(bridge_vlan
,
610 &bridge_port
->vlans_list
, list
)
611 mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port
,
616 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
617 struct switchdev_trans
*trans
,
618 struct net_device
*orig_dev
,
619 unsigned long brport_flags
)
621 struct mlxsw_sp_bridge_port
*bridge_port
;
624 if (switchdev_trans_ph_prepare(trans
))
627 bridge_port
= mlxsw_sp_bridge_port_find(mlxsw_sp_port
->mlxsw_sp
->bridge
,
629 if (WARN_ON(!bridge_port
))
632 err
= mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port
, bridge_port
,
633 MLXSW_SP_FLOOD_TYPE_UC
,
634 brport_flags
& BR_FLOOD
);
638 err
= mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port
, bridge_port
,
639 brport_flags
& BR_LEARNING
);
643 memcpy(&bridge_port
->flags
, &brport_flags
, sizeof(brport_flags
));
648 static int mlxsw_sp_ageing_set(struct mlxsw_sp
*mlxsw_sp
, u32 ageing_time
)
650 char sfdat_pl
[MLXSW_REG_SFDAT_LEN
];
653 mlxsw_reg_sfdat_pack(sfdat_pl
, ageing_time
);
654 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfdat
), sfdat_pl
);
657 mlxsw_sp
->bridge
->ageing_time
= ageing_time
;
661 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
662 struct switchdev_trans
*trans
,
663 unsigned long ageing_clock_t
)
665 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
666 unsigned long ageing_jiffies
= clock_t_to_jiffies(ageing_clock_t
);
667 u32 ageing_time
= jiffies_to_msecs(ageing_jiffies
) / 1000;
669 if (switchdev_trans_ph_prepare(trans
)) {
670 if (ageing_time
< MLXSW_SP_MIN_AGEING_TIME
||
671 ageing_time
> MLXSW_SP_MAX_AGEING_TIME
)
677 return mlxsw_sp_ageing_set(mlxsw_sp
, ageing_time
);
680 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
681 struct switchdev_trans
*trans
,
682 struct net_device
*orig_dev
,
685 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
686 struct mlxsw_sp_bridge_device
*bridge_device
;
688 if (!switchdev_trans_ph_prepare(trans
))
691 bridge_device
= mlxsw_sp_bridge_device_find(mlxsw_sp
->bridge
, orig_dev
);
692 if (WARN_ON(!bridge_device
))
695 if (bridge_device
->vlan_enabled
== vlan_enabled
)
698 netdev_err(bridge_device
->dev
, "VLAN filtering can't be changed for existing bridge\n");
702 static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
703 struct switchdev_trans
*trans
,
704 struct net_device
*orig_dev
,
705 bool is_port_mc_router
)
707 struct mlxsw_sp_bridge_port
*bridge_port
;
709 if (switchdev_trans_ph_prepare(trans
))
712 bridge_port
= mlxsw_sp_bridge_port_find(mlxsw_sp_port
->mlxsw_sp
->bridge
,
714 if (WARN_ON(!bridge_port
))
717 if (!bridge_port
->bridge_device
->multicast_enabled
)
720 return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port
, bridge_port
,
721 MLXSW_SP_FLOOD_TYPE_MC
,
725 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
726 struct switchdev_trans
*trans
,
727 struct net_device
*orig_dev
,
730 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
731 struct mlxsw_sp_bridge_device
*bridge_device
;
732 struct mlxsw_sp_bridge_port
*bridge_port
;
735 if (switchdev_trans_ph_prepare(trans
))
738 /* It's possible we failed to enslave the port, yet this
739 * operation is executed due to it being deferred.
741 bridge_device
= mlxsw_sp_bridge_device_find(mlxsw_sp
->bridge
, orig_dev
);
745 list_for_each_entry(bridge_port
, &bridge_device
->ports_list
, list
) {
746 enum mlxsw_sp_flood_type packet_type
= MLXSW_SP_FLOOD_TYPE_MC
;
747 bool member
= mc_disabled
? true : bridge_port
->mrouter
;
749 err
= mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port
,
751 packet_type
, member
);
756 bridge_device
->multicast_enabled
= !mc_disabled
;
761 static int mlxsw_sp_port_attr_set(struct net_device
*dev
,
762 const struct switchdev_attr
*attr
,
763 struct switchdev_trans
*trans
)
765 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
769 case SWITCHDEV_ATTR_ID_PORT_STP_STATE
:
770 err
= mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port
, trans
,
774 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS
:
775 err
= mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port
, trans
,
777 attr
->u
.brport_flags
);
779 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME
:
780 err
= mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port
, trans
,
781 attr
->u
.ageing_time
);
783 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING
:
784 err
= mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port
, trans
,
786 attr
->u
.vlan_filtering
);
788 case SWITCHDEV_ATTR_ID_PORT_MROUTER
:
789 err
= mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port
, trans
,
793 case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED
:
794 err
= mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port
, trans
,
796 attr
->u
.mc_disabled
);
806 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port
*bridge_port
)
808 const struct mlxsw_sp_bridge_device
*bridge_device
;
810 bridge_device
= bridge_port
->bridge_device
;
811 return !bridge_device
->multicast_enabled
? true : bridge_port
->mrouter
;
815 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
,
816 struct mlxsw_sp_bridge_port
*bridge_port
)
818 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp_port_vlan
->mlxsw_sp_port
;
819 struct mlxsw_sp_bridge_device
*bridge_device
;
820 u8 local_port
= mlxsw_sp_port
->local_port
;
821 u16 vid
= mlxsw_sp_port_vlan
->vid
;
822 struct mlxsw_sp_fid
*fid
;
825 bridge_device
= bridge_port
->bridge_device
;
826 fid
= bridge_device
->ops
->fid_get(bridge_device
, vid
);
830 err
= mlxsw_sp_fid_flood_set(fid
, MLXSW_SP_FLOOD_TYPE_UC
, local_port
,
831 bridge_port
->flags
& BR_FLOOD
);
833 goto err_fid_uc_flood_set
;
835 err
= mlxsw_sp_fid_flood_set(fid
, MLXSW_SP_FLOOD_TYPE_MC
, local_port
,
836 mlxsw_sp_mc_flood(bridge_port
));
838 goto err_fid_mc_flood_set
;
840 err
= mlxsw_sp_fid_flood_set(fid
, MLXSW_SP_FLOOD_TYPE_BC
, local_port
,
843 goto err_fid_bc_flood_set
;
845 err
= mlxsw_sp_fid_port_vid_map(fid
, mlxsw_sp_port
, vid
);
847 goto err_fid_port_vid_map
;
849 mlxsw_sp_port_vlan
->fid
= fid
;
853 err_fid_port_vid_map
:
854 mlxsw_sp_fid_flood_set(fid
, MLXSW_SP_FLOOD_TYPE_BC
, local_port
, false);
855 err_fid_bc_flood_set
:
856 mlxsw_sp_fid_flood_set(fid
, MLXSW_SP_FLOOD_TYPE_MC
, local_port
, false);
857 err_fid_mc_flood_set
:
858 mlxsw_sp_fid_flood_set(fid
, MLXSW_SP_FLOOD_TYPE_UC
, local_port
, false);
859 err_fid_uc_flood_set
:
860 mlxsw_sp_fid_put(fid
);
865 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
)
867 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp_port_vlan
->mlxsw_sp_port
;
868 struct mlxsw_sp_fid
*fid
= mlxsw_sp_port_vlan
->fid
;
869 u8 local_port
= mlxsw_sp_port
->local_port
;
870 u16 vid
= mlxsw_sp_port_vlan
->vid
;
872 mlxsw_sp_port_vlan
->fid
= NULL
;
873 mlxsw_sp_fid_port_vid_unmap(fid
, mlxsw_sp_port
, vid
);
874 mlxsw_sp_fid_flood_set(fid
, MLXSW_SP_FLOOD_TYPE_BC
, local_port
, false);
875 mlxsw_sp_fid_flood_set(fid
, MLXSW_SP_FLOOD_TYPE_MC
, local_port
, false);
876 mlxsw_sp_fid_flood_set(fid
, MLXSW_SP_FLOOD_TYPE_UC
, local_port
, false);
877 mlxsw_sp_fid_put(fid
);
881 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port
*mlxsw_sp_port
,
882 u16 vid
, bool is_pvid
)
886 else if (mlxsw_sp_port
->pvid
== vid
)
887 return 0; /* Dis-allow untagged packets */
889 return mlxsw_sp_port
->pvid
;
893 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
,
894 struct mlxsw_sp_bridge_port
*bridge_port
)
896 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp_port_vlan
->mlxsw_sp_port
;
897 struct mlxsw_sp_bridge_vlan
*bridge_vlan
;
898 u16 vid
= mlxsw_sp_port_vlan
->vid
;
901 /* No need to continue if only VLAN flags were changed */
902 if (mlxsw_sp_port_vlan
->bridge_port
)
905 err
= mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan
, bridge_port
);
909 err
= mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
, vid
,
910 bridge_port
->flags
& BR_LEARNING
);
912 goto err_port_vid_learning_set
;
914 err
= mlxsw_sp_port_vid_stp_set(mlxsw_sp_port
, vid
,
915 bridge_port
->stp_state
);
917 goto err_port_vid_stp_set
;
919 bridge_vlan
= mlxsw_sp_bridge_vlan_get(bridge_port
, vid
);
922 goto err_bridge_vlan_get
;
925 list_add(&mlxsw_sp_port_vlan
->bridge_vlan_node
,
926 &bridge_vlan
->port_vlan_list
);
928 mlxsw_sp_bridge_port_get(mlxsw_sp_port
->mlxsw_sp
->bridge
,
930 mlxsw_sp_port_vlan
->bridge_port
= bridge_port
;
935 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port
, vid
, BR_STATE_DISABLED
);
936 err_port_vid_stp_set
:
937 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
, vid
, false);
938 err_port_vid_learning_set
:
939 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan
);
944 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
)
946 struct mlxsw_sp_port
*mlxsw_sp_port
= mlxsw_sp_port_vlan
->mlxsw_sp_port
;
947 struct mlxsw_sp_fid
*fid
= mlxsw_sp_port_vlan
->fid
;
948 struct mlxsw_sp_bridge_vlan
*bridge_vlan
;
949 struct mlxsw_sp_bridge_port
*bridge_port
;
950 u16 vid
= mlxsw_sp_port_vlan
->vid
;
953 if (WARN_ON(mlxsw_sp_fid_type(fid
) != MLXSW_SP_FID_TYPE_8021Q
&&
954 mlxsw_sp_fid_type(fid
) != MLXSW_SP_FID_TYPE_8021D
))
957 bridge_port
= mlxsw_sp_port_vlan
->bridge_port
;
958 bridge_vlan
= mlxsw_sp_bridge_vlan_find(bridge_port
, vid
);
959 last
= list_is_singular(&bridge_vlan
->port_vlan_list
);
961 list_del(&mlxsw_sp_port_vlan
->bridge_vlan_node
);
962 mlxsw_sp_bridge_vlan_put(bridge_vlan
);
963 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port
, vid
, BR_STATE_DISABLED
);
964 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port
, vid
, false);
966 mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port
->mlxsw_sp
,
968 mlxsw_sp_fid_index(fid
));
969 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan
);
971 mlxsw_sp_bridge_port_put(mlxsw_sp_port
->mlxsw_sp
->bridge
, bridge_port
);
972 mlxsw_sp_port_vlan
->bridge_port
= NULL
;
976 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
977 struct mlxsw_sp_bridge_port
*bridge_port
,
978 u16 vid
, bool is_untagged
, bool is_pvid
)
980 u16 pvid
= mlxsw_sp_port_pvid_determine(mlxsw_sp_port
, vid
, is_pvid
);
981 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
982 u16 old_pvid
= mlxsw_sp_port
->pvid
;
985 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_get(mlxsw_sp_port
, vid
);
986 if (IS_ERR(mlxsw_sp_port_vlan
))
987 return PTR_ERR(mlxsw_sp_port_vlan
);
989 err
= mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, true,
992 goto err_port_vlan_set
;
994 err
= mlxsw_sp_port_pvid_set(mlxsw_sp_port
, pvid
);
996 goto err_port_pvid_set
;
998 err
= mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan
, bridge_port
);
1000 goto err_port_vlan_bridge_join
;
1004 err_port_vlan_bridge_join
:
1005 mlxsw_sp_port_pvid_set(mlxsw_sp_port
, old_pvid
);
1007 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
1009 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan
);
1013 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
1014 const struct switchdev_obj_port_vlan
*vlan
,
1015 struct switchdev_trans
*trans
)
1017 bool flag_untagged
= vlan
->flags
& BRIDGE_VLAN_INFO_UNTAGGED
;
1018 bool flag_pvid
= vlan
->flags
& BRIDGE_VLAN_INFO_PVID
;
1019 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1020 struct net_device
*orig_dev
= vlan
->obj
.orig_dev
;
1021 struct mlxsw_sp_bridge_port
*bridge_port
;
1024 if (switchdev_trans_ph_prepare(trans
))
1027 bridge_port
= mlxsw_sp_bridge_port_find(mlxsw_sp
->bridge
, orig_dev
);
1028 if (WARN_ON(!bridge_port
))
1031 if (!bridge_port
->bridge_device
->vlan_enabled
)
1034 for (vid
= vlan
->vid_begin
; vid
<= vlan
->vid_end
; vid
++) {
1037 err
= mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port
, bridge_port
,
1047 static enum mlxsw_reg_sfdf_flush_type
mlxsw_sp_fdb_flush_type(bool lagged
)
1049 return lagged
? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID
:
1050 MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID
;
1054 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp
*mlxsw_sp
,
1055 struct mlxsw_sp_bridge_port
*bridge_port
,
1058 bool lagged
= bridge_port
->lagged
;
1059 char sfdf_pl
[MLXSW_REG_SFDF_LEN
];
1062 system_port
= lagged
? bridge_port
->lag_id
: bridge_port
->system_port
;
1063 mlxsw_reg_sfdf_pack(sfdf_pl
, mlxsw_sp_fdb_flush_type(lagged
));
1064 mlxsw_reg_sfdf_fid_set(sfdf_pl
, fid_index
);
1065 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl
, system_port
);
1067 return mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfdf
), sfdf_pl
);
1070 static enum mlxsw_reg_sfd_rec_policy
mlxsw_sp_sfd_rec_policy(bool dynamic
)
1072 return dynamic
? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS
:
1073 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY
;
1076 static enum mlxsw_reg_sfd_op
mlxsw_sp_sfd_op(bool adding
)
1078 return adding
? MLXSW_REG_SFD_OP_WRITE_EDIT
:
1079 MLXSW_REG_SFD_OP_WRITE_REMOVE
;
1082 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
1083 const char *mac
, u16 fid
, bool adding
,
1084 enum mlxsw_reg_sfd_rec_action action
,
1090 sfd_pl
= kmalloc(MLXSW_REG_SFD_LEN
, GFP_KERNEL
);
1094 mlxsw_reg_sfd_pack(sfd_pl
, mlxsw_sp_sfd_op(adding
), 0);
1095 mlxsw_reg_sfd_uc_pack(sfd_pl
, 0, mlxsw_sp_sfd_rec_policy(dynamic
),
1096 mac
, fid
, action
, local_port
);
1097 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfd
), sfd_pl
);
1103 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp
*mlxsw_sp
, u8 local_port
,
1104 const char *mac
, u16 fid
, bool adding
,
1107 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp
, local_port
, mac
, fid
, adding
,
1108 MLXSW_REG_SFD_REC_ACTION_NOP
, dynamic
);
1111 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp
*mlxsw_sp
, const char *mac
, u16 fid
,
1114 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp
, 0, mac
, fid
, adding
,
1115 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER
,
1119 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp
*mlxsw_sp
, u16 lag_id
,
1120 const char *mac
, u16 fid
, u16 lag_vid
,
1121 bool adding
, bool dynamic
)
1126 sfd_pl
= kmalloc(MLXSW_REG_SFD_LEN
, GFP_KERNEL
);
1130 mlxsw_reg_sfd_pack(sfd_pl
, mlxsw_sp_sfd_op(adding
), 0);
1131 mlxsw_reg_sfd_uc_lag_pack(sfd_pl
, 0, mlxsw_sp_sfd_rec_policy(dynamic
),
1132 mac
, fid
, MLXSW_REG_SFD_REC_ACTION_NOP
,
1134 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfd
), sfd_pl
);
1141 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port
*mlxsw_sp_port
,
1142 struct switchdev_notifier_fdb_info
*fdb_info
, bool adding
)
1144 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1145 struct net_device
*orig_dev
= fdb_info
->info
.dev
;
1146 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1147 struct mlxsw_sp_bridge_device
*bridge_device
;
1148 struct mlxsw_sp_bridge_port
*bridge_port
;
1151 bridge_port
= mlxsw_sp_bridge_port_find(mlxsw_sp
->bridge
, orig_dev
);
1155 bridge_device
= bridge_port
->bridge_device
;
1156 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port
,
1159 if (!mlxsw_sp_port_vlan
)
1162 fid_index
= mlxsw_sp_fid_index(mlxsw_sp_port_vlan
->fid
);
1163 vid
= mlxsw_sp_port_vlan
->vid
;
1165 if (!bridge_port
->lagged
)
1166 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp
,
1167 bridge_port
->system_port
,
1168 fdb_info
->addr
, fid_index
,
1171 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp
,
1172 bridge_port
->lag_id
,
1173 fdb_info
->addr
, fid_index
,
1174 vid
, adding
, false);
1177 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp
*mlxsw_sp
, const char *addr
,
1178 u16 fid
, u16 mid
, bool adding
)
1183 sfd_pl
= kmalloc(MLXSW_REG_SFD_LEN
, GFP_KERNEL
);
1187 mlxsw_reg_sfd_pack(sfd_pl
, mlxsw_sp_sfd_op(adding
), 0);
1188 mlxsw_reg_sfd_mc_pack(sfd_pl
, 0, addr
, fid
,
1189 MLXSW_REG_SFD_REC_ACTION_NOP
, mid
);
1190 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(sfd
), sfd_pl
);
1195 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port
*mlxsw_sp_port
, u16 mid
,
1196 bool add
, bool clear_all_ports
)
1198 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1202 smid_pl
= kmalloc(MLXSW_REG_SMID_LEN
, GFP_KERNEL
);
1206 mlxsw_reg_smid_pack(smid_pl
, mid
, mlxsw_sp_port
->local_port
, add
);
1207 if (clear_all_ports
) {
1208 for (i
= 1; i
< mlxsw_core_max_ports(mlxsw_sp
->core
); i
++)
1209 if (mlxsw_sp
->ports
[i
])
1210 mlxsw_reg_smid_port_mask_set(smid_pl
, i
, 1);
1212 err
= mlxsw_reg_write(mlxsw_sp
->core
, MLXSW_REG(smid
), smid_pl
);
1217 static struct mlxsw_sp_mid
*__mlxsw_sp_mc_get(struct mlxsw_sp
*mlxsw_sp
,
1218 const unsigned char *addr
,
1221 struct mlxsw_sp_mid
*mid
;
1223 list_for_each_entry(mid
, &mlxsw_sp
->bridge
->mids_list
, list
) {
1224 if (ether_addr_equal(mid
->addr
, addr
) && mid
->fid
== fid
)
1230 static struct mlxsw_sp_mid
*__mlxsw_sp_mc_alloc(struct mlxsw_sp
*mlxsw_sp
,
1231 const unsigned char *addr
,
1234 struct mlxsw_sp_mid
*mid
;
1237 mid_idx
= find_first_zero_bit(mlxsw_sp
->bridge
->mids_bitmap
,
1239 if (mid_idx
== MLXSW_SP_MID_MAX
)
1242 mid
= kzalloc(sizeof(*mid
), GFP_KERNEL
);
1246 set_bit(mid_idx
, mlxsw_sp
->bridge
->mids_bitmap
);
1247 ether_addr_copy(mid
->addr
, addr
);
1251 list_add_tail(&mid
->list
, &mlxsw_sp
->bridge
->mids_list
);
1256 static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp
*mlxsw_sp
,
1257 struct mlxsw_sp_mid
*mid
)
1259 if (--mid
->ref_count
== 0) {
1260 list_del(&mid
->list
);
1261 clear_bit(mid
->mid
, mlxsw_sp
->bridge
->mids_bitmap
);
1268 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port
*mlxsw_sp_port
,
1269 const struct switchdev_obj_port_mdb
*mdb
,
1270 struct switchdev_trans
*trans
)
1272 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1273 struct net_device
*orig_dev
= mdb
->obj
.orig_dev
;
1274 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1275 struct net_device
*dev
= mlxsw_sp_port
->dev
;
1276 struct mlxsw_sp_bridge_device
*bridge_device
;
1277 struct mlxsw_sp_bridge_port
*bridge_port
;
1278 struct mlxsw_sp_mid
*mid
;
1282 if (switchdev_trans_ph_prepare(trans
))
1285 bridge_port
= mlxsw_sp_bridge_port_find(mlxsw_sp
->bridge
, orig_dev
);
1286 if (WARN_ON(!bridge_port
))
1289 bridge_device
= bridge_port
->bridge_device
;
1290 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port
,
1293 if (WARN_ON(!mlxsw_sp_port_vlan
))
1296 fid_index
= mlxsw_sp_fid_index(mlxsw_sp_port_vlan
->fid
);
1298 mid
= __mlxsw_sp_mc_get(mlxsw_sp
, mdb
->addr
, fid_index
);
1300 mid
= __mlxsw_sp_mc_alloc(mlxsw_sp
, mdb
->addr
, fid_index
);
1302 netdev_err(dev
, "Unable to allocate MC group\n");
1308 err
= mlxsw_sp_port_smid_set(mlxsw_sp_port
, mid
->mid
, true,
1309 mid
->ref_count
== 1);
1311 netdev_err(dev
, "Unable to set SMID\n");
1315 if (mid
->ref_count
== 1) {
1316 err
= mlxsw_sp_port_mdb_op(mlxsw_sp
, mdb
->addr
, fid_index
,
1319 netdev_err(dev
, "Unable to set MC SFD\n");
1327 __mlxsw_sp_mc_dec_ref(mlxsw_sp
, mid
);
1331 static int mlxsw_sp_port_obj_add(struct net_device
*dev
,
1332 const struct switchdev_obj
*obj
,
1333 struct switchdev_trans
*trans
)
1335 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1339 case SWITCHDEV_OBJ_ID_PORT_VLAN
:
1340 err
= mlxsw_sp_port_vlans_add(mlxsw_sp_port
,
1341 SWITCHDEV_OBJ_PORT_VLAN(obj
),
1344 case SWITCHDEV_OBJ_ID_PORT_MDB
:
1345 err
= mlxsw_sp_port_mdb_add(mlxsw_sp_port
,
1346 SWITCHDEV_OBJ_PORT_MDB(obj
),
1358 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port
*mlxsw_sp_port
,
1359 struct mlxsw_sp_bridge_port
*bridge_port
, u16 vid
)
1361 u16 pvid
= mlxsw_sp_port
->pvid
== vid
? 0 : vid
;
1362 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1364 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, vid
);
1365 if (WARN_ON(!mlxsw_sp_port_vlan
))
1368 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan
);
1369 mlxsw_sp_port_pvid_set(mlxsw_sp_port
, pvid
);
1370 mlxsw_sp_port_vlan_set(mlxsw_sp_port
, vid
, vid
, false, false);
1371 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan
);
1374 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port
*mlxsw_sp_port
,
1375 const struct switchdev_obj_port_vlan
*vlan
)
1377 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1378 struct net_device
*orig_dev
= vlan
->obj
.orig_dev
;
1379 struct mlxsw_sp_bridge_port
*bridge_port
;
1382 bridge_port
= mlxsw_sp_bridge_port_find(mlxsw_sp
->bridge
, orig_dev
);
1383 if (WARN_ON(!bridge_port
))
1386 if (!bridge_port
->bridge_device
->vlan_enabled
)
1389 for (vid
= vlan
->vid_begin
; vid
<= vlan
->vid_end
; vid
++)
1390 mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port
, bridge_port
, vid
);
1395 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port
*mlxsw_sp_port
,
1396 const struct switchdev_obj_port_mdb
*mdb
)
1398 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1399 struct net_device
*orig_dev
= mdb
->obj
.orig_dev
;
1400 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1401 struct mlxsw_sp_bridge_device
*bridge_device
;
1402 struct net_device
*dev
= mlxsw_sp_port
->dev
;
1403 struct mlxsw_sp_bridge_port
*bridge_port
;
1404 struct mlxsw_sp_mid
*mid
;
1409 bridge_port
= mlxsw_sp_bridge_port_find(mlxsw_sp
->bridge
, orig_dev
);
1410 if (WARN_ON(!bridge_port
))
1413 bridge_device
= bridge_port
->bridge_device
;
1414 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port
,
1417 if (WARN_ON(!mlxsw_sp_port_vlan
))
1420 fid_index
= mlxsw_sp_fid_index(mlxsw_sp_port_vlan
->fid
);
1422 mid
= __mlxsw_sp_mc_get(mlxsw_sp
, mdb
->addr
, fid_index
);
1424 netdev_err(dev
, "Unable to remove port from MC DB\n");
1428 err
= mlxsw_sp_port_smid_set(mlxsw_sp_port
, mid
->mid
, false, false);
1430 netdev_err(dev
, "Unable to remove port from SMID\n");
1433 if (__mlxsw_sp_mc_dec_ref(mlxsw_sp
, mid
)) {
1434 err
= mlxsw_sp_port_mdb_op(mlxsw_sp
, mdb
->addr
, fid_index
,
1437 netdev_err(dev
, "Unable to remove MC SFD\n");
1443 static int mlxsw_sp_port_obj_del(struct net_device
*dev
,
1444 const struct switchdev_obj
*obj
)
1446 struct mlxsw_sp_port
*mlxsw_sp_port
= netdev_priv(dev
);
1450 case SWITCHDEV_OBJ_ID_PORT_VLAN
:
1451 err
= mlxsw_sp_port_vlans_del(mlxsw_sp_port
,
1452 SWITCHDEV_OBJ_PORT_VLAN(obj
));
1454 case SWITCHDEV_OBJ_ID_PORT_MDB
:
1455 err
= mlxsw_sp_port_mdb_del(mlxsw_sp_port
,
1456 SWITCHDEV_OBJ_PORT_MDB(obj
));
1466 static struct mlxsw_sp_port
*mlxsw_sp_lag_rep_port(struct mlxsw_sp
*mlxsw_sp
,
1469 struct mlxsw_sp_port
*mlxsw_sp_port
;
1470 u64 max_lag_members
;
1473 max_lag_members
= MLXSW_CORE_RES_GET(mlxsw_sp
->core
,
1475 for (i
= 0; i
< max_lag_members
; i
++) {
1476 mlxsw_sp_port
= mlxsw_sp_port_lagged_get(mlxsw_sp
, lag_id
, i
);
1478 return mlxsw_sp_port
;
1483 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops
= {
1484 .switchdev_port_attr_get
= mlxsw_sp_port_attr_get
,
1485 .switchdev_port_attr_set
= mlxsw_sp_port_attr_set
,
1486 .switchdev_port_obj_add
= mlxsw_sp_port_obj_add
,
1487 .switchdev_port_obj_del
= mlxsw_sp_port_obj_del
,
1491 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device
*bridge_device
,
1492 struct mlxsw_sp_bridge_port
*bridge_port
,
1493 struct mlxsw_sp_port
*mlxsw_sp_port
)
1495 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1497 if (is_vlan_dev(bridge_port
->dev
))
1500 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, 1);
1501 if (WARN_ON(!mlxsw_sp_port_vlan
))
1504 /* Let VLAN-aware bridge take care of its own VLANs */
1505 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan
);
1511 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device
*bridge_device
,
1512 struct mlxsw_sp_bridge_port
*bridge_port
,
1513 struct mlxsw_sp_port
*mlxsw_sp_port
)
1515 mlxsw_sp_port_vlan_get(mlxsw_sp_port
, 1);
1516 /* Make sure untagged frames are allowed to ingress */
1517 mlxsw_sp_port_pvid_set(mlxsw_sp_port
, 1);
1520 static struct mlxsw_sp_fid
*
1521 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device
*bridge_device
,
1524 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_lower_get(bridge_device
->dev
);
1526 return mlxsw_sp_fid_8021q_get(mlxsw_sp
, vid
);
1529 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops
= {
1530 .port_join
= mlxsw_sp_bridge_8021q_port_join
,
1531 .port_leave
= mlxsw_sp_bridge_8021q_port_leave
,
1532 .fid_get
= mlxsw_sp_bridge_8021q_fid_get
,
1536 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port
*mlxsw_sp_port
,
1537 const struct net_device
*br_dev
)
1539 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1541 list_for_each_entry(mlxsw_sp_port_vlan
, &mlxsw_sp_port
->vlans_list
,
1543 if (mlxsw_sp_port_vlan
->bridge_port
&&
1544 mlxsw_sp_port_vlan
->bridge_port
->bridge_device
->dev
==
1553 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device
*bridge_device
,
1554 struct mlxsw_sp_bridge_port
*bridge_port
,
1555 struct mlxsw_sp_port
*mlxsw_sp_port
)
1557 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1560 if (!is_vlan_dev(bridge_port
->dev
))
1562 vid
= vlan_dev_vlan_id(bridge_port
->dev
);
1564 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, vid
);
1565 if (WARN_ON(!mlxsw_sp_port_vlan
))
1568 if (mlxsw_sp_port_is_br_member(mlxsw_sp_port
, bridge_device
->dev
)) {
1569 netdev_err(mlxsw_sp_port
->dev
, "Can't bridge VLAN uppers of the same port\n");
1573 /* Port is no longer usable as a router interface */
1574 if (mlxsw_sp_port_vlan
->fid
)
1575 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan
);
1577 return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan
, bridge_port
);
1581 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device
*bridge_device
,
1582 struct mlxsw_sp_bridge_port
*bridge_port
,
1583 struct mlxsw_sp_port
*mlxsw_sp_port
)
1585 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1586 u16 vid
= vlan_dev_vlan_id(bridge_port
->dev
);
1588 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port
, vid
);
1589 if (WARN_ON(!mlxsw_sp_port_vlan
))
1592 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan
);
1595 static struct mlxsw_sp_fid
*
1596 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device
*bridge_device
,
1599 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_lower_get(bridge_device
->dev
);
1601 return mlxsw_sp_fid_8021d_get(mlxsw_sp
, bridge_device
->dev
->ifindex
);
1604 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops
= {
1605 .port_join
= mlxsw_sp_bridge_8021d_port_join
,
1606 .port_leave
= mlxsw_sp_bridge_8021d_port_leave
,
1607 .fid_get
= mlxsw_sp_bridge_8021d_fid_get
,
1610 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port
*mlxsw_sp_port
,
1611 struct net_device
*brport_dev
,
1612 struct net_device
*br_dev
)
1614 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1615 struct mlxsw_sp_bridge_device
*bridge_device
;
1616 struct mlxsw_sp_bridge_port
*bridge_port
;
1619 bridge_port
= mlxsw_sp_bridge_port_get(mlxsw_sp
->bridge
, brport_dev
);
1620 if (IS_ERR(bridge_port
))
1621 return PTR_ERR(bridge_port
);
1622 bridge_device
= bridge_port
->bridge_device
;
1624 err
= bridge_device
->ops
->port_join(bridge_device
, bridge_port
,
1632 mlxsw_sp_bridge_port_put(mlxsw_sp
->bridge
, bridge_port
);
1636 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port
*mlxsw_sp_port
,
1637 struct net_device
*brport_dev
,
1638 struct net_device
*br_dev
)
1640 struct mlxsw_sp
*mlxsw_sp
= mlxsw_sp_port
->mlxsw_sp
;
1641 struct mlxsw_sp_bridge_device
*bridge_device
;
1642 struct mlxsw_sp_bridge_port
*bridge_port
;
1644 bridge_device
= mlxsw_sp_bridge_device_find(mlxsw_sp
->bridge
, br_dev
);
1647 bridge_port
= __mlxsw_sp_bridge_port_find(bridge_device
, brport_dev
);
1651 bridge_device
->ops
->port_leave(bridge_device
, bridge_port
,
1653 mlxsw_sp_bridge_port_put(mlxsw_sp
->bridge
, bridge_port
);
1657 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type
,
1658 const char *mac
, u16 vid
,
1659 struct net_device
*dev
)
1661 struct switchdev_notifier_fdb_info info
;
1665 call_switchdev_notifiers(type
, dev
, &info
.info
);
1668 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp
*mlxsw_sp
,
1669 char *sfn_pl
, int rec_index
,
1672 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1673 struct mlxsw_sp_bridge_device
*bridge_device
;
1674 struct mlxsw_sp_bridge_port
*bridge_port
;
1675 struct mlxsw_sp_port
*mlxsw_sp_port
;
1676 enum switchdev_notifier_type type
;
1680 bool do_notification
= true;
1683 mlxsw_reg_sfn_mac_unpack(sfn_pl
, rec_index
, mac
, &fid
, &local_port
);
1684 mlxsw_sp_port
= mlxsw_sp
->ports
[local_port
];
1685 if (!mlxsw_sp_port
) {
1686 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Incorrect local port in FDB notification\n");
1690 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port
, fid
);
1691 if (!mlxsw_sp_port_vlan
) {
1692 netdev_err(mlxsw_sp_port
->dev
, "Failed to find a matching {Port, VID} following FDB notification\n");
1696 bridge_port
= mlxsw_sp_port_vlan
->bridge_port
;
1698 netdev_err(mlxsw_sp_port
->dev
, "{Port, VID} not associated with a bridge\n");
1702 bridge_device
= bridge_port
->bridge_device
;
1703 vid
= bridge_device
->vlan_enabled
? mlxsw_sp_port_vlan
->vid
: 0;
1706 err
= mlxsw_sp_port_fdb_uc_op(mlxsw_sp
, local_port
, mac
, fid
,
1709 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Failed to set FDB entry\n");
1713 if (!do_notification
)
1715 type
= adding
? SWITCHDEV_FDB_ADD_TO_BRIDGE
: SWITCHDEV_FDB_DEL_TO_BRIDGE
;
1716 mlxsw_sp_fdb_call_notifiers(type
, mac
, vid
, bridge_port
->dev
);
1722 do_notification
= false;
1726 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp
*mlxsw_sp
,
1727 char *sfn_pl
, int rec_index
,
1730 struct mlxsw_sp_port_vlan
*mlxsw_sp_port_vlan
;
1731 struct mlxsw_sp_bridge_device
*bridge_device
;
1732 struct mlxsw_sp_bridge_port
*bridge_port
;
1733 struct mlxsw_sp_port
*mlxsw_sp_port
;
1734 enum switchdev_notifier_type type
;
1739 bool do_notification
= true;
1742 mlxsw_reg_sfn_mac_lag_unpack(sfn_pl
, rec_index
, mac
, &fid
, &lag_id
);
1743 mlxsw_sp_port
= mlxsw_sp_lag_rep_port(mlxsw_sp
, lag_id
);
1744 if (!mlxsw_sp_port
) {
1745 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Cannot find port representor for LAG\n");
1749 mlxsw_sp_port_vlan
= mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port
, fid
);
1750 if (!mlxsw_sp_port_vlan
) {
1751 netdev_err(mlxsw_sp_port
->dev
, "Failed to find a matching {Port, VID} following FDB notification\n");
1755 bridge_port
= mlxsw_sp_port_vlan
->bridge_port
;
1757 netdev_err(mlxsw_sp_port
->dev
, "{Port, VID} not associated with a bridge\n");
1761 bridge_device
= bridge_port
->bridge_device
;
1762 vid
= bridge_device
->vlan_enabled
? mlxsw_sp_port_vlan
->vid
: 0;
1763 lag_vid
= mlxsw_sp_port_vlan
->vid
;
1766 err
= mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp
, lag_id
, mac
, fid
, lag_vid
,
1769 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Failed to set FDB entry\n");
1773 if (!do_notification
)
1775 type
= adding
? SWITCHDEV_FDB_ADD_TO_BRIDGE
: SWITCHDEV_FDB_DEL_TO_BRIDGE
;
1776 mlxsw_sp_fdb_call_notifiers(type
, mac
, vid
, bridge_port
->dev
);
1782 do_notification
= false;
1786 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp
*mlxsw_sp
,
1787 char *sfn_pl
, int rec_index
)
1789 switch (mlxsw_reg_sfn_rec_type_get(sfn_pl
, rec_index
)) {
1790 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC
:
1791 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp
, sfn_pl
,
1794 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC
:
1795 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp
, sfn_pl
,
1798 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG
:
1799 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp
, sfn_pl
,
1802 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG
:
1803 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp
, sfn_pl
,
1809 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp
*mlxsw_sp
)
1811 struct mlxsw_sp_bridge
*bridge
= mlxsw_sp
->bridge
;
1813 mlxsw_core_schedule_dw(&bridge
->fdb_notify
.dw
,
1814 msecs_to_jiffies(bridge
->fdb_notify
.interval
));
1817 static void mlxsw_sp_fdb_notify_work(struct work_struct
*work
)
1819 struct mlxsw_sp_bridge
*bridge
;
1820 struct mlxsw_sp
*mlxsw_sp
;
1826 sfn_pl
= kmalloc(MLXSW_REG_SFN_LEN
, GFP_KERNEL
);
1830 bridge
= container_of(work
, struct mlxsw_sp_bridge
, fdb_notify
.dw
.work
);
1831 mlxsw_sp
= bridge
->mlxsw_sp
;
1834 mlxsw_reg_sfn_pack(sfn_pl
);
1835 err
= mlxsw_reg_query(mlxsw_sp
->core
, MLXSW_REG(sfn
), sfn_pl
);
1837 dev_err_ratelimited(mlxsw_sp
->bus_info
->dev
, "Failed to get FDB notifications\n");
1840 num_rec
= mlxsw_reg_sfn_num_rec_get(sfn_pl
);
1841 for (i
= 0; i
< num_rec
; i
++)
1842 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp
, sfn_pl
, i
);
1847 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp
);
1850 struct mlxsw_sp_switchdev_event_work
{
1851 struct work_struct work
;
1852 struct switchdev_notifier_fdb_info fdb_info
;
1853 struct net_device
*dev
;
1854 unsigned long event
;
1857 static void mlxsw_sp_switchdev_event_work(struct work_struct
*work
)
1859 struct mlxsw_sp_switchdev_event_work
*switchdev_work
=
1860 container_of(work
, struct mlxsw_sp_switchdev_event_work
, work
);
1861 struct net_device
*dev
= switchdev_work
->dev
;
1862 struct switchdev_notifier_fdb_info
*fdb_info
;
1863 struct mlxsw_sp_port
*mlxsw_sp_port
;
1867 mlxsw_sp_port
= mlxsw_sp_port_dev_lower_find(dev
);
1871 switch (switchdev_work
->event
) {
1872 case SWITCHDEV_FDB_ADD_TO_DEVICE
:
1873 fdb_info
= &switchdev_work
->fdb_info
;
1874 err
= mlxsw_sp_port_fdb_set(mlxsw_sp_port
, fdb_info
, true);
1877 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED
,
1879 fdb_info
->vid
, dev
);
1881 case SWITCHDEV_FDB_DEL_TO_DEVICE
:
1882 fdb_info
= &switchdev_work
->fdb_info
;
1883 mlxsw_sp_port_fdb_set(mlxsw_sp_port
, fdb_info
, false);
1889 kfree(switchdev_work
->fdb_info
.addr
);
1890 kfree(switchdev_work
);
1894 /* Called under rcu_read_lock() */
1895 static int mlxsw_sp_switchdev_event(struct notifier_block
*unused
,
1896 unsigned long event
, void *ptr
)
1898 struct net_device
*dev
= switchdev_notifier_info_to_dev(ptr
);
1899 struct mlxsw_sp_switchdev_event_work
*switchdev_work
;
1900 struct switchdev_notifier_fdb_info
*fdb_info
= ptr
;
1902 if (!mlxsw_sp_port_dev_lower_find_rcu(dev
))
1905 switchdev_work
= kzalloc(sizeof(*switchdev_work
), GFP_ATOMIC
);
1906 if (!switchdev_work
)
1909 INIT_WORK(&switchdev_work
->work
, mlxsw_sp_switchdev_event_work
);
1910 switchdev_work
->dev
= dev
;
1911 switchdev_work
->event
= event
;
1914 case SWITCHDEV_FDB_ADD_TO_DEVICE
: /* fall through */
1915 case SWITCHDEV_FDB_DEL_TO_DEVICE
:
1916 memcpy(&switchdev_work
->fdb_info
, ptr
,
1917 sizeof(switchdev_work
->fdb_info
));
1918 switchdev_work
->fdb_info
.addr
= kzalloc(ETH_ALEN
, GFP_ATOMIC
);
1919 if (!switchdev_work
->fdb_info
.addr
)
1920 goto err_addr_alloc
;
1921 ether_addr_copy((u8
*)switchdev_work
->fdb_info
.addr
,
1923 /* Take a reference on the device. This can be either
1924 * upper device containig mlxsw_sp_port or just a
1930 kfree(switchdev_work
);
1934 mlxsw_core_schedule_work(&switchdev_work
->work
);
1939 kfree(switchdev_work
);
1943 static struct notifier_block mlxsw_sp_switchdev_notifier
= {
1944 .notifier_call
= mlxsw_sp_switchdev_event
,
1947 static int mlxsw_sp_fdb_init(struct mlxsw_sp
*mlxsw_sp
)
1949 struct mlxsw_sp_bridge
*bridge
= mlxsw_sp
->bridge
;
1952 err
= mlxsw_sp_ageing_set(mlxsw_sp
, MLXSW_SP_DEFAULT_AGEING_TIME
);
1954 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to set default ageing time\n");
1958 err
= register_switchdev_notifier(&mlxsw_sp_switchdev_notifier
);
1960 dev_err(mlxsw_sp
->bus_info
->dev
, "Failed to register switchdev notifier\n");
1964 INIT_DELAYED_WORK(&bridge
->fdb_notify
.dw
, mlxsw_sp_fdb_notify_work
);
1965 bridge
->fdb_notify
.interval
= MLXSW_SP_DEFAULT_LEARNING_INTERVAL
;
1966 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp
);
1970 static void mlxsw_sp_fdb_fini(struct mlxsw_sp
*mlxsw_sp
)
1972 cancel_delayed_work_sync(&mlxsw_sp
->bridge
->fdb_notify
.dw
);
1973 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier
);
1977 int mlxsw_sp_switchdev_init(struct mlxsw_sp
*mlxsw_sp
)
1979 struct mlxsw_sp_bridge
*bridge
;
1981 bridge
= kzalloc(sizeof(*mlxsw_sp
->bridge
), GFP_KERNEL
);
1984 mlxsw_sp
->bridge
= bridge
;
1985 bridge
->mlxsw_sp
= mlxsw_sp
;
1987 INIT_LIST_HEAD(&mlxsw_sp
->bridge
->bridges_list
);
1988 INIT_LIST_HEAD(&mlxsw_sp
->bridge
->mids_list
);
1990 bridge
->bridge_8021q_ops
= &mlxsw_sp_bridge_8021q_ops
;
1991 bridge
->bridge_8021d_ops
= &mlxsw_sp_bridge_8021d_ops
;
1993 return mlxsw_sp_fdb_init(mlxsw_sp
);
1996 void mlxsw_sp_switchdev_fini(struct mlxsw_sp
*mlxsw_sp
)
1998 mlxsw_sp_fdb_fini(mlxsw_sp
);
1999 WARN_ON(!list_empty(&mlxsw_sp
->bridge
->mids_list
));
2000 WARN_ON(!list_empty(&mlxsw_sp
->bridge
->bridges_list
));
2001 kfree(mlxsw_sp
->bridge
);
2004 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port
*mlxsw_sp_port
)
2006 mlxsw_sp_port
->dev
->switchdev_ops
= &mlxsw_sp_port_switchdev_ops
;
2009 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port
*mlxsw_sp_port
)