1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021 Mellanox Technologies. */
4 #include <linux/list.h>
5 #include <linux/notifier.h>
6 #include <net/netevent.h>
7 #include <net/switchdev.h>
10 #include "bridge_priv.h"
11 #define CREATE_TRACE_POINTS
12 #include "diag/bridge_tracepoint.h"
14 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
15 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
16 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 4 - 1)
17 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM \
18 (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
19 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO \
20 (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
21 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
22 (MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO + 1)
23 #define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)
25 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
26 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
27 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
28 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
29 (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
30 #define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
32 #define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
/* FDB sub-table chaining levels: ingress matches first, then egress;
 * the skip table catches traffic that must bypass bridge offload.
 */
enum {
	MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
	MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
};
40 static const struct rhashtable_params fdb_ht_params
= {
41 .key_offset
= offsetof(struct mlx5_esw_bridge_fdb_entry
, key
),
42 .key_len
= sizeof(struct mlx5_esw_bridge_fdb_key
),
43 .head_offset
= offsetof(struct mlx5_esw_bridge_fdb_entry
, ht_node
),
44 .automatic_shrinking
= true,
48 MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG
= BIT(0),
51 struct mlx5_esw_bridge
{
54 struct list_head list
;
55 struct mlx5_esw_bridge_offloads
*br_offloads
;
57 struct list_head fdb_list
;
58 struct rhashtable fdb_ht
;
61 struct mlx5_flow_table
*egress_ft
;
62 struct mlx5_flow_group
*egress_vlan_fg
;
63 struct mlx5_flow_group
*egress_mac_fg
;
64 unsigned long ageing_time
;
69 mlx5_esw_bridge_fdb_offload_notify(struct net_device
*dev
, const unsigned char *addr
, u16 vid
,
72 struct switchdev_notifier_fdb_info send_info
= {};
74 send_info
.addr
= addr
;
76 send_info
.offloaded
= true;
77 call_switchdev_notifiers(val
, dev
, &send_info
.info
, NULL
);
80 static struct mlx5_flow_table
*
81 mlx5_esw_bridge_table_create(int max_fte
, u32 level
, struct mlx5_eswitch
*esw
)
83 struct mlx5_flow_table_attr ft_attr
= {};
84 struct mlx5_core_dev
*dev
= esw
->dev
;
85 struct mlx5_flow_namespace
*ns
;
86 struct mlx5_flow_table
*fdb
;
88 ns
= mlx5_get_flow_namespace(dev
, MLX5_FLOW_NAMESPACE_FDB
);
90 esw_warn(dev
, "Failed to get FDB namespace\n");
91 return ERR_PTR(-ENOENT
);
94 ft_attr
.flags
= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT
;
95 ft_attr
.max_fte
= max_fte
;
96 ft_attr
.level
= level
;
97 ft_attr
.prio
= FDB_BR_OFFLOAD
;
98 fdb
= mlx5_create_flow_table(ns
, &ft_attr
);
100 esw_warn(dev
, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb
));
105 static struct mlx5_flow_group
*
106 mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch
*esw
, struct mlx5_flow_table
*ingress_ft
)
108 int inlen
= MLX5_ST_SZ_BYTES(create_flow_group_in
);
109 struct mlx5_flow_group
*fg
;
112 in
= kvzalloc(inlen
, GFP_KERNEL
);
114 return ERR_PTR(-ENOMEM
);
116 MLX5_SET(create_flow_group_in
, in
, match_criteria_enable
,
117 MLX5_MATCH_OUTER_HEADERS
| MLX5_MATCH_MISC_PARAMETERS_2
);
118 match
= MLX5_ADDR_OF(create_flow_group_in
, in
, match_criteria
);
120 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.smac_47_16
);
121 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.smac_15_0
);
122 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.cvlan_tag
);
123 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.first_vid
);
125 MLX5_SET(fte_match_param
, match
, misc_parameters_2
.metadata_reg_c_0
,
126 mlx5_eswitch_get_vport_metadata_mask());
128 MLX5_SET(create_flow_group_in
, in
, start_flow_index
,
129 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM
);
130 MLX5_SET(create_flow_group_in
, in
, end_flow_index
,
131 MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO
);
133 fg
= mlx5_create_flow_group(ingress_ft
, in
);
137 "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
143 static struct mlx5_flow_group
*
144 mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch
*esw
,
145 struct mlx5_flow_table
*ingress_ft
)
147 int inlen
= MLX5_ST_SZ_BYTES(create_flow_group_in
);
148 struct mlx5_flow_group
*fg
;
151 in
= kvzalloc(inlen
, GFP_KERNEL
);
153 return ERR_PTR(-ENOMEM
);
155 MLX5_SET(create_flow_group_in
, in
, match_criteria_enable
,
156 MLX5_MATCH_OUTER_HEADERS
| MLX5_MATCH_MISC_PARAMETERS_2
);
157 match
= MLX5_ADDR_OF(create_flow_group_in
, in
, match_criteria
);
159 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.smac_47_16
);
160 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.smac_15_0
);
161 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.cvlan_tag
);
163 MLX5_SET(fte_match_param
, match
, misc_parameters_2
.metadata_reg_c_0
,
164 mlx5_eswitch_get_vport_metadata_mask());
166 MLX5_SET(create_flow_group_in
, in
, start_flow_index
,
167 MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM
);
168 MLX5_SET(create_flow_group_in
, in
, end_flow_index
,
169 MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO
);
171 fg
= mlx5_create_flow_group(ingress_ft
, in
);
174 "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
181 static struct mlx5_flow_group
*
182 mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch
*esw
, struct mlx5_flow_table
*ingress_ft
)
184 int inlen
= MLX5_ST_SZ_BYTES(create_flow_group_in
);
185 struct mlx5_flow_group
*fg
;
188 in
= kvzalloc(inlen
, GFP_KERNEL
);
190 return ERR_PTR(-ENOMEM
);
192 MLX5_SET(create_flow_group_in
, in
, match_criteria_enable
,
193 MLX5_MATCH_OUTER_HEADERS
| MLX5_MATCH_MISC_PARAMETERS_2
);
194 match
= MLX5_ADDR_OF(create_flow_group_in
, in
, match_criteria
);
196 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.smac_47_16
);
197 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.smac_15_0
);
199 MLX5_SET(fte_match_param
, match
, misc_parameters_2
.metadata_reg_c_0
,
200 mlx5_eswitch_get_vport_metadata_mask());
202 MLX5_SET(create_flow_group_in
, in
, start_flow_index
,
203 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM
);
204 MLX5_SET(create_flow_group_in
, in
, end_flow_index
,
205 MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO
);
207 fg
= mlx5_create_flow_group(ingress_ft
, in
);
210 "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
217 static struct mlx5_flow_group
*
218 mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch
*esw
, struct mlx5_flow_table
*egress_ft
)
220 int inlen
= MLX5_ST_SZ_BYTES(create_flow_group_in
);
221 struct mlx5_flow_group
*fg
;
224 in
= kvzalloc(inlen
, GFP_KERNEL
);
226 return ERR_PTR(-ENOMEM
);
228 MLX5_SET(create_flow_group_in
, in
, match_criteria_enable
, MLX5_MATCH_OUTER_HEADERS
);
229 match
= MLX5_ADDR_OF(create_flow_group_in
, in
, match_criteria
);
231 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.dmac_47_16
);
232 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.dmac_15_0
);
233 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.cvlan_tag
);
234 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.first_vid
);
236 MLX5_SET(create_flow_group_in
, in
, start_flow_index
,
237 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM
);
238 MLX5_SET(create_flow_group_in
, in
, end_flow_index
,
239 MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO
);
241 fg
= mlx5_create_flow_group(egress_ft
, in
);
244 "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
250 static struct mlx5_flow_group
*
251 mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch
*esw
, struct mlx5_flow_table
*egress_ft
)
253 int inlen
= MLX5_ST_SZ_BYTES(create_flow_group_in
);
254 struct mlx5_flow_group
*fg
;
257 in
= kvzalloc(inlen
, GFP_KERNEL
);
259 return ERR_PTR(-ENOMEM
);
261 MLX5_SET(create_flow_group_in
, in
, match_criteria_enable
, MLX5_MATCH_OUTER_HEADERS
);
262 match
= MLX5_ADDR_OF(create_flow_group_in
, in
, match_criteria
);
264 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.dmac_47_16
);
265 MLX5_SET_TO_ONES(fte_match_param
, match
, outer_headers
.dmac_15_0
);
267 MLX5_SET(create_flow_group_in
, in
, start_flow_index
,
268 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM
);
269 MLX5_SET(create_flow_group_in
, in
, end_flow_index
,
270 MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO
);
272 fg
= mlx5_create_flow_group(egress_ft
, in
);
275 "Failed to create bridge egress table MAC flow group (err=%ld)\n",
282 mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads
*br_offloads
)
284 struct mlx5_flow_group
*mac_fg
, *filter_fg
, *vlan_fg
;
285 struct mlx5_flow_table
*ingress_ft
, *skip_ft
;
288 if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads
->esw
))
291 ingress_ft
= mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE
,
292 MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE
,
294 if (IS_ERR(ingress_ft
))
295 return PTR_ERR(ingress_ft
);
297 skip_ft
= mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE
,
298 MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE
,
300 if (IS_ERR(skip_ft
)) {
301 err
= PTR_ERR(skip_ft
);
305 vlan_fg
= mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads
->esw
, ingress_ft
);
306 if (IS_ERR(vlan_fg
)) {
307 err
= PTR_ERR(vlan_fg
);
311 filter_fg
= mlx5_esw_bridge_ingress_filter_fg_create(br_offloads
->esw
, ingress_ft
);
312 if (IS_ERR(filter_fg
)) {
313 err
= PTR_ERR(filter_fg
);
317 mac_fg
= mlx5_esw_bridge_ingress_mac_fg_create(br_offloads
->esw
, ingress_ft
);
318 if (IS_ERR(mac_fg
)) {
319 err
= PTR_ERR(mac_fg
);
323 br_offloads
->ingress_ft
= ingress_ft
;
324 br_offloads
->skip_ft
= skip_ft
;
325 br_offloads
->ingress_vlan_fg
= vlan_fg
;
326 br_offloads
->ingress_filter_fg
= filter_fg
;
327 br_offloads
->ingress_mac_fg
= mac_fg
;
331 mlx5_destroy_flow_group(filter_fg
);
333 mlx5_destroy_flow_group(vlan_fg
);
335 mlx5_destroy_flow_table(skip_ft
);
337 mlx5_destroy_flow_table(ingress_ft
);
342 mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads
*br_offloads
)
344 mlx5_destroy_flow_group(br_offloads
->ingress_mac_fg
);
345 br_offloads
->ingress_mac_fg
= NULL
;
346 mlx5_destroy_flow_group(br_offloads
->ingress_filter_fg
);
347 br_offloads
->ingress_filter_fg
= NULL
;
348 mlx5_destroy_flow_group(br_offloads
->ingress_vlan_fg
);
349 br_offloads
->ingress_vlan_fg
= NULL
;
350 mlx5_destroy_flow_table(br_offloads
->skip_ft
);
351 br_offloads
->skip_ft
= NULL
;
352 mlx5_destroy_flow_table(br_offloads
->ingress_ft
);
353 br_offloads
->ingress_ft
= NULL
;
357 mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads
*br_offloads
,
358 struct mlx5_esw_bridge
*bridge
)
360 struct mlx5_flow_group
*mac_fg
, *vlan_fg
;
361 struct mlx5_flow_table
*egress_ft
;
364 egress_ft
= mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE
,
365 MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE
,
367 if (IS_ERR(egress_ft
))
368 return PTR_ERR(egress_ft
);
370 vlan_fg
= mlx5_esw_bridge_egress_vlan_fg_create(br_offloads
->esw
, egress_ft
);
371 if (IS_ERR(vlan_fg
)) {
372 err
= PTR_ERR(vlan_fg
);
376 mac_fg
= mlx5_esw_bridge_egress_mac_fg_create(br_offloads
->esw
, egress_ft
);
377 if (IS_ERR(mac_fg
)) {
378 err
= PTR_ERR(mac_fg
);
382 bridge
->egress_ft
= egress_ft
;
383 bridge
->egress_vlan_fg
= vlan_fg
;
384 bridge
->egress_mac_fg
= mac_fg
;
388 mlx5_destroy_flow_group(vlan_fg
);
390 mlx5_destroy_flow_table(egress_ft
);
395 mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge
*bridge
)
397 mlx5_destroy_flow_group(bridge
->egress_mac_fg
);
398 mlx5_destroy_flow_group(bridge
->egress_vlan_fg
);
399 mlx5_destroy_flow_table(bridge
->egress_ft
);
402 static struct mlx5_flow_handle
*
403 mlx5_esw_bridge_ingress_flow_create(u16 vport_num
, const unsigned char *addr
,
404 struct mlx5_esw_bridge_vlan
*vlan
, u32 counter_id
,
405 struct mlx5_esw_bridge
*bridge
)
407 struct mlx5_esw_bridge_offloads
*br_offloads
= bridge
->br_offloads
;
408 struct mlx5_flow_act flow_act
= {
409 .action
= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
| MLX5_FLOW_CONTEXT_ACTION_COUNT
,
410 .flags
= FLOW_ACT_NO_APPEND
,
412 struct mlx5_flow_destination dests
[2] = {};
413 struct mlx5_flow_spec
*rule_spec
;
414 struct mlx5_flow_handle
*handle
;
417 rule_spec
= kvzalloc(sizeof(*rule_spec
), GFP_KERNEL
);
419 return ERR_PTR(-ENOMEM
);
421 rule_spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
| MLX5_MATCH_MISC_PARAMETERS_2
;
423 smac_v
= MLX5_ADDR_OF(fte_match_param
, rule_spec
->match_value
,
424 outer_headers
.smac_47_16
);
425 ether_addr_copy(smac_v
, addr
);
426 smac_c
= MLX5_ADDR_OF(fte_match_param
, rule_spec
->match_criteria
,
427 outer_headers
.smac_47_16
);
428 eth_broadcast_addr(smac_c
);
430 MLX5_SET(fte_match_param
, rule_spec
->match_criteria
,
431 misc_parameters_2
.metadata_reg_c_0
, mlx5_eswitch_get_vport_metadata_mask());
432 MLX5_SET(fte_match_param
, rule_spec
->match_value
, misc_parameters_2
.metadata_reg_c_0
,
433 mlx5_eswitch_get_vport_metadata_for_match(br_offloads
->esw
, vport_num
));
435 if (vlan
&& vlan
->pkt_reformat_push
) {
436 flow_act
.action
|= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT
;
437 flow_act
.pkt_reformat
= vlan
->pkt_reformat_push
;
439 MLX5_SET_TO_ONES(fte_match_param
, rule_spec
->match_criteria
,
440 outer_headers
.cvlan_tag
);
441 MLX5_SET_TO_ONES(fte_match_param
, rule_spec
->match_value
,
442 outer_headers
.cvlan_tag
);
443 MLX5_SET_TO_ONES(fte_match_param
, rule_spec
->match_criteria
,
444 outer_headers
.first_vid
);
445 MLX5_SET(fte_match_param
, rule_spec
->match_value
, outer_headers
.first_vid
,
449 dests
[0].type
= MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
;
450 dests
[0].ft
= bridge
->egress_ft
;
451 dests
[1].type
= MLX5_FLOW_DESTINATION_TYPE_COUNTER
;
452 dests
[1].counter_id
= counter_id
;
454 handle
= mlx5_add_flow_rules(br_offloads
->ingress_ft
, rule_spec
, &flow_act
, dests
,
461 static struct mlx5_flow_handle
*
462 mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num
, const unsigned char *addr
,
463 struct mlx5_esw_bridge
*bridge
)
465 struct mlx5_esw_bridge_offloads
*br_offloads
= bridge
->br_offloads
;
466 struct mlx5_flow_destination dest
= {
467 .type
= MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE
,
468 .ft
= br_offloads
->skip_ft
,
470 struct mlx5_flow_act flow_act
= {
471 .action
= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
,
472 .flags
= FLOW_ACT_NO_APPEND
,
474 struct mlx5_flow_spec
*rule_spec
;
475 struct mlx5_flow_handle
*handle
;
478 rule_spec
= kvzalloc(sizeof(*rule_spec
), GFP_KERNEL
);
480 return ERR_PTR(-ENOMEM
);
482 rule_spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
| MLX5_MATCH_MISC_PARAMETERS_2
;
484 smac_v
= MLX5_ADDR_OF(fte_match_param
, rule_spec
->match_value
,
485 outer_headers
.smac_47_16
);
486 ether_addr_copy(smac_v
, addr
);
487 smac_c
= MLX5_ADDR_OF(fte_match_param
, rule_spec
->match_criteria
,
488 outer_headers
.smac_47_16
);
489 eth_broadcast_addr(smac_c
);
491 MLX5_SET(fte_match_param
, rule_spec
->match_criteria
,
492 misc_parameters_2
.metadata_reg_c_0
, mlx5_eswitch_get_vport_metadata_mask());
493 MLX5_SET(fte_match_param
, rule_spec
->match_value
, misc_parameters_2
.metadata_reg_c_0
,
494 mlx5_eswitch_get_vport_metadata_for_match(br_offloads
->esw
, vport_num
));
496 MLX5_SET_TO_ONES(fte_match_param
, rule_spec
->match_criteria
,
497 outer_headers
.cvlan_tag
);
498 MLX5_SET_TO_ONES(fte_match_param
, rule_spec
->match_value
,
499 outer_headers
.cvlan_tag
);
501 handle
= mlx5_add_flow_rules(br_offloads
->ingress_ft
, rule_spec
, &flow_act
, &dest
, 1);
507 static struct mlx5_flow_handle
*
508 mlx5_esw_bridge_egress_flow_create(u16 vport_num
, const unsigned char *addr
,
509 struct mlx5_esw_bridge_vlan
*vlan
,
510 struct mlx5_esw_bridge
*bridge
)
512 struct mlx5_flow_destination dest
= {
513 .type
= MLX5_FLOW_DESTINATION_TYPE_VPORT
,
514 .vport
.num
= vport_num
,
516 struct mlx5_flow_act flow_act
= {
517 .action
= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST
,
518 .flags
= FLOW_ACT_NO_APPEND
,
520 struct mlx5_flow_spec
*rule_spec
;
521 struct mlx5_flow_handle
*handle
;
524 rule_spec
= kvzalloc(sizeof(*rule_spec
), GFP_KERNEL
);
526 return ERR_PTR(-ENOMEM
);
528 rule_spec
->match_criteria_enable
= MLX5_MATCH_OUTER_HEADERS
;
530 dmac_v
= MLX5_ADDR_OF(fte_match_param
, rule_spec
->match_value
,
531 outer_headers
.dmac_47_16
);
532 ether_addr_copy(dmac_v
, addr
);
533 dmac_c
= MLX5_ADDR_OF(fte_match_param
, rule_spec
->match_criteria
,
534 outer_headers
.dmac_47_16
);
535 eth_broadcast_addr(dmac_c
);
538 if (vlan
->pkt_reformat_pop
) {
539 flow_act
.action
|= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT
;
540 flow_act
.pkt_reformat
= vlan
->pkt_reformat_pop
;
543 MLX5_SET_TO_ONES(fte_match_param
, rule_spec
->match_criteria
,
544 outer_headers
.cvlan_tag
);
545 MLX5_SET_TO_ONES(fte_match_param
, rule_spec
->match_value
,
546 outer_headers
.cvlan_tag
);
547 MLX5_SET_TO_ONES(fte_match_param
, rule_spec
->match_criteria
,
548 outer_headers
.first_vid
);
549 MLX5_SET(fte_match_param
, rule_spec
->match_value
, outer_headers
.first_vid
,
553 handle
= mlx5_add_flow_rules(bridge
->egress_ft
, rule_spec
, &flow_act
, &dest
, 1);
559 static struct mlx5_esw_bridge
*mlx5_esw_bridge_create(int ifindex
,
560 struct mlx5_esw_bridge_offloads
*br_offloads
)
562 struct mlx5_esw_bridge
*bridge
;
565 bridge
= kvzalloc(sizeof(*bridge
), GFP_KERNEL
);
567 return ERR_PTR(-ENOMEM
);
569 bridge
->br_offloads
= br_offloads
;
570 err
= mlx5_esw_bridge_egress_table_init(br_offloads
, bridge
);
574 err
= rhashtable_init(&bridge
->fdb_ht
, &fdb_ht_params
);
578 INIT_LIST_HEAD(&bridge
->fdb_list
);
579 xa_init(&bridge
->vports
);
580 bridge
->ifindex
= ifindex
;
582 bridge
->ageing_time
= clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME
);
583 list_add(&bridge
->list
, &br_offloads
->bridges
);
588 mlx5_esw_bridge_egress_table_cleanup(bridge
);
594 static void mlx5_esw_bridge_get(struct mlx5_esw_bridge
*bridge
)
599 static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads
*br_offloads
,
600 struct mlx5_esw_bridge
*bridge
)
602 if (--bridge
->refcnt
)
605 mlx5_esw_bridge_egress_table_cleanup(bridge
);
606 WARN_ON(!xa_empty(&bridge
->vports
));
607 list_del(&bridge
->list
);
608 rhashtable_destroy(&bridge
->fdb_ht
);
611 if (list_empty(&br_offloads
->bridges
))
612 mlx5_esw_bridge_ingress_table_cleanup(br_offloads
);
615 static struct mlx5_esw_bridge
*
616 mlx5_esw_bridge_lookup(int ifindex
, struct mlx5_esw_bridge_offloads
*br_offloads
)
618 struct mlx5_esw_bridge
*bridge
;
622 list_for_each_entry(bridge
, &br_offloads
->bridges
, list
) {
623 if (bridge
->ifindex
== ifindex
) {
624 mlx5_esw_bridge_get(bridge
);
629 if (!br_offloads
->ingress_ft
) {
630 int err
= mlx5_esw_bridge_ingress_table_init(br_offloads
);
636 bridge
= mlx5_esw_bridge_create(ifindex
, br_offloads
);
637 if (IS_ERR(bridge
) && list_empty(&br_offloads
->bridges
))
638 mlx5_esw_bridge_ingress_table_cleanup(br_offloads
);
642 static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port
*port
,
643 struct mlx5_esw_bridge
*bridge
)
645 return xa_insert(&bridge
->vports
, port
->vport_num
, port
, GFP_KERNEL
);
648 static struct mlx5_esw_bridge_port
*
649 mlx5_esw_bridge_port_lookup(u16 vport_num
, struct mlx5_esw_bridge
*bridge
)
651 return xa_load(&bridge
->vports
, vport_num
);
654 static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port
*port
,
655 struct mlx5_esw_bridge
*bridge
)
657 xa_erase(&bridge
->vports
, port
->vport_num
);
660 static void mlx5_esw_bridge_fdb_entry_refresh(unsigned long lastuse
,
661 struct mlx5_esw_bridge_fdb_entry
*entry
)
663 trace_mlx5_esw_bridge_fdb_entry_refresh(entry
);
665 entry
->lastuse
= lastuse
;
666 mlx5_esw_bridge_fdb_offload_notify(entry
->dev
, entry
->key
.addr
,
668 SWITCHDEV_FDB_ADD_TO_BRIDGE
);
672 mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry
*entry
,
673 struct mlx5_esw_bridge
*bridge
)
675 trace_mlx5_esw_bridge_fdb_entry_cleanup(entry
);
677 rhashtable_remove_fast(&bridge
->fdb_ht
, &entry
->ht_node
, fdb_ht_params
);
678 mlx5_del_flow_rules(entry
->egress_handle
);
679 if (entry
->filter_handle
)
680 mlx5_del_flow_rules(entry
->filter_handle
);
681 mlx5_del_flow_rules(entry
->ingress_handle
);
682 mlx5_fc_destroy(bridge
->br_offloads
->esw
->dev
, entry
->ingress_counter
);
683 list_del(&entry
->vlan_list
);
684 list_del(&entry
->list
);
688 static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge
*bridge
)
690 struct mlx5_esw_bridge_fdb_entry
*entry
, *tmp
;
692 list_for_each_entry_safe(entry
, tmp
, &bridge
->fdb_list
, list
) {
693 if (!(entry
->flags
& MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER
))
694 mlx5_esw_bridge_fdb_offload_notify(entry
->dev
, entry
->key
.addr
,
696 SWITCHDEV_FDB_DEL_TO_BRIDGE
);
697 mlx5_esw_bridge_fdb_entry_cleanup(entry
, bridge
);
701 static struct mlx5_esw_bridge_vlan
*
702 mlx5_esw_bridge_vlan_lookup(u16 vid
, struct mlx5_esw_bridge_port
*port
)
704 return xa_load(&port
->vlans
, vid
);
708 mlx5_esw_bridge_vlan_push_create(struct mlx5_esw_bridge_vlan
*vlan
, struct mlx5_eswitch
*esw
)
713 } vlan_hdr
= { htons(ETH_P_8021Q
), htons(vlan
->vid
) };
714 struct mlx5_pkt_reformat_params reformat_params
= {};
715 struct mlx5_pkt_reformat
*pkt_reformat
;
717 if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw
->dev
, reformat_insert
)) ||
718 MLX5_CAP_GEN_2(esw
->dev
, max_reformat_insert_size
) < sizeof(vlan_hdr
) ||
719 MLX5_CAP_GEN_2(esw
->dev
, max_reformat_insert_offset
) <
720 offsetof(struct vlan_ethhdr
, h_vlan_proto
)) {
721 esw_warn(esw
->dev
, "Packet reformat INSERT_HEADER is not supported\n");
725 reformat_params
.type
= MLX5_REFORMAT_TYPE_INSERT_HDR
;
726 reformat_params
.param_0
= MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START
;
727 reformat_params
.param_1
= offsetof(struct vlan_ethhdr
, h_vlan_proto
);
728 reformat_params
.size
= sizeof(vlan_hdr
);
729 reformat_params
.data
= &vlan_hdr
;
730 pkt_reformat
= mlx5_packet_reformat_alloc(esw
->dev
,
732 MLX5_FLOW_NAMESPACE_FDB
);
733 if (IS_ERR(pkt_reformat
)) {
734 esw_warn(esw
->dev
, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
735 PTR_ERR(pkt_reformat
));
736 return PTR_ERR(pkt_reformat
);
739 vlan
->pkt_reformat_push
= pkt_reformat
;
744 mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan
*vlan
, struct mlx5_eswitch
*esw
)
746 mlx5_packet_reformat_dealloc(esw
->dev
, vlan
->pkt_reformat_push
);
747 vlan
->pkt_reformat_push
= NULL
;
751 mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan
*vlan
, struct mlx5_eswitch
*esw
)
753 struct mlx5_pkt_reformat_params reformat_params
= {};
754 struct mlx5_pkt_reformat
*pkt_reformat
;
756 if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw
->dev
, reformat_remove
)) ||
757 MLX5_CAP_GEN_2(esw
->dev
, max_reformat_remove_size
) < sizeof(struct vlan_hdr
) ||
758 MLX5_CAP_GEN_2(esw
->dev
, max_reformat_remove_offset
) <
759 offsetof(struct vlan_ethhdr
, h_vlan_proto
)) {
760 esw_warn(esw
->dev
, "Packet reformat REMOVE_HEADER is not supported\n");
764 reformat_params
.type
= MLX5_REFORMAT_TYPE_REMOVE_HDR
;
765 reformat_params
.param_0
= MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START
;
766 reformat_params
.param_1
= offsetof(struct vlan_ethhdr
, h_vlan_proto
);
767 reformat_params
.size
= sizeof(struct vlan_hdr
);
768 pkt_reformat
= mlx5_packet_reformat_alloc(esw
->dev
,
770 MLX5_FLOW_NAMESPACE_FDB
);
771 if (IS_ERR(pkt_reformat
)) {
772 esw_warn(esw
->dev
, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
773 PTR_ERR(pkt_reformat
));
774 return PTR_ERR(pkt_reformat
);
777 vlan
->pkt_reformat_pop
= pkt_reformat
;
782 mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan
*vlan
, struct mlx5_eswitch
*esw
)
784 mlx5_packet_reformat_dealloc(esw
->dev
, vlan
->pkt_reformat_pop
);
785 vlan
->pkt_reformat_pop
= NULL
;
788 static struct mlx5_esw_bridge_vlan
*
789 mlx5_esw_bridge_vlan_create(u16 vid
, u16 flags
, struct mlx5_esw_bridge_port
*port
,
790 struct mlx5_eswitch
*esw
)
792 struct mlx5_esw_bridge_vlan
*vlan
;
795 vlan
= kvzalloc(sizeof(*vlan
), GFP_KERNEL
);
797 return ERR_PTR(-ENOMEM
);
801 INIT_LIST_HEAD(&vlan
->fdb_list
);
803 if (flags
& BRIDGE_VLAN_INFO_PVID
) {
804 err
= mlx5_esw_bridge_vlan_push_create(vlan
, esw
);
808 if (flags
& BRIDGE_VLAN_INFO_UNTAGGED
) {
809 err
= mlx5_esw_bridge_vlan_pop_create(vlan
, esw
);
814 err
= xa_insert(&port
->vlans
, vid
, vlan
, GFP_KERNEL
);
818 trace_mlx5_esw_bridge_vlan_create(vlan
);
822 if (vlan
->pkt_reformat_pop
)
823 mlx5_esw_bridge_vlan_pop_cleanup(vlan
, esw
);
825 if (vlan
->pkt_reformat_push
)
826 mlx5_esw_bridge_vlan_push_cleanup(vlan
, esw
);
832 static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port
*port
,
833 struct mlx5_esw_bridge_vlan
*vlan
)
835 xa_erase(&port
->vlans
, vlan
->vid
);
838 static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan
*vlan
,
839 struct mlx5_esw_bridge
*bridge
)
841 struct mlx5_esw_bridge_fdb_entry
*entry
, *tmp
;
843 list_for_each_entry_safe(entry
, tmp
, &vlan
->fdb_list
, vlan_list
) {
844 if (!(entry
->flags
& MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER
))
845 mlx5_esw_bridge_fdb_offload_notify(entry
->dev
, entry
->key
.addr
,
847 SWITCHDEV_FDB_DEL_TO_BRIDGE
);
848 mlx5_esw_bridge_fdb_entry_cleanup(entry
, bridge
);
851 if (vlan
->pkt_reformat_pop
)
852 mlx5_esw_bridge_vlan_pop_cleanup(vlan
, bridge
->br_offloads
->esw
);
853 if (vlan
->pkt_reformat_push
)
854 mlx5_esw_bridge_vlan_push_cleanup(vlan
, bridge
->br_offloads
->esw
);
/* Fully tear down @vlan: flush its FDB entries and reformats, unregister it
 * from the port, and free it.
 */
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
					 struct mlx5_esw_bridge_vlan *vlan,
					 struct mlx5_esw_bridge *bridge)
{
	trace_mlx5_esw_bridge_vlan_cleanup(vlan);
	mlx5_esw_bridge_vlan_flush(vlan, bridge);
	mlx5_esw_bridge_vlan_erase(port, vlan);
	kvfree(vlan);
}
867 static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port
*port
,
868 struct mlx5_esw_bridge
*bridge
)
870 struct mlx5_esw_bridge_vlan
*vlan
;
873 xa_for_each(&port
->vlans
, index
, vlan
)
874 mlx5_esw_bridge_vlan_cleanup(port
, vlan
, bridge
);
877 static struct mlx5_esw_bridge_vlan
*
878 mlx5_esw_bridge_port_vlan_lookup(u16 vid
, u16 vport_num
, struct mlx5_esw_bridge
*bridge
,
879 struct mlx5_eswitch
*esw
)
881 struct mlx5_esw_bridge_port
*port
;
882 struct mlx5_esw_bridge_vlan
*vlan
;
884 port
= mlx5_esw_bridge_port_lookup(vport_num
, bridge
);
886 /* FDB is added asynchronously on wq while port might have been deleted
887 * concurrently. Report on 'info' logging level and skip the FDB offload.
889 esw_info(esw
->dev
, "Failed to lookup bridge port (vport=%u)\n", vport_num
);
890 return ERR_PTR(-EINVAL
);
893 vlan
= mlx5_esw_bridge_vlan_lookup(vid
, port
);
895 /* FDB is added asynchronously on wq while vlan might have been deleted
896 * concurrently. Report on 'info' logging level and skip the FDB offload.
898 esw_info(esw
->dev
, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
900 return ERR_PTR(-EINVAL
);
906 static struct mlx5_esw_bridge_fdb_entry
*
907 mlx5_esw_bridge_fdb_entry_init(struct net_device
*dev
, u16 vport_num
, const unsigned char *addr
,
908 u16 vid
, bool added_by_user
, struct mlx5_eswitch
*esw
,
909 struct mlx5_esw_bridge
*bridge
)
911 struct mlx5_esw_bridge_vlan
*vlan
= NULL
;
912 struct mlx5_esw_bridge_fdb_entry
*entry
;
913 struct mlx5_flow_handle
*handle
;
914 struct mlx5_fc
*counter
;
915 struct mlx5e_priv
*priv
;
918 if (bridge
->flags
& MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG
&& vid
) {
919 vlan
= mlx5_esw_bridge_port_vlan_lookup(vid
, vport_num
, bridge
, esw
);
921 return ERR_CAST(vlan
);
924 priv
= netdev_priv(dev
);
925 entry
= kvzalloc(sizeof(*entry
), GFP_KERNEL
);
927 return ERR_PTR(-ENOMEM
);
929 ether_addr_copy(entry
->key
.addr
, addr
);
930 entry
->key
.vid
= vid
;
932 entry
->vport_num
= vport_num
;
933 entry
->lastuse
= jiffies
;
935 entry
->flags
|= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER
;
937 counter
= mlx5_fc_create(priv
->mdev
, true);
938 if (IS_ERR(counter
)) {
939 err
= PTR_ERR(counter
);
940 goto err_ingress_fc_create
;
942 entry
->ingress_counter
= counter
;
944 handle
= mlx5_esw_bridge_ingress_flow_create(vport_num
, addr
, vlan
, mlx5_fc_id(counter
),
946 if (IS_ERR(handle
)) {
947 err
= PTR_ERR(handle
);
948 esw_warn(esw
->dev
, "Failed to create ingress flow(vport=%u,err=%d)\n",
950 goto err_ingress_flow_create
;
952 entry
->ingress_handle
= handle
;
954 if (bridge
->flags
& MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG
) {
955 handle
= mlx5_esw_bridge_ingress_filter_flow_create(vport_num
, addr
, bridge
);
956 if (IS_ERR(handle
)) {
957 err
= PTR_ERR(handle
);
958 esw_warn(esw
->dev
, "Failed to create ingress filter(vport=%u,err=%d)\n",
960 goto err_ingress_filter_flow_create
;
962 entry
->filter_handle
= handle
;
965 handle
= mlx5_esw_bridge_egress_flow_create(vport_num
, addr
, vlan
, bridge
);
966 if (IS_ERR(handle
)) {
967 err
= PTR_ERR(handle
);
968 esw_warn(esw
->dev
, "Failed to create egress flow(vport=%u,err=%d)\n",
970 goto err_egress_flow_create
;
972 entry
->egress_handle
= handle
;
974 err
= rhashtable_insert_fast(&bridge
->fdb_ht
, &entry
->ht_node
, fdb_ht_params
);
976 esw_warn(esw
->dev
, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num
, err
);
981 list_add(&entry
->vlan_list
, &vlan
->fdb_list
);
983 INIT_LIST_HEAD(&entry
->vlan_list
);
984 list_add(&entry
->list
, &bridge
->fdb_list
);
986 trace_mlx5_esw_bridge_fdb_entry_init(entry
);
990 mlx5_del_flow_rules(entry
->egress_handle
);
991 err_egress_flow_create
:
992 if (entry
->filter_handle
)
993 mlx5_del_flow_rules(entry
->filter_handle
);
994 err_ingress_filter_flow_create
:
995 mlx5_del_flow_rules(entry
->ingress_handle
);
996 err_ingress_flow_create
:
997 mlx5_fc_destroy(priv
->mdev
, entry
->ingress_counter
);
998 err_ingress_fc_create
:
1000 return ERR_PTR(err
);
1003 int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time
, struct mlx5_eswitch
*esw
,
1004 struct mlx5_vport
*vport
)
1009 vport
->bridge
->ageing_time
= clock_t_to_jiffies(ageing_time
);
1013 int mlx5_esw_bridge_vlan_filtering_set(bool enable
, struct mlx5_eswitch
*esw
,
1014 struct mlx5_vport
*vport
)
1016 struct mlx5_esw_bridge
*bridge
;
1022 bridge
= vport
->bridge
;
1023 filtering
= bridge
->flags
& MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG
;
1024 if (filtering
== enable
)
1027 mlx5_esw_bridge_fdb_flush(bridge
);
1029 bridge
->flags
|= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG
;
1031 bridge
->flags
&= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG
;
1036 static int mlx5_esw_bridge_vport_init(struct mlx5_esw_bridge_offloads
*br_offloads
,
1037 struct mlx5_esw_bridge
*bridge
,
1038 struct mlx5_vport
*vport
)
1040 struct mlx5_eswitch
*esw
= br_offloads
->esw
;
1041 struct mlx5_esw_bridge_port
*port
;
1044 port
= kvzalloc(sizeof(*port
), GFP_KERNEL
);
1047 goto err_port_alloc
;
1050 port
->vport_num
= vport
->vport
;
1051 xa_init(&port
->vlans
);
1052 err
= mlx5_esw_bridge_port_insert(port
, bridge
);
1054 esw_warn(esw
->dev
, "Failed to insert port metadata (vport=%u,err=%d)\n",
1056 goto err_port_insert
;
1058 trace_mlx5_esw_bridge_vport_init(port
);
1060 vport
->bridge
= bridge
;
1066 mlx5_esw_bridge_put(br_offloads
, bridge
);
1070 static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads
*br_offloads
,
1071 struct mlx5_vport
*vport
)
1073 struct mlx5_esw_bridge
*bridge
= vport
->bridge
;
1074 struct mlx5_esw_bridge_fdb_entry
*entry
, *tmp
;
1075 struct mlx5_esw_bridge_port
*port
;
1077 list_for_each_entry_safe(entry
, tmp
, &bridge
->fdb_list
, list
)
1078 if (entry
->vport_num
== vport
->vport
)
1079 mlx5_esw_bridge_fdb_entry_cleanup(entry
, bridge
);
1081 port
= mlx5_esw_bridge_port_lookup(vport
->vport
, bridge
);
1083 WARN(1, "Vport %u metadata not found on bridge", vport
->vport
);
1087 trace_mlx5_esw_bridge_vport_cleanup(port
);
1088 mlx5_esw_bridge_port_vlans_flush(port
, bridge
);
1089 mlx5_esw_bridge_port_erase(port
, bridge
);
1091 mlx5_esw_bridge_put(br_offloads
, bridge
);
1092 vport
->bridge
= NULL
;
1096 int mlx5_esw_bridge_vport_link(int ifindex
, struct mlx5_esw_bridge_offloads
*br_offloads
,
1097 struct mlx5_vport
*vport
, struct netlink_ext_ack
*extack
)
1099 struct mlx5_esw_bridge
*bridge
;
1102 WARN_ON(vport
->bridge
);
1104 bridge
= mlx5_esw_bridge_lookup(ifindex
, br_offloads
);
1105 if (IS_ERR(bridge
)) {
1106 NL_SET_ERR_MSG_MOD(extack
, "Error checking for existing bridge with same ifindex");
1107 return PTR_ERR(bridge
);
1110 err
= mlx5_esw_bridge_vport_init(br_offloads
, bridge
, vport
);
1112 NL_SET_ERR_MSG_MOD(extack
, "Error initializing port");
1116 int mlx5_esw_bridge_vport_unlink(int ifindex
, struct mlx5_esw_bridge_offloads
*br_offloads
,
1117 struct mlx5_vport
*vport
, struct netlink_ext_ack
*extack
)
1119 struct mlx5_esw_bridge
*bridge
= vport
->bridge
;
1123 NL_SET_ERR_MSG_MOD(extack
, "Port is not attached to any bridge");
1126 if (bridge
->ifindex
!= ifindex
) {
1127 NL_SET_ERR_MSG_MOD(extack
, "Port is attached to another bridge");
1131 err
= mlx5_esw_bridge_vport_cleanup(br_offloads
, vport
);
1133 NL_SET_ERR_MSG_MOD(extack
, "Port cleanup failed");
1137 int mlx5_esw_bridge_port_vlan_add(u16 vid
, u16 flags
, struct mlx5_eswitch
*esw
,
1138 struct mlx5_vport
*vport
, struct netlink_ext_ack
*extack
)
1140 struct mlx5_esw_bridge_port
*port
;
1141 struct mlx5_esw_bridge_vlan
*vlan
;
1143 port
= mlx5_esw_bridge_port_lookup(vport
->vport
, vport
->bridge
);
1147 vlan
= mlx5_esw_bridge_vlan_lookup(vid
, port
);
1149 if (vlan
->flags
== flags
)
1151 mlx5_esw_bridge_vlan_cleanup(port
, vlan
, vport
->bridge
);
1154 vlan
= mlx5_esw_bridge_vlan_create(vid
, flags
, port
, esw
);
1156 NL_SET_ERR_MSG_MOD(extack
, "Failed to create VLAN entry");
1157 return PTR_ERR(vlan
);
1162 void mlx5_esw_bridge_port_vlan_del(u16 vid
, struct mlx5_eswitch
*esw
, struct mlx5_vport
*vport
)
1164 struct mlx5_esw_bridge_port
*port
;
1165 struct mlx5_esw_bridge_vlan
*vlan
;
1167 port
= mlx5_esw_bridge_port_lookup(vport
->vport
, vport
->bridge
);
1171 vlan
= mlx5_esw_bridge_vlan_lookup(vid
, port
);
1174 mlx5_esw_bridge_vlan_cleanup(port
, vlan
, vport
->bridge
);
1177 void mlx5_esw_bridge_fdb_create(struct net_device
*dev
, struct mlx5_eswitch
*esw
,
1178 struct mlx5_vport
*vport
,
1179 struct switchdev_notifier_fdb_info
*fdb_info
)
1181 struct mlx5_esw_bridge
*bridge
= vport
->bridge
;
1182 struct mlx5_esw_bridge_fdb_entry
*entry
;
1183 u16 vport_num
= vport
->vport
;
1186 esw_info(esw
->dev
, "Vport is not assigned to bridge (vport=%u)\n", vport_num
);
1190 entry
= mlx5_esw_bridge_fdb_entry_init(dev
, vport_num
, fdb_info
->addr
, fdb_info
->vid
,
1191 fdb_info
->added_by_user
, esw
, bridge
);
1195 if (entry
->flags
& MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER
)
1196 mlx5_esw_bridge_fdb_offload_notify(dev
, entry
->key
.addr
, entry
->key
.vid
,
1197 SWITCHDEV_FDB_OFFLOADED
);
1199 /* Take over dynamic entries to prevent kernel bridge from aging them out. */
1200 mlx5_esw_bridge_fdb_offload_notify(dev
, entry
->key
.addr
, entry
->key
.vid
,
1201 SWITCHDEV_FDB_ADD_TO_BRIDGE
);
1204 void mlx5_esw_bridge_fdb_remove(struct net_device
*dev
, struct mlx5_eswitch
*esw
,
1205 struct mlx5_vport
*vport
,
1206 struct switchdev_notifier_fdb_info
*fdb_info
)
1208 struct mlx5_esw_bridge
*bridge
= vport
->bridge
;
1209 struct mlx5_esw_bridge_fdb_entry
*entry
;
1210 struct mlx5_esw_bridge_fdb_key key
;
1211 u16 vport_num
= vport
->vport
;
1214 esw_warn(esw
->dev
, "Vport is not assigned to bridge (vport=%u)\n", vport_num
);
1218 ether_addr_copy(key
.addr
, fdb_info
->addr
);
1219 key
.vid
= fdb_info
->vid
;
1220 entry
= rhashtable_lookup_fast(&bridge
->fdb_ht
, &key
, fdb_ht_params
);
1223 "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
1224 key
.addr
, key
.vid
, vport_num
);
1228 if (!(entry
->flags
& MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER
))
1229 mlx5_esw_bridge_fdb_offload_notify(dev
, entry
->key
.addr
, entry
->key
.vid
,
1230 SWITCHDEV_FDB_DEL_TO_BRIDGE
);
1231 mlx5_esw_bridge_fdb_entry_cleanup(entry
, bridge
);
1234 void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads
*br_offloads
)
1236 struct mlx5_esw_bridge_fdb_entry
*entry
, *tmp
;
1237 struct mlx5_esw_bridge
*bridge
;
1239 list_for_each_entry(bridge
, &br_offloads
->bridges
, list
) {
1240 list_for_each_entry_safe(entry
, tmp
, &bridge
->fdb_list
, list
) {
1241 unsigned long lastuse
=
1242 (unsigned long)mlx5_fc_query_lastuse(entry
->ingress_counter
);
1244 if (entry
->flags
& MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER
)
1247 if (time_after(lastuse
, entry
->lastuse
)) {
1248 mlx5_esw_bridge_fdb_entry_refresh(lastuse
, entry
);
1249 } else if (time_is_before_jiffies(entry
->lastuse
+ bridge
->ageing_time
)) {
1250 mlx5_esw_bridge_fdb_offload_notify(entry
->dev
, entry
->key
.addr
,
1252 SWITCHDEV_FDB_DEL_TO_BRIDGE
);
1253 mlx5_esw_bridge_fdb_entry_cleanup(entry
, bridge
);
1259 static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads
*br_offloads
)
1261 struct mlx5_eswitch
*esw
= br_offloads
->esw
;
1262 struct mlx5_vport
*vport
;
1265 mlx5_esw_for_each_vport(esw
, i
, vport
)
1267 mlx5_esw_bridge_vport_cleanup(br_offloads
, vport
);
1269 WARN_ONCE(!list_empty(&br_offloads
->bridges
),
1270 "Cleaning up bridge offloads while still having bridges attached\n");
1273 struct mlx5_esw_bridge_offloads
*mlx5_esw_bridge_init(struct mlx5_eswitch
*esw
)
1275 struct mlx5_esw_bridge_offloads
*br_offloads
;
1277 br_offloads
= kvzalloc(sizeof(*br_offloads
), GFP_KERNEL
);
1279 return ERR_PTR(-ENOMEM
);
1281 INIT_LIST_HEAD(&br_offloads
->bridges
);
1282 br_offloads
->esw
= esw
;
1283 esw
->br_offloads
= br_offloads
;
1288 void mlx5_esw_bridge_cleanup(struct mlx5_eswitch
*esw
)
1290 struct mlx5_esw_bridge_offloads
*br_offloads
= esw
->br_offloads
;
1295 mlx5_esw_bridge_flush(br_offloads
);
1297 esw
->br_offloads
= NULL
;
1298 kvfree(br_offloads
);