/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define UPLINK_REP_INDEX 0
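
/* fdb_prio_table() above caches one flow table (and its rule refcount) per
 * (chain, prio, level) triplet, so rules that target the same slice of the
 * FDB share a single hardware table instead of each creating its own.
 */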

/* The rep getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_reps(esw, i, rep)			\
	for ((i) = MLX5_VPORT_PF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs)		\
	for ((i) = MLX5_VPORT_FIRST_VF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs)	\
	for ((i) = (nvfs);					\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs)		\
	for ((vport) = MLX5_VPORT_FIRST_VF;			\
	     (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs)	\
	for ((vport) = (nvfs);					\
	     (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
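
/* VF vports are numbered consecutively from MLX5_VPORT_FIRST_VF, so the
 * iterators above treat nvfs as an inclusive upper bound on the VF range.
 */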

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return !!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED);
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}
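
/* Build a firmware flow rule from an e-switch flow attribute: assemble the
 * destination array (forward vports, a goto-chain table, an optional flow
 * counter), stamp the source-port match (plus the owner vhca_id on merged
 * e-switches), and take a reference on the chain/prio table the rule lands
 * in. An abridged caller sketch, with hypothetical reps:
 *
 *	attr.in_rep = vf_rep;
 *	attr.dests[0].rep = uplink_rep;
 *	attr.dests[0].mdev = esw->dev;
 *	attr.out_count = 1;
 *	attr.chain = 0;
 *	attr.prio = 1;
 *	attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, &attr);
 */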
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.reformat_id = attr->dests[j].encap_id;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.reformat_id =
						attr->dests[j].encap_id;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
		if (attr->match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
	} else if (attr->match_level != MLX5_MATCH_NONE) {
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}
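
/* A "fwd" rule spans two levels of the same chain/prio slice: level 0 holds
 * the match plus the pre-mirror (split) vport destinations and chains into
 * the level 1 forwarding table where the rest of the actions apply. Both
 * levels are referenced here and released in __mlx5_eswitch_del_rule().
 */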
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.reformat_id = attr->dests[i].encap_id;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	esw->offloads.num_flows++;

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);

	mlx5_del_flow_rules(rule);
	esw->offloads.num_flows--;

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
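
/* When the HCA cannot do per-flow vlan push/pop in the FDB, the actions are
 * emulated with per-vport vlan configuration instead: a global pop (strip)
 * policy is applied to every loaded VF rep once the first vlan rule shows
 * up, and vlan insertion is configured on the vport a push rule acts on.
 * The refcounts below keep that shared vport state alive across rules.
 */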
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (rep->rep_if[REP_ETH].state != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}
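
/* Peer miss rules: on a merged e-switch pair, traffic that arrives from a
 * peer vport and misses in the local FDB is sent to the peer's e-switch
 * manager vport, one rule per possible source port (PF, ECPF and VFs).
 */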
static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				  misc_parameters);

	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
		 MLX5_CAP_GEN(peer_dev, vhca_id));

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_PF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, i);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently supports four pools of four sizes (ESW_POOLS), and a
 * virtual memory region of 16M (ESW_SIZE); this region is duplicated for
 * each flow table pool. We can allocate up to 16M of each pool, and we keep
 * track of how much of it is used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };
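
/* Worked example: with ESW_SIZE = 16M and the 4M pool, fdb_left[0] starts
 * at 16M / 4M = 4; get_sz_from_pool() hands out the largest size that still
 * has budget and decrements the counter, put_sz_to_pool() returns it, so at
 * most four 4M tables can exist at any one time.
 */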
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}
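
/* Symmetric to esw_get_prio_table(): drop one reference from each level
 * from 'level' down to 0; a table whose rule count reaches zero is
 * destroyed and its size is returned to the pool.
 */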
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
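
/* Slow path FDB sizing: room for MAX_SQ_NVPORTS send-to-vport rules per
 * vport plus MAX_PF_SQ for the PF, one peer miss rule per vport, and the
 * two match-all miss flows (see the table_size computation below).
 */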
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN], rep_type;
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	mlx5_esw_for_all_reps(esw, vport, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport);
		ether_addr_copy(rep->hw_id, hw_id);

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			rep->rep_if[rep_type].state = REP_UNREGISTERED;
	}

	return 0;
}

static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (rep->rep_if[rep_type].state != REP_LOADED)
		return;

	rep->rep_if[rep_type].unload(rep);
	rep->rep_if[rep_type].state = REP_REGISTERED;
}

static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				   u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
				    u8 rep_type)
{
	__unload_reps_vf_vport(esw, nvports, rep_type);

	/* Special vports must be the last to unload. */
	__unload_reps_special_vport(esw, rep_type);
}

static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, nvports, rep_type);
}

static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	int err = 0;

	if (rep->rep_if[rep_type].state != REP_REGISTERED)
		return 0;

	err = rep->rep_if[rep_type].load(esw->dev, rep);
	if (err)
		return err;

	rep->rep_if[rep_type].state = REP_LOADED;

	return 0;
}

static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	err = __esw_offloads_load_rep(esw, rep, rep_type);
	if (err)
		return err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_pf;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_ecpf;
	}

	return 0;

err_ecpf:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

err_pf:
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err, i;

	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_vf;
	}

	return 0;

err_vf:
	__unload_reps_vf_vport(esw, --i, rep_type);
	return err;
}

static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_vf_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
	return err;
}

static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
				 u8 rep_type)
{
	int err;

	/* Special vports must be loaded first. */
	err = __load_reps_special_vport(esw, rep_type);
	if (err)
		return err;

	err = __load_reps_vf_vport(esw, nvports, rep_type);
	if (err)
		goto err_vfs;

	return 0;

err_vfs:
	__unload_reps_special_vport(esw, rep_type);
	return err;
}

static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_all_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, nvports, rep_type);
	return err;
}

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)
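
/* Pairing is symmetric: on a PAIR event each e-switch installs peer miss
 * rules pointing at the other, and only then is the devcom component marked
 * paired; an UNPAIR event tears both directions down again.
 */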
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_eswitch *peer_esw = event_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_out;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);

err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw, nvports);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw, nvports);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}

static void esw_host_params_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	int err, num_vf = 0;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf);
	if (err || num_vf == esw->host_info.num_vfs)
		goto out;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->host_info.num_vfs > 0) {
		esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs);
	} else {
		err = esw_offloads_load_vf_reps(esw, num_vf);

		if (err)
			goto out;
	}

	esw->host_info.num_vfs = num_vf;

out:
	kfree(host_work);
}

static int esw_host_params_event(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_host_work *host_work;
	struct mlx5_host_info *host_info;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb);
	esw = container_of(host_info, struct mlx5_eswitch, host_info);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_host_params_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
		      int total_nvports)
{
	int err;

	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	err = esw_offloads_steering_init(esw, total_nvports);
	if (err)
		return err;

	err = esw_offloads_load_all_reps(esw, vf_nvports);
	if (err)
		goto err_reps;

	esw_offloads_devcom_init(esw);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event,
			     HOST_PARAMS_CHANGE);
		mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb);
		esw->host_info.num_vfs = vf_nvports;
	}

	return 0;

err_reps:
	esw_offloads_steering_cleanup(esw);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
	u16 num_vfs;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb);
		flush_workqueue(esw->work_queue);
		num_vfs = esw->host_info.num_vfs;
	} else {
		num_vfs = esw->dev->priv.sriov.num_vfs;
	}

	esw_offloads_devcom_cleanup(esw);
	esw_offloads_unload_all_reps(esw, num_vfs);
	esw_offloads_steering_cleanup(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == SRIOV_NONE &&
	    !mlx5_core_is_ecpf_esw_manager(dev))
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);

	return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
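
/* Changing the encap mode requires rebuilding the FDB: the slow path table
 * is created with the reformat/decap flags baked in, so it is destroyed and
 * re-created under the new setting, with a best-effort rollback on failure.
 */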
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep_if *__rep_if,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_if *rep_if;
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_all_reps(esw, i, rep) {
		rep_if = &rep->rep_if[rep_type];
		rep_if->load   = __rep_if->load;
		rep_if->unload = __rep_if->unload;
		rep_if->get_proto_dev = __rep_if->get_proto_dev;
		rep_if->priv = __rep_if->priv;

		rep_if->state = REP_REGISTERED;
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	u16 max_vf = mlx5_core_max_vfs(esw->dev);
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == SRIOV_OFFLOADS)
		__unload_reps_all_vport(esw, max_vf, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		rep->rep_if[rep_type].state = REP_UNREGISTERED;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (rep->rep_if[rep_type].state == REP_LOADED &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);