/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
#include "lib/devcom.h"

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

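/*
 * A minimal usage sketch (illustrative, not from the original file): the
 * macro expands to an lvalue, so callers both read and update the
 * per-(chain, prio, level) slot directly, e.g.:
 *
 *	fdb_prio_table(esw, chain, prio, level).num_rules++;
 *	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
 */
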
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.reformat_id = attr->dests[j].encap_id;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.reformat_id =
						attr->dests[j].encap_id;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
		if (attr->match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
	} else if (attr->match_level != MLX5_MATCH_NONE) {
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.reformat_id = attr->dests[i].encap_id;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	esw->offloads.num_flows++;

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);

	mlx5_del_flow_rules(rule);
	esw->offloads.num_flows--;

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

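/*
 * A minimal usage sketch (illustrative, not from the original file): a
 * caller such as a TC offload path builds a spec and attr, installs the
 * rule, and pairs it with the matching del call on teardown:
 *
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 */
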
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->rep_if[REP_ETH].valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep; /* applies only when pop/push action applies on in_rep */

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

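/*
 * A worked example of the refcount guard above (illustrative, not from
 * the original file): if in_rep already pushes vlan 10 for offloaded
 * rules (in_rep->vlan == 10, in_rep->vlan_refcount > 0), a new rule
 * asking to push vlan 20 (attr->vlan_vid[0] == 20) is rejected with
 * -EOPNOTSUPP instead of silently re-programming the vport vlan.
 */
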
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));

	kvfree(spec);
out:
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

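/*
 * A hedged usage sketch (not from the original file): a representor
 * netdevice typically installs one such rule per send queue it opens, so
 * traffic transmitted on that SQ from the uplink is steered back to the
 * VF vport, and removes the rule on channel teardown:
 *
 *	rule = mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport, sqn);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_eswitch_del_send_to_vport_rule(rule);
 */
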
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				  misc_parameters);

	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
		 MLX5_CAP_GEN(peer_dev, vhca_id));

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = 0;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	for (i = 1; i < nvports; i++) {
		MLX5_SET(fte_match_set_misc, misc, source_port, i);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
			goto add_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_flow_err:
	for (i--; i > 0; i--)
		mlx5_del_flow_rules(flows[i]);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	for (i = 1; i < esw->total_vports; i++)
		mlx5_del_flow_rules(flows[i]);

	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS), and a
 * virtual memory region of 16M (ESW_SIZE); this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };

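/*
 * Illustrative arithmetic (not from the original file): with ESW_SIZE of
 * 16M, the budgets set up in esw_create_offloads_fdb_tables() come out
 * to 16M/4M = 4 tables of 4M entries, 16 of 1M, 256 of 64K and 4096 of
 * 4K; get_sz_from_pool() hands out the largest size that still has
 * budget, and put_sz_to_pool() credits it back on table destroy.
 */
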
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return ERR_PTR(-ENOMEM);
}

static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

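/*
 * Illustrative sizing (not from the original file): with, say, 4 VF
 * vports, esw_create_offloads_fdb_tables() below sizes the slow path
 * table as 4 * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 + esw->total_vports,
 * i.e. room for the per-SQ send-to-vport rules, the PF SQ rules, the
 * two unicast/multicast miss rules and one peer miss rule per vport.
 */
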
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
		esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

*esw
)
1081 struct mlx5_esw_offload
*offloads
= &esw
->offloads
;
1083 mlx5_destroy_flow_table(offloads
->ft_offloads
);
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}

*esw
, int nvports
,
1239 struct mlx5_eswitch_rep
*rep
;
1242 for (vport
= nvports
- 1; vport
>= 0; vport
--) {
1243 rep
= &esw
->offloads
.vport_reps
[vport
];
1244 if (!rep
->rep_if
[rep_type
].valid
)
1247 rep
->rep_if
[rep_type
].unload(rep
);
1251 static void esw_offloads_unload_reps(struct mlx5_eswitch
*esw
, int nvports
)
1253 u8 rep_type
= NUM_REP_TYPES
;
1255 while (rep_type
-- > 0)
1256 esw_offloads_unload_reps_type(esw
, nvports
, rep_type
);
1259 static int esw_offloads_load_reps_type(struct mlx5_eswitch
*esw
, int nvports
,
1262 struct mlx5_eswitch_rep
*rep
;
1266 for (vport
= 0; vport
< nvports
; vport
++) {
1267 rep
= &esw
->offloads
.vport_reps
[vport
];
1268 if (!rep
->rep_if
[rep_type
].valid
)
1271 err
= rep
->rep_if
[rep_type
].load(esw
->dev
, rep
);
1279 esw_offloads_unload_reps_type(esw
, vport
, rep_type
);
1283 static int esw_offloads_load_reps(struct mlx5_eswitch
*esw
, int nvports
)
1288 for (rep_type
= 0; rep_type
< NUM_REP_TYPES
; rep_type
++) {
1289 err
= esw_offloads_load_reps_type(esw
, nvports
, rep_type
);
1297 while (rep_type
-- > 0)
1298 esw_offloads_unload_reps_type(esw
, nvports
, rep_type
);
1302 #define ESW_OFFLOADS_DEVCOM_PAIR (0)
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

*esw
,
1306 struct mlx5_eswitch
*peer_esw
)
1310 err
= esw_add_fdb_peer_miss_rules(esw
, peer_esw
->dev
);
1317 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch
*esw
);
1319 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch
*esw
)
1321 mlx5e_tc_clean_fdb_peer_flows(esw
);
1322 esw_del_fdb_peer_miss_rules(esw
);
1325 static int mlx5_esw_offloads_devcom_event(int event
,
1329 struct mlx5_eswitch
*esw
= my_data
;
1330 struct mlx5_eswitch
*peer_esw
= event_data
;
1331 struct mlx5_devcom
*devcom
= esw
->dev
->priv
.devcom
;
1335 case ESW_OFFLOADS_DEVCOM_PAIR
:
1336 err
= mlx5_esw_offloads_pair(esw
, peer_esw
);
1340 err
= mlx5_esw_offloads_pair(peer_esw
, esw
);
1344 mlx5_devcom_set_paired(devcom
, MLX5_DEVCOM_ESW_OFFLOADS
, true);
1347 case ESW_OFFLOADS_DEVCOM_UNPAIR
:
1348 if (!mlx5_devcom_is_paired(devcom
, MLX5_DEVCOM_ESW_OFFLOADS
))
1351 mlx5_devcom_set_paired(devcom
, MLX5_DEVCOM_ESW_OFFLOADS
, false);
1352 mlx5_esw_offloads_unpair(peer_esw
);
1353 mlx5_esw_offloads_unpair(esw
);
1360 mlx5_esw_offloads_unpair(esw
);
1363 mlx5_core_err(esw
->dev
, "esw offloads devcom event failure, event %u err %d",
1368 static void esw_offloads_devcom_init(struct mlx5_eswitch
*esw
)
1370 struct mlx5_devcom
*devcom
= esw
->dev
->priv
.devcom
;
1372 INIT_LIST_HEAD(&esw
->offloads
.peer_flows
);
1373 mutex_init(&esw
->offloads
.peer_mutex
);
1375 if (!MLX5_CAP_ESW(esw
->dev
, merged_eswitch
))
1378 mlx5_devcom_register_component(devcom
,
1379 MLX5_DEVCOM_ESW_OFFLOADS
,
1380 mlx5_esw_offloads_devcom_event
,
1383 mlx5_devcom_send_event(devcom
,
1384 MLX5_DEVCOM_ESW_OFFLOADS
,
1385 ESW_OFFLOADS_DEVCOM_PAIR
, esw
);
1388 static void esw_offloads_devcom_cleanup(struct mlx5_eswitch
*esw
)
1390 struct mlx5_devcom
*devcom
= esw
->dev
->priv
.devcom
;
1392 if (!MLX5_CAP_ESW(esw
->dev
, merged_eswitch
))
1395 mlx5_devcom_send_event(devcom
, MLX5_DEVCOM_ESW_OFFLOADS
,
1396 ESW_OFFLOADS_DEVCOM_UNPAIR
, esw
);
1398 mlx5_devcom_unregister_component(devcom
, MLX5_DEVCOM_ESW_OFFLOADS
);
1401 int esw_offloads_init(struct mlx5_eswitch
*esw
, int nvports
)
1405 mutex_init(&esw
->fdb_table
.offloads
.fdb_prio_lock
);
1407 err
= esw_create_offloads_fdb_tables(esw
, nvports
);
1411 err
= esw_create_offloads_table(esw
);
1415 err
= esw_create_vport_rx_group(esw
);
1419 err
= esw_offloads_load_reps(esw
, nvports
);
1423 esw_offloads_devcom_init(esw
);
1427 esw_destroy_vport_rx_group(esw
);
1430 esw_destroy_offloads_table(esw
);
1433 esw_destroy_offloads_fdb_tables(esw
);
1438 static int esw_offloads_stop(struct mlx5_eswitch
*esw
,
1439 struct netlink_ext_ack
*extack
)
1441 int err
, err1
, num_vfs
= esw
->dev
->priv
.sriov
.num_vfs
;
1443 mlx5_eswitch_disable_sriov(esw
);
1444 err
= mlx5_eswitch_enable_sriov(esw
, num_vfs
, SRIOV_LEGACY
);
1446 NL_SET_ERR_MSG_MOD(extack
, "Failed setting eswitch to legacy");
1447 err1
= mlx5_eswitch_enable_sriov(esw
, num_vfs
, SRIOV_OFFLOADS
);
1449 NL_SET_ERR_MSG_MOD(extack
,
1450 "Failed setting eswitch back to offloads");
1457 void esw_offloads_cleanup(struct mlx5_eswitch
*esw
, int nvports
)
1459 esw_offloads_devcom_cleanup(esw
);
1460 esw_offloads_unload_reps(esw
, nvports
);
1461 esw_destroy_vport_rx_group(esw
);
1462 esw_destroy_offloads_table(esw
);
1463 esw_destroy_offloads_fdb_tables(esw
);
1466 static int esw_mode_from_devlink(u16 mode
, u16
*mlx5_mode
)
1469 case DEVLINK_ESWITCH_MODE_LEGACY
:
1470 *mlx5_mode
= SRIOV_LEGACY
;
1472 case DEVLINK_ESWITCH_MODE_SWITCHDEV
:
1473 *mlx5_mode
= SRIOV_OFFLOADS
;
1482 static int esw_mode_to_devlink(u16 mlx5_mode
, u16
*mode
)
1484 switch (mlx5_mode
) {
1486 *mode
= DEVLINK_ESWITCH_MODE_LEGACY
;
1488 case SRIOV_OFFLOADS
:
1489 *mode
= DEVLINK_ESWITCH_MODE_SWITCHDEV
;
1498 static int esw_inline_mode_from_devlink(u8 mode
, u8
*mlx5_mode
)
1501 case DEVLINK_ESWITCH_INLINE_MODE_NONE
:
1502 *mlx5_mode
= MLX5_INLINE_MODE_NONE
;
1504 case DEVLINK_ESWITCH_INLINE_MODE_LINK
:
1505 *mlx5_mode
= MLX5_INLINE_MODE_L2
;
1507 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK
:
1508 *mlx5_mode
= MLX5_INLINE_MODE_IP
;
1510 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT
:
1511 *mlx5_mode
= MLX5_INLINE_MODE_TCP_UDP
;
1520 static int esw_inline_mode_to_devlink(u8 mlx5_mode
, u8
*mode
)
1522 switch (mlx5_mode
) {
1523 case MLX5_INLINE_MODE_NONE
:
1524 *mode
= DEVLINK_ESWITCH_INLINE_MODE_NONE
;
1526 case MLX5_INLINE_MODE_L2
:
1527 *mode
= DEVLINK_ESWITCH_INLINE_MODE_LINK
;
1529 case MLX5_INLINE_MODE_IP
:
1530 *mode
= DEVLINK_ESWITCH_INLINE_MODE_NETWORK
;
1532 case MLX5_INLINE_MODE_TCP_UDP
:
1533 *mode
= DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT
;
1542 static int mlx5_devlink_eswitch_check(struct devlink
*devlink
)
1544 struct mlx5_core_dev
*dev
= devlink_priv(devlink
);
1546 if (MLX5_CAP_GEN(dev
, port_type
) != MLX5_CAP_PORT_TYPE_ETH
)
1549 if(!MLX5_ESWITCH_MANAGER(dev
))
1552 if (dev
->priv
.eswitch
->mode
== SRIOV_NONE
)
1558 int mlx5_devlink_eswitch_mode_set(struct devlink
*devlink
, u16 mode
,
1559 struct netlink_ext_ack
*extack
)
1561 struct mlx5_core_dev
*dev
= devlink_priv(devlink
);
1562 u16 cur_mlx5_mode
, mlx5_mode
= 0;
1565 err
= mlx5_devlink_eswitch_check(devlink
);
1569 cur_mlx5_mode
= dev
->priv
.eswitch
->mode
;
1571 if (esw_mode_from_devlink(mode
, &mlx5_mode
))
1574 if (cur_mlx5_mode
== mlx5_mode
)
1577 if (mode
== DEVLINK_ESWITCH_MODE_SWITCHDEV
)
1578 return esw_offloads_start(dev
->priv
.eswitch
, extack
);
1579 else if (mode
== DEVLINK_ESWITCH_MODE_LEGACY
)
1580 return esw_offloads_stop(dev
->priv
.eswitch
, extack
);
1585 int mlx5_devlink_eswitch_mode_get(struct devlink
*devlink
, u16
*mode
)
1587 struct mlx5_core_dev
*dev
= devlink_priv(devlink
);
1590 err
= mlx5_devlink_eswitch_check(devlink
);
1594 return esw_mode_to_devlink(dev
->priv
.eswitch
->mode
, mode
);
1597 int mlx5_devlink_eswitch_inline_mode_set(struct devlink
*devlink
, u8 mode
,
1598 struct netlink_ext_ack
*extack
)
1600 struct mlx5_core_dev
*dev
= devlink_priv(devlink
);
1601 struct mlx5_eswitch
*esw
= dev
->priv
.eswitch
;
1605 err
= mlx5_devlink_eswitch_check(devlink
);
1609 switch (MLX5_CAP_ETH(dev
, wqe_inline_mode
)) {
1610 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED
:
1611 if (mode
== DEVLINK_ESWITCH_INLINE_MODE_NONE
)
1614 case MLX5_CAP_INLINE_MODE_L2
:
1615 NL_SET_ERR_MSG_MOD(extack
, "Inline mode can't be set");
1617 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT
:
1621 if (esw
->offloads
.num_flows
> 0) {
1622 NL_SET_ERR_MSG_MOD(extack
,
1623 "Can't set inline mode when flows are configured");
1627 err
= esw_inline_mode_from_devlink(mode
, &mlx5_mode
);
1631 for (vport
= 1; vport
< esw
->enabled_vports
; vport
++) {
1632 err
= mlx5_modify_nic_vport_min_inline(dev
, vport
, mlx5_mode
);
1634 NL_SET_ERR_MSG_MOD(extack
,
1635 "Failed to set min inline on vport");
1636 goto revert_inline_mode
;
1640 esw
->offloads
.inline_mode
= mlx5_mode
;
1645 mlx5_modify_nic_vport_min_inline(dev
,
1647 esw
->offloads
.inline_mode
);
1652 int mlx5_devlink_eswitch_inline_mode_get(struct devlink
*devlink
, u8
*mode
)
1654 struct mlx5_core_dev
*dev
= devlink_priv(devlink
);
1655 struct mlx5_eswitch
*esw
= dev
->priv
.eswitch
;
1658 err
= mlx5_devlink_eswitch_check(devlink
);
1662 return esw_inline_mode_to_devlink(esw
->offloads
.inline_mode
, mode
);
1665 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch
*esw
, int nvfs
, u8
*mode
)
1667 u8 prev_mlx5_mode
, mlx5_mode
= MLX5_INLINE_MODE_L2
;
1668 struct mlx5_core_dev
*dev
= esw
->dev
;
1671 if (!MLX5_CAP_GEN(dev
, vport_group_manager
))
1674 if (esw
->mode
== SRIOV_NONE
)
1677 switch (MLX5_CAP_ETH(dev
, wqe_inline_mode
)) {
1678 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED
:
1679 mlx5_mode
= MLX5_INLINE_MODE_NONE
;
1681 case MLX5_CAP_INLINE_MODE_L2
:
1682 mlx5_mode
= MLX5_INLINE_MODE_L2
;
1684 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT
:
1689 for (vport
= 1; vport
<= nvfs
; vport
++) {
1690 mlx5_query_nic_vport_min_inline(dev
, vport
, &mlx5_mode
);
1691 if (vport
> 1 && prev_mlx5_mode
!= mlx5_mode
)
1693 prev_mlx5_mode
= mlx5_mode
;
1701 int mlx5_devlink_eswitch_encap_mode_set(struct devlink
*devlink
, u8 encap
,
1702 struct netlink_ext_ack
*extack
)
1704 struct mlx5_core_dev
*dev
= devlink_priv(devlink
);
1705 struct mlx5_eswitch
*esw
= dev
->priv
.eswitch
;
1708 err
= mlx5_devlink_eswitch_check(devlink
);
1712 if (encap
!= DEVLINK_ESWITCH_ENCAP_MODE_NONE
&&
1713 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev
, reformat
) ||
1714 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev
, decap
)))
1717 if (encap
&& encap
!= DEVLINK_ESWITCH_ENCAP_MODE_BASIC
)
1720 if (esw
->mode
== SRIOV_LEGACY
) {
1721 esw
->offloads
.encap
= encap
;
1725 if (esw
->offloads
.encap
== encap
)
1728 if (esw
->offloads
.num_flows
> 0) {
1729 NL_SET_ERR_MSG_MOD(extack
,
1730 "Can't set encapsulation when flows are configured");
1734 esw_destroy_offloads_fdb_tables(esw
);
1736 esw
->offloads
.encap
= encap
;
1738 err
= esw_create_offloads_fdb_tables(esw
, esw
->nvports
);
1741 NL_SET_ERR_MSG_MOD(extack
,
1742 "Failed re-creating fast FDB table");
1743 esw
->offloads
.encap
= !encap
;
1744 (void)esw_create_offloads_fdb_tables(esw
, esw
->nvports
);
1750 int mlx5_devlink_eswitch_encap_mode_get(struct devlink
*devlink
, u8
*encap
)
1752 struct mlx5_core_dev
*dev
= devlink_priv(devlink
);
1753 struct mlx5_eswitch
*esw
= dev
->priv
.eswitch
;
1756 err
= mlx5_devlink_eswitch_check(devlink
);
1760 *encap
= esw
->offloads
.encap
;
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->rep_if[rep_type].unload(rep);

	rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

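/*
 * A hedged usage sketch (not from the original file): a protocol driver
 * such as the mlx5e Ethernet representor fills an mlx5_eswitch_rep_if
 * with its callbacks and registers one per vport; the callback names
 * below are hypothetical:
 *
 *	struct mlx5_eswitch_rep_if rep_if = {};
 *
 *	rep_if.load = my_rep_load;		// hypothetical callback
 *	rep_if.unload = my_rep_unload;		// hypothetical callback
 *	rep_if.get_proto_dev = my_rep_get_proto_dev;
 *	mlx5_eswitch_register_vport_rep(esw, vport_index, &rep_if, REP_ETH);
 *	...
 *	mlx5_eswitch_unregister_vport_rep(esw, vport_index, REP_ETH);
 */
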
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	if (vport == FDB_UPLINK_VPORT)
		vport = UPLINK_REP_INDEX;

	rep = &offloads->vport_reps[vport];

	if (rep->rep_if[rep_type].valid &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);