]>
Commit | Line | Data |
---|---|---|
69697b6e OG |
1 | /* |
2 | * Copyright (c) 2016, Mellanox Technologies. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #include <linux/etherdevice.h> | |
34 | #include <linux/mlx5/driver.h> | |
35 | #include <linux/mlx5/mlx5_ifc.h> | |
36 | #include <linux/mlx5/vport.h> | |
37 | #include <linux/mlx5/fs.h> | |
38 | #include "mlx5_core.h" | |
39 | #include "eswitch.h" | |
e52c2802 PB |
40 | #include "en.h" |
41 | #include "fs_core.h" | |
69697b6e | 42 | |
1033665e OG |
/* FDB table priorities: the fast path holds the offloaded flow rules,
 * the slow path table catches misses and send-to-vport traffic.
 */
enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};
/* Accessor for the per-(chain, prio, level) FDB table bookkeeping slot
 * (flow table pointer + rule refcount), see esw_get/put_prio_table().
 */
#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
56 | bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw) | |
57 | { | |
58 | return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)); | |
59 | } | |
60 | ||
61 | u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw) | |
62 | { | |
63 | if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) | |
64 | return FDB_MAX_CHAIN; | |
65 | ||
66 | return 0; | |
67 | } | |
68 | ||
69 | u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw) | |
70 | { | |
71 | if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED) | |
72 | return FDB_MAX_PRIO; | |
73 | ||
74 | return U16_MAX; | |
75 | } | |
76 | ||
/* Add an offloaded flow rule to the fast path FDB.
 *
 * Builds the destination list (goto-chain table OR forward vports, plus an
 * optional counter), sets the source-port (and, on merged eswitch, the
 * vhca id) match, then inserts the rule into the table for
 * (attr->chain, attr->prio). Table references taken here are released by
 * __mlx5_eswitch_del_rule().
 *
 * Returns the rule handle or ERR_PTR() on failure.
 */
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	bool mirror = !!(attr->mirror_count);
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			/* goto-chain action: destination is the first table
			 * (prio 1, level 0) of the target chain
			 */
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			/* non-mirror destinations start after the mirror ones */
			for (j = attr->mirror_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->out_rep[j]->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
				dest[i].vport.vhca_id_valid =
					!!MLX5_CAP_ESW(esw->dev, merged_eswitch);
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	/* match on the source vport of the sender rep */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
		flow_act.reformat_id = attr->encap_id;

	/* mirror rules live in level 1, plain rules in level 0 */
	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!mirror);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}
e4ad91f2 CM |
192 | struct mlx5_flow_handle * |
193 | mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, | |
194 | struct mlx5_flow_spec *spec, | |
195 | struct mlx5_esw_flow_attr *attr) | |
196 | { | |
197 | struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; | |
198 | struct mlx5_flow_act flow_act = {0}; | |
e52c2802 PB |
199 | struct mlx5_flow_table *fast_fdb; |
200 | struct mlx5_flow_table *fwd_fdb; | |
e4ad91f2 CM |
201 | struct mlx5_flow_handle *rule; |
202 | void *misc; | |
203 | int i; | |
204 | ||
e52c2802 PB |
205 | fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0); |
206 | if (IS_ERR(fast_fdb)) { | |
207 | rule = ERR_CAST(fast_fdb); | |
208 | goto err_get_fast; | |
209 | } | |
210 | ||
211 | fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1); | |
212 | if (IS_ERR(fwd_fdb)) { | |
213 | rule = ERR_CAST(fwd_fdb); | |
214 | goto err_get_fwd; | |
215 | } | |
216 | ||
e4ad91f2 CM |
217 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; |
218 | for (i = 0; i < attr->mirror_count; i++) { | |
219 | dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; | |
220 | dest[i].vport.num = attr->out_rep[i]->vport; | |
221 | dest[i].vport.vhca_id = | |
222 | MLX5_CAP_GEN(attr->out_mdev[i], vhca_id); | |
223 | dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); | |
224 | } | |
225 | dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; | |
e52c2802 | 226 | dest[i].ft = fwd_fdb, |
e4ad91f2 CM |
227 | i++; |
228 | ||
229 | misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); | |
230 | MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); | |
231 | ||
232 | if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) | |
233 | MLX5_SET(fte_match_set_misc, misc, | |
234 | source_eswitch_owner_vhca_id, | |
235 | MLX5_CAP_GEN(attr->in_mdev, vhca_id)); | |
236 | ||
237 | misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); | |
238 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); | |
239 | if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) | |
240 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, | |
241 | source_eswitch_owner_vhca_id); | |
242 | ||
243 | if (attr->match_level == MLX5_MATCH_NONE) | |
244 | spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; | |
245 | else | |
246 | spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | | |
247 | MLX5_MATCH_MISC_PARAMETERS; | |
248 | ||
e52c2802 | 249 | rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); |
e4ad91f2 | 250 | |
e52c2802 PB |
251 | if (IS_ERR(rule)) |
252 | goto add_err; | |
e4ad91f2 | 253 | |
e52c2802 PB |
254 | esw->offloads.num_flows++; |
255 | ||
256 | return rule; | |
257 | add_err: | |
258 | esw_put_prio_table(esw, attr->chain, attr->prio, 1); | |
259 | err_get_fwd: | |
260 | esw_put_prio_table(esw, attr->chain, attr->prio, 0); | |
261 | err_get_fast: | |
e4ad91f2 CM |
262 | return rule; |
263 | } | |
264 | ||
e52c2802 PB |
265 | static void |
266 | __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, | |
267 | struct mlx5_flow_handle *rule, | |
268 | struct mlx5_esw_flow_attr *attr, | |
269 | bool fwd_rule) | |
270 | { | |
271 | bool mirror = (attr->mirror_count > 0); | |
272 | ||
273 | mlx5_del_flow_rules(rule); | |
274 | esw->offloads.num_flows--; | |
275 | ||
276 | if (fwd_rule) { | |
277 | esw_put_prio_table(esw, attr->chain, attr->prio, 1); | |
278 | esw_put_prio_table(esw, attr->chain, attr->prio, 0); | |
279 | } else { | |
280 | esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror); | |
281 | if (attr->dest_chain) | |
282 | esw_put_prio_table(esw, attr->dest_chain, 1, 0); | |
283 | } | |
284 | } | |
285 | ||
/* Delete a rule created by mlx5_eswitch_add_offloaded_rule(). */
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}
/* Delete a rule created by mlx5_eswitch_add_fwd_rule(). */
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
f5f82476 OG |
302 | static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) |
303 | { | |
304 | struct mlx5_eswitch_rep *rep; | |
305 | int vf_vport, err = 0; | |
306 | ||
307 | esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none"); | |
308 | for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) { | |
309 | rep = &esw->offloads.vport_reps[vf_vport]; | |
a4b97ab4 | 310 | if (!rep->rep_if[REP_ETH].valid) |
f5f82476 OG |
311 | continue; |
312 | ||
313 | err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val); | |
314 | if (err) | |
315 | goto out; | |
316 | } | |
317 | ||
318 | out: | |
319 | return err; | |
320 | } | |
321 | ||
322 | static struct mlx5_eswitch_rep * | |
323 | esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop) | |
324 | { | |
325 | struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL; | |
326 | ||
327 | in_rep = attr->in_rep; | |
592d3651 | 328 | out_rep = attr->out_rep[0]; |
f5f82476 OG |
329 | |
330 | if (push) | |
331 | vport = in_rep; | |
332 | else if (pop) | |
333 | vport = out_rep; | |
334 | else | |
335 | vport = in_rep; | |
336 | ||
337 | return vport; | |
338 | } | |
339 | ||
340 | static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, | |
341 | bool push, bool pop, bool fwd) | |
342 | { | |
343 | struct mlx5_eswitch_rep *in_rep, *out_rep; | |
344 | ||
345 | if ((push || pop) && !fwd) | |
346 | goto out_notsupp; | |
347 | ||
348 | in_rep = attr->in_rep; | |
592d3651 | 349 | out_rep = attr->out_rep[0]; |
f5f82476 OG |
350 | |
351 | if (push && in_rep->vport == FDB_UPLINK_VPORT) | |
352 | goto out_notsupp; | |
353 | ||
354 | if (pop && out_rep->vport == FDB_UPLINK_VPORT) | |
355 | goto out_notsupp; | |
356 | ||
357 | /* vport has vlan push configured, can't offload VF --> wire rules w.o it */ | |
358 | if (!push && !pop && fwd) | |
359 | if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT) | |
360 | goto out_notsupp; | |
361 | ||
362 | /* protects against (1) setting rules with different vlans to push and | |
363 | * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0) | |
364 | */ | |
1482bd3d | 365 | if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0])) |
f5f82476 OG |
366 | goto out_notsupp; |
367 | ||
368 | return 0; | |
369 | ||
370 | out_notsupp: | |
9eb78923 | 371 | return -EOPNOTSUPP; |
f5f82476 OG |
372 | } |
373 | ||
/* Emulate per-flow vlan push/pop via per-vport vlan configuration when the
 * firmware can't do it per rule.
 *
 * Bumps the global push/pop refcount (installing the global vlan strip
 * policy for the first rule) and, for push, configures vlan insert on the
 * source vport. attr->vlan_handled records whether this call took effect,
 * so that mlx5_eswitch_del_vlan_action() can undo exactly what was done.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		 !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		/* the vport vlan insert is shared by all push rules on it */
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}
/* Undo the emulated vlan push/pop configuration installed by
 * mlx5_eswitch_add_vlan_action() for this flow (a nop unless
 * attr->vlan_handled was set there). Drops the per-vport and global
 * refcounts, clearing the vport vlan insert and the global vlan strip
 * policy when the last user goes away.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}
f7a68945 | 491 | struct mlx5_flow_handle * |
ab22be9b OG |
492 | mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn) |
493 | { | |
66958ed9 | 494 | struct mlx5_flow_act flow_act = {0}; |
4c5009c5 | 495 | struct mlx5_flow_destination dest = {}; |
74491de9 | 496 | struct mlx5_flow_handle *flow_rule; |
c5bb1730 | 497 | struct mlx5_flow_spec *spec; |
ab22be9b OG |
498 | void *misc; |
499 | ||
1b9a07ee | 500 | spec = kvzalloc(sizeof(*spec), GFP_KERNEL); |
c5bb1730 | 501 | if (!spec) { |
ab22be9b OG |
502 | flow_rule = ERR_PTR(-ENOMEM); |
503 | goto out; | |
504 | } | |
505 | ||
c5bb1730 | 506 | misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); |
ab22be9b OG |
507 | MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); |
508 | MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */ | |
509 | ||
c5bb1730 | 510 | misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); |
ab22be9b OG |
511 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); |
512 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); | |
513 | ||
c5bb1730 | 514 | spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; |
ab22be9b | 515 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
b17f7fc1 | 516 | dest.vport.num = vport; |
66958ed9 | 517 | flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; |
ab22be9b | 518 | |
52fff327 | 519 | flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, |
66958ed9 | 520 | &flow_act, &dest, 1); |
ab22be9b OG |
521 | if (IS_ERR(flow_rule)) |
522 | esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); | |
523 | out: | |
c5bb1730 | 524 | kvfree(spec); |
ab22be9b OG |
525 | return flow_rule; |
526 | } | |
57cbd893 | 527 | EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule); |
ab22be9b | 528 | |
/* Remove a rule created by mlx5_eswitch_add_send_to_vport_rule(). */
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}
/* Install the two slow path miss rules that forward unmatched traffic to
 * the eswitch manager (vport 0): one for unicast and one for multicast
 * (multicast bit dmac[0] & 0x01 set in both criteria and value).
 * The handles are stored in esw->fdb_table.offloads.miss_rule_{uni,multi}.
 *
 * Returns 0 on success or a negative errno.
 */
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	/* criteria: only the multicast bit of the dmac participates */
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	/* unicast miss: multicast bit masked but value left at 0 */
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	/* multicast miss: same mask, multicast bit set in the value */
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		/* roll back the unicast rule so we fail atomically */
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}
#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
 * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOL is expected to be sorted from large to small
 */
#define ESW_SIZE (16 * 1024 * 1024)
/* fixed: file-scope constant made static so the symbol stays local to
 * this translation unit instead of polluting the kernel global namespace
 */
static const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
					   64 * 1024, 4 * 1024 };
607 | static int | |
608 | get_sz_from_pool(struct mlx5_eswitch *esw) | |
609 | { | |
610 | int sz = 0, i; | |
611 | ||
612 | for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) { | |
613 | if (esw->fdb_table.offloads.fdb_left[i]) { | |
614 | --esw->fdb_table.offloads.fdb_left[i]; | |
615 | sz = ESW_POOLS[i]; | |
616 | break; | |
617 | } | |
618 | } | |
619 | ||
620 | return sz; | |
621 | } | |
622 | ||
623 | static void | |
624 | put_sz_to_pool(struct mlx5_eswitch *esw, int sz) | |
625 | { | |
626 | int i; | |
627 | ||
628 | for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) { | |
629 | if (sz >= ESW_POOLS[i]) { | |
630 | ++esw->fdb_table.offloads.fdb_left[i]; | |
631 | break; | |
632 | } | |
633 | } | |
634 | } | |
635 | ||
636 | static struct mlx5_flow_table * | |
637 | create_next_size_table(struct mlx5_eswitch *esw, | |
638 | struct mlx5_flow_namespace *ns, | |
639 | u16 table_prio, | |
640 | int level, | |
641 | u32 flags) | |
642 | { | |
643 | struct mlx5_flow_table *fdb; | |
644 | int sz; | |
645 | ||
646 | sz = get_sz_from_pool(esw); | |
647 | if (!sz) | |
648 | return ERR_PTR(-ENOSPC); | |
649 | ||
650 | fdb = mlx5_create_auto_grouped_flow_table(ns, | |
651 | table_prio, | |
652 | sz, | |
653 | ESW_OFFLOADS_NUM_GROUPS, | |
654 | level, | |
655 | flags); | |
656 | if (IS_ERR(fdb)) { | |
657 | esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n", | |
658 | (int)PTR_ERR(fdb), table_prio, level, sz); | |
659 | put_sz_to_pool(esw, sz); | |
660 | } | |
661 | ||
662 | return fdb; | |
663 | } | |
664 | ||
/* Get (creating on demand) the FDB table for (chain, prio, level), taking
 * a rule reference on it and on every lower level of the same chain/prio
 * so fs_core can connect the tables correctly. Serialized by
 * fdb_prio_lock; dropped via esw_put_prio_table().
 *
 * Returns the table for @level or ERR_PTR() on failure.
 */
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* new tables inherit the FDB-wide encap/decap capability */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			/* only levels < l were created/referenced */
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}
e52c2802 PB |
728 | static void |
729 | esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level) | |
1967ce6e | 730 | { |
e52c2802 PB |
731 | int l; |
732 | ||
733 | mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock); | |
734 | ||
735 | for (l = level; l >= 0; l--) { | |
736 | if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0) | |
737 | continue; | |
738 | ||
739 | put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte); | |
740 | mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb); | |
741 | fdb_prio_table(esw, chain, prio, l).fdb = NULL; | |
742 | } | |
743 | ||
744 | mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock); | |
745 | } | |
746 | ||
747 | static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw) | |
748 | { | |
749 | /* If lazy creation isn't supported, deref the fast path tables */ | |
750 | if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) { | |
751 | esw_put_prio_table(esw, 0, 1, 1); | |
752 | esw_put_prio_table(esw, 0, 1, 0); | |
753 | } | |
1967ce6e OG |
754 | } |
755 | ||
/* Sizing of the slow path send-to-vport group: PF SQs plus per-vport SQs */
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
758 | |
759 | static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) | |
760 | { | |
761 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); | |
762 | struct mlx5_flow_table_attr ft_attr = {}; | |
763 | struct mlx5_core_dev *dev = esw->dev; | |
e52c2802 | 764 | u32 *flow_group_in, max_flow_counter; |
1967ce6e OG |
765 | struct mlx5_flow_namespace *root_ns; |
766 | struct mlx5_flow_table *fdb = NULL; | |
e52c2802 | 767 | int table_size, ix, err = 0, i; |
1967ce6e | 768 | struct mlx5_flow_group *g; |
e52c2802 | 769 | u32 flags = 0, fdb_max; |
1967ce6e | 770 | void *match_criteria; |
f80be543 | 771 | u8 *dmac; |
1967ce6e OG |
772 | |
773 | esw_debug(esw->dev, "Create offloads FDB Tables\n"); | |
1b9a07ee | 774 | flow_group_in = kvzalloc(inlen, GFP_KERNEL); |
1967ce6e OG |
775 | if (!flow_group_in) |
776 | return -ENOMEM; | |
777 | ||
778 | root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); | |
779 | if (!root_ns) { | |
780 | esw_warn(dev, "Failed to get FDB flow namespace\n"); | |
781 | err = -EOPNOTSUPP; | |
782 | goto ns_err; | |
783 | } | |
784 | ||
e52c2802 PB |
785 | max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | |
786 | MLX5_CAP_GEN(dev, max_flow_counter_15_0); | |
787 | fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size); | |
788 | ||
789 | esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n", | |
790 | MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), | |
791 | max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, | |
792 | fdb_max); | |
793 | ||
794 | for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) | |
795 | esw->fdb_table.offloads.fdb_left[i] = | |
796 | ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0; | |
1967ce6e | 797 | |
f80be543 | 798 | table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2; |
b3ba5149 | 799 | |
e52c2802 PB |
800 | /* create the slow path fdb with encap set, so further table instances |
801 | * can be created at run time while VFs are probed if the FW allows that. | |
802 | */ | |
803 | if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) | |
804 | flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | | |
805 | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); | |
806 | ||
807 | ft_attr.flags = flags; | |
b3ba5149 ES |
808 | ft_attr.max_fte = table_size; |
809 | ft_attr.prio = FDB_SLOW_PATH; | |
810 | ||
811 | fdb = mlx5_create_flow_table(root_ns, &ft_attr); | |
1033665e OG |
812 | if (IS_ERR(fdb)) { |
813 | err = PTR_ERR(fdb); | |
814 | esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err); | |
815 | goto slow_fdb_err; | |
816 | } | |
52fff327 | 817 | esw->fdb_table.offloads.slow_fdb = fdb; |
1033665e | 818 | |
e52c2802 PB |
819 | /* If lazy creation isn't supported, open the fast path tables now */ |
820 | if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) && | |
821 | esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) { | |
822 | esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; | |
823 | esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n"); | |
824 | esw_get_prio_table(esw, 0, 1, 0); | |
825 | esw_get_prio_table(esw, 0, 1, 1); | |
826 | } else { | |
827 | esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n"); | |
828 | esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED; | |
829 | } | |
830 | ||
69697b6e OG |
831 | /* create send-to-vport group */ |
832 | memset(flow_group_in, 0, inlen); | |
833 | MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, | |
834 | MLX5_MATCH_MISC_PARAMETERS); | |
835 | ||
836 | match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); | |
837 | ||
838 | MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); | |
839 | MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); | |
840 | ||
cd3d07e7 | 841 | ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ; |
69697b6e OG |
842 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); |
843 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); | |
844 | ||
845 | g = mlx5_create_flow_group(fdb, flow_group_in); | |
846 | if (IS_ERR(g)) { | |
847 | err = PTR_ERR(g); | |
848 | esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err); | |
849 | goto send_vport_err; | |
850 | } | |
851 | esw->fdb_table.offloads.send_to_vport_grp = g; | |
852 | ||
853 | /* create miss group */ | |
854 | memset(flow_group_in, 0, inlen); | |
f80be543 MB |
855 | MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, |
856 | MLX5_MATCH_OUTER_HEADERS); | |
857 | match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, | |
858 | match_criteria); | |
859 | dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, | |
860 | outer_headers.dmac_47_16); | |
861 | dmac[0] = 0x01; | |
69697b6e OG |
862 | |
863 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); | |
f80be543 | 864 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2); |
69697b6e OG |
865 | |
866 | g = mlx5_create_flow_group(fdb, flow_group_in); | |
867 | if (IS_ERR(g)) { | |
868 | err = PTR_ERR(g); | |
869 | esw_warn(dev, "Failed to create miss flow group err(%d)\n", err); | |
870 | goto miss_err; | |
871 | } | |
872 | esw->fdb_table.offloads.miss_grp = g; | |
873 | ||
3aa33572 OG |
874 | err = esw_add_fdb_miss_rule(esw); |
875 | if (err) | |
876 | goto miss_rule_err; | |
877 | ||
e52c2802 | 878 | esw->nvports = nvports; |
c88a026e | 879 | kvfree(flow_group_in); |
69697b6e OG |
880 | return 0; |
881 | ||
3aa33572 OG |
882 | miss_rule_err: |
883 | mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); | |
69697b6e OG |
884 | miss_err: |
885 | mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); | |
886 | send_vport_err: | |
e52c2802 | 887 | esw_destroy_offloads_fast_fdb_tables(esw); |
52fff327 | 888 | mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); |
1033665e | 889 | slow_fdb_err: |
69697b6e OG |
890 | ns_err: |
891 | kvfree(flow_group_in); | |
892 | return err; | |
893 | } | |
894 | ||
/* Tear down the offloads FDB tables created by
 * esw_create_offloads_fdb_tables(). A NULL slow_fdb means the tables were
 * never created (or were already destroyed), in which case this is a no-op.
 */
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	/* Destroy in dependency order: rules first, then the groups they
	 * belong to, then the tables themselves.
	 */
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}
c116c6ee OG |
909 | |
/* Create the NIC RX "offloads" flow table that holds the per-vport RX
 * rules added by mlx5_eswitch_create_vport_rx_rule().
 * Returns 0 on success or a negative errno.
 */
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	/* one entry per VF plus two extra (uplink/PF) */
	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}
936 | ||
937 | static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) | |
938 | { | |
939 | struct mlx5_esw_offload *offloads = &esw->offloads; | |
940 | ||
941 | mlx5_destroy_flow_table(offloads->ft_offloads); | |
942 | } | |
fed9ce22 OG |
943 | |
944 | static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) | |
945 | { | |
946 | int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); | |
947 | struct mlx5_flow_group *g; | |
948 | struct mlx5_priv *priv = &esw->dev->priv; | |
949 | u32 *flow_group_in; | |
950 | void *match_criteria, *misc; | |
951 | int err = 0; | |
952 | int nvports = priv->sriov.num_vfs + 2; | |
953 | ||
1b9a07ee | 954 | flow_group_in = kvzalloc(inlen, GFP_KERNEL); |
fed9ce22 OG |
955 | if (!flow_group_in) |
956 | return -ENOMEM; | |
957 | ||
958 | /* create vport rx group */ | |
959 | memset(flow_group_in, 0, inlen); | |
960 | MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, | |
961 | MLX5_MATCH_MISC_PARAMETERS); | |
962 | ||
963 | match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); | |
964 | misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters); | |
965 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); | |
966 | ||
967 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); | |
968 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); | |
969 | ||
970 | g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in); | |
971 | ||
972 | if (IS_ERR(g)) { | |
973 | err = PTR_ERR(g); | |
974 | mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err); | |
975 | goto out; | |
976 | } | |
977 | ||
978 | esw->offloads.vport_rx_group = g; | |
979 | out: | |
e574978a | 980 | kvfree(flow_group_in); |
fed9ce22 OG |
981 | return err; |
982 | } | |
983 | ||
/* Destroy the vport RX flow group created by esw_create_vport_rx_group(). */
static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
988 | ||
/* Add a rule to the offloads RX table forwarding traffic whose
 * misc_parameters.source_port equals @vport to @dest.
 * Returns the rule handle or an ERR_PTR() on failure.
 */
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* match value: the requested source vport */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	/* match mask: all bits of source_port */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
feae9087 | 1024 | |
db7ff19e EB |
1025 | static int esw_offloads_start(struct mlx5_eswitch *esw, |
1026 | struct netlink_ext_ack *extack) | |
c930a3ad | 1027 | { |
6c419ba8 | 1028 | int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; |
c930a3ad OG |
1029 | |
1030 | if (esw->mode != SRIOV_LEGACY) { | |
8c98ee77 EB |
1031 | NL_SET_ERR_MSG_MOD(extack, |
1032 | "Can't set offloads mode, SRIOV legacy not enabled"); | |
c930a3ad OG |
1033 | return -EINVAL; |
1034 | } | |
1035 | ||
1036 | mlx5_eswitch_disable_sriov(esw); | |
1037 | err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); | |
6c419ba8 | 1038 | if (err) { |
8c98ee77 EB |
1039 | NL_SET_ERR_MSG_MOD(extack, |
1040 | "Failed setting eswitch to offloads"); | |
6c419ba8 | 1041 | err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); |
8c98ee77 EB |
1042 | if (err1) { |
1043 | NL_SET_ERR_MSG_MOD(extack, | |
1044 | "Failed setting eswitch back to legacy"); | |
1045 | } | |
6c419ba8 | 1046 | } |
bffaa916 RD |
1047 | if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { |
1048 | if (mlx5_eswitch_inline_mode_get(esw, | |
1049 | num_vfs, | |
1050 | &esw->offloads.inline_mode)) { | |
1051 | esw->offloads.inline_mode = MLX5_INLINE_MODE_L2; | |
8c98ee77 EB |
1052 | NL_SET_ERR_MSG_MOD(extack, |
1053 | "Inline mode is different between vports"); | |
bffaa916 RD |
1054 | } |
1055 | } | |
c930a3ad OG |
1056 | return err; |
1057 | } | |
1058 | ||
e8d31c4d MB |
/* Free the vport representor array allocated by esw_offloads_init_reps(). */
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}
1063 | ||
/* Allocate and initialize one mlx5_eswitch_rep per vport. Every rep gets
 * its vport number and the NIC's permanent MAC address; slot 0 is then
 * remapped to represent the uplink vport.
 * Returns 0 on success or -ENOMEM.
 */
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	/* slot 0 holds the uplink representor, not vport 0 */
	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}
1093 | ||
/* Invoke the ->unload() callback of every registered (valid) rep of
 * @rep_type, walking vports in reverse order — the mirror of
 * esw_offloads_load_reps_type().
 */
static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
					  u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		/* skip slots with no registered interface of this type */
		if (!rep->rep_if[rep_type].valid)
			continue;

		rep->rep_if[rep_type].unload(rep);
	}
}
1108 | ||
a4b97ab4 MB |
1109 | static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports) |
1110 | { | |
1111 | u8 rep_type = NUM_REP_TYPES; | |
1112 | ||
1113 | while (rep_type-- > 0) | |
1114 | esw_offloads_unload_reps_type(esw, nvports, rep_type); | |
1115 | } | |
1116 | ||
/* Invoke the ->load() callback of every registered (valid) rep of
 * @rep_type. On failure, the reps already loaded (vports [0, vport)) are
 * unloaded before returning the error.
 */
static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
				       u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		/* skip slots with no registered interface of this type */
		if (!rep->rep_if[rep_type].valid)
			continue;

		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	/* unwind only the vports that were successfully loaded */
	esw_offloads_unload_reps_type(esw, vport, rep_type);
	return err;
}
1140 | ||
/* Load all rep types for @nvports vports. On failure, the types that
 * were already fully loaded are unloaded again in reverse order.
 */
static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	/* rep_type is the type that failed; unwind the ones before it */
	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
	return err;
}
1159 | ||
/* Bring up offloads mode for @nvports vports: create the FDB tables, the
 * NIC RX offloads table and its vport flow group, then load the
 * representors. Unwinds in reverse order on any failure.
 * Returns 0 on success or a negative errno.
 */
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}
1195 | ||
db7ff19e EB |
1196 | static int esw_offloads_stop(struct mlx5_eswitch *esw, |
1197 | struct netlink_ext_ack *extack) | |
c930a3ad | 1198 | { |
6c419ba8 | 1199 | int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; |
c930a3ad OG |
1200 | |
1201 | mlx5_eswitch_disable_sriov(esw); | |
1202 | err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); | |
6c419ba8 | 1203 | if (err) { |
8c98ee77 | 1204 | NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); |
6c419ba8 | 1205 | err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); |
8c98ee77 EB |
1206 | if (err1) { |
1207 | NL_SET_ERR_MSG_MOD(extack, | |
1208 | "Failed setting eswitch back to offloads"); | |
1209 | } | |
6c419ba8 | 1210 | } |
c930a3ad | 1211 | |
5bae8c03 | 1212 | /* enable back PF RoCE */ |
c5447c70 | 1213 | mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); |
5bae8c03 | 1214 | |
c930a3ad OG |
1215 | return err; |
1216 | } | |
1217 | ||
/* Full teardown of offloads mode: unload reps first, then destroy the
 * tables — the exact reverse of esw_offloads_init().
 */
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}
1225 | ||
ef78618b | 1226 | static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) |
c930a3ad OG |
1227 | { |
1228 | switch (mode) { | |
1229 | case DEVLINK_ESWITCH_MODE_LEGACY: | |
1230 | *mlx5_mode = SRIOV_LEGACY; | |
1231 | break; | |
1232 | case DEVLINK_ESWITCH_MODE_SWITCHDEV: | |
1233 | *mlx5_mode = SRIOV_OFFLOADS; | |
1234 | break; | |
1235 | default: | |
1236 | return -EINVAL; | |
1237 | } | |
1238 | ||
1239 | return 0; | |
1240 | } | |
1241 | ||
ef78618b OG |
1242 | static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) |
1243 | { | |
1244 | switch (mlx5_mode) { | |
1245 | case SRIOV_LEGACY: | |
1246 | *mode = DEVLINK_ESWITCH_MODE_LEGACY; | |
1247 | break; | |
1248 | case SRIOV_OFFLOADS: | |
1249 | *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; | |
1250 | break; | |
1251 | default: | |
1252 | return -EINVAL; | |
1253 | } | |
1254 | ||
1255 | return 0; | |
1256 | } | |
1257 | ||
bffaa916 RD |
1258 | static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode) |
1259 | { | |
1260 | switch (mode) { | |
1261 | case DEVLINK_ESWITCH_INLINE_MODE_NONE: | |
1262 | *mlx5_mode = MLX5_INLINE_MODE_NONE; | |
1263 | break; | |
1264 | case DEVLINK_ESWITCH_INLINE_MODE_LINK: | |
1265 | *mlx5_mode = MLX5_INLINE_MODE_L2; | |
1266 | break; | |
1267 | case DEVLINK_ESWITCH_INLINE_MODE_NETWORK: | |
1268 | *mlx5_mode = MLX5_INLINE_MODE_IP; | |
1269 | break; | |
1270 | case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT: | |
1271 | *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP; | |
1272 | break; | |
1273 | default: | |
1274 | return -EINVAL; | |
1275 | } | |
1276 | ||
1277 | return 0; | |
1278 | } | |
1279 | ||
1280 | static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) | |
1281 | { | |
1282 | switch (mlx5_mode) { | |
1283 | case MLX5_INLINE_MODE_NONE: | |
1284 | *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE; | |
1285 | break; | |
1286 | case MLX5_INLINE_MODE_L2: | |
1287 | *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK; | |
1288 | break; | |
1289 | case MLX5_INLINE_MODE_IP: | |
1290 | *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK; | |
1291 | break; | |
1292 | case MLX5_INLINE_MODE_TCP_UDP: | |
1293 | *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT; | |
1294 | break; | |
1295 | default: | |
1296 | return -EINVAL; | |
1297 | } | |
1298 | ||
1299 | return 0; | |
1300 | } | |
1301 | ||
9d1cef19 | 1302 | static int mlx5_devlink_eswitch_check(struct devlink *devlink) |
feae9087 | 1303 | { |
9d1cef19 | 1304 | struct mlx5_core_dev *dev = devlink_priv(devlink); |
c930a3ad | 1305 | |
9d1cef19 OG |
1306 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) |
1307 | return -EOPNOTSUPP; | |
c930a3ad | 1308 | |
733d3e54 OG |
1309 | if(!MLX5_ESWITCH_MANAGER(dev)) |
1310 | return -EPERM; | |
c930a3ad | 1311 | |
9d1cef19 | 1312 | if (dev->priv.eswitch->mode == SRIOV_NONE) |
c930a3ad OG |
1313 | return -EOPNOTSUPP; |
1314 | ||
9d1cef19 OG |
1315 | return 0; |
1316 | } | |
1317 | ||
/* devlink callback: change the eswitch mode (legacy <-> switchdev).
 * A no-op if the requested mode equals the current one.
 */
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	/* already in the requested mode */
	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}
1344 | ||
/* devlink callback: report the current eswitch mode. */
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
127ea380 | 1356 | |
/* devlink callback: set the minimal WQE inline mode on all enabled VF
 * vports. Only supported when the device allows per-vport-context inline
 * configuration and no offloaded flows are present. On a partial failure,
 * vports already updated are reverted to the previous mode.
 */
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		/* NONE is accepted as a no-op when inline isn't required */
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	/* vport 0 is skipped; only VF vports get the new mode */
	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	/* restore vports [1, vport) to the previously stored mode */
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}
1411 | ||
/* devlink callback: report the configured minimal inline mode. */
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
1424 | ||
/* Determine the effective min-inline mode for @nvfs VFs. When the device
 * uses per-vport-context inline configuration, all VF vports must report
 * the same mode, otherwise -EINVAL is returned.
 */
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		/* prev_mlx5_mode is only read from the second iteration on,
		 * after it has been assigned below
		 */
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
1460 | ||
/* devlink callback: enable or disable encap offload (NONE/BASIC only).
 * In offloads mode the FDB tables must be re-created with the new encap
 * flags, which requires that no offloaded flows exist. If re-creation
 * with the new setting fails, the previous setting is restored
 * best-effort.
 */
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	/* enabling encap requires both reformat and decap FDB capabilities */
	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		/* no FDB offload tables to rebuild; just record the setting */
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		/* roll back the setting and re-create the old tables */
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}
1509 | ||
/* devlink callback: report the configured encap offload mode. */
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}
1523 | ||
/* Register a representor interface of @rep_type for @vport_index: copy
 * the caller's load/unload/get_proto_dev callbacks and priv pointer into
 * the rep slot, then mark the slot valid once everything is in place.
 */
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);
127ea380 HHZ |
1542 | |
1543 | void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, | |
a4b97ab4 | 1544 | int vport_index, u8 rep_type) |
127ea380 HHZ |
1545 | { |
1546 | struct mlx5_esw_offload *offloads = &esw->offloads; | |
cb67b832 HHZ |
1547 | struct mlx5_eswitch_rep *rep; |
1548 | ||
9deb2241 | 1549 | rep = &offloads->vport_reps[vport_index]; |
cb67b832 | 1550 | |
9deb2241 | 1551 | if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled) |
a4b97ab4 | 1552 | rep->rep_if[rep_type].unload(rep); |
127ea380 | 1553 | |
a4b97ab4 | 1554 | rep->rep_if[rep_type].valid = false; |
127ea380 | 1555 | } |
57cbd893 | 1556 | EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep); |
726293f1 | 1557 | |
/* Return the priv pointer registered for the uplink representor of
 * @rep_type. UPLINK_REP_INDEX (rep slot 0) is defined here and is also
 * used by the functions further down in this file.
 */
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}
22215908 MB |
1567 | |
1568 | void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, | |
1569 | int vport, | |
1570 | u8 rep_type) | |
1571 | { | |
1572 | struct mlx5_esw_offload *offloads = &esw->offloads; | |
1573 | struct mlx5_eswitch_rep *rep; | |
1574 | ||
1575 | if (vport == FDB_UPLINK_VPORT) | |
1576 | vport = UPLINK_REP_INDEX; | |
1577 | ||
1578 | rep = &offloads->vport_reps[vport]; | |
1579 | ||
1580 | if (rep->rep_if[rep_type].valid && | |
1581 | rep->rep_if[rep_type].get_proto_dev) | |
1582 | return rep->rep_if[rep_type].get_proto_dev(rep); | |
1583 | return NULL; | |
1584 | } | |
57cbd893 | 1585 | EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); |
22215908 MB |
1586 | |
/* Convenience wrapper: get_proto_dev for the uplink representor. */
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
1592 | ||
/* Return the representor struct for @vport (no bounds checking). */
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);