drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/etherdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/fs.h>
38 #include "mlx5_core.h"
39 #include "eswitch.h"
40 #include "en.h"
41 #include "fs_core.h"
42 #include "lib/devcom.h"
43
44 enum {
45 FDB_FAST_PATH = 0,
46 FDB_SLOW_PATH
47 };
48
49 #define fdb_prio_table(esw, chain, prio, level) \
50 (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]
51
52 static struct mlx5_flow_table *
53 esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
54 static void
55 esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
56
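/* Chains and priorities are only exposed when the FW supports lazy FDB table
 * creation (ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED); otherwise callers are
 * limited to chain 0 and a single priority.
 */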
57 bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
58 {
59 return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
60 }
61
62 u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
63 {
64 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
65 return FDB_MAX_CHAIN;
66
67 return 0;
68 }
69
70 u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
71 {
72 if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
73 return FDB_MAX_PRIO;
74
75 return 1;
76 }
77
78 struct mlx5_flow_handle *
79 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
80 struct mlx5_flow_spec *spec,
81 struct mlx5_esw_flow_attr *attr)
82 {
83 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
84 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
85 bool split = !!(attr->split_count);
86 struct mlx5_flow_handle *rule;
87 struct mlx5_flow_table *fdb;
88 int j, i = 0;
89 void *misc;
90
91 if (esw->mode != SRIOV_OFFLOADS)
92 return ERR_PTR(-EOPNOTSUPP);
93
94 flow_act.action = attr->action;
95 /* if per flow vlan pop/push is emulated, don't set that into the firmware */
96 if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
97 flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
98 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
99 else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
100 flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
101 flow_act.vlan[0].vid = attr->vlan_vid[0];
102 flow_act.vlan[0].prio = attr->vlan_prio[0];
103 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
104 flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
105 flow_act.vlan[1].vid = attr->vlan_vid[1];
106 flow_act.vlan[1].prio = attr->vlan_prio[1];
107 }
108 }
109
110 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
111 if (attr->dest_chain) {
112 struct mlx5_flow_table *ft;
113
114 ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
115 if (IS_ERR(ft)) {
116 rule = ERR_CAST(ft);
117 goto err_create_goto_table;
118 }
119
120 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
121 dest[i].ft = ft;
122 i++;
123 } else {
124 for (j = attr->split_count; j < attr->out_count; j++) {
125 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
126 dest[i].vport.num = attr->dests[j].rep->vport;
127 dest[i].vport.vhca_id =
128 MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
129 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
130 dest[i].vport.flags |=
131 MLX5_FLOW_DEST_VPORT_VHCA_ID;
132 if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
133 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
134 flow_act.reformat_id = attr->dests[j].encap_id;
135 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
136 dest[i].vport.reformat_id =
137 attr->dests[j].encap_id;
138 }
139 i++;
140 }
141 }
142 }
143 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
144 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
145 dest[i].counter_id = mlx5_fc_id(attr->counter);
146 i++;
147 }
148
149 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
150 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
151
152 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
153 MLX5_SET(fte_match_set_misc, misc,
154 source_eswitch_owner_vhca_id,
155 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
156
157 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
158 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
159 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
160 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
161 source_eswitch_owner_vhca_id);
162
163 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
164 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
165 if (attr->tunnel_match_level != MLX5_MATCH_NONE)
166 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
167 if (attr->match_level != MLX5_MATCH_NONE)
168 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
169 } else if (attr->match_level != MLX5_MATCH_NONE) {
170 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
171 }
172
173 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
174 flow_act.modify_id = attr->mod_hdr_id;
175
176 fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
177 if (IS_ERR(fdb)) {
178 rule = ERR_CAST(fdb);
179 goto err_esw_get;
180 }
181
182 rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
183 if (IS_ERR(rule))
184 goto err_add_rule;
185 else
186 esw->offloads.num_flows++;
187
188 return rule;
189
190 err_add_rule:
191 esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
192 err_esw_get:
193 if (attr->dest_chain)
194 esw_put_prio_table(esw, attr->dest_chain, 1, 0);
195 err_create_goto_table:
196 return rule;
197 }
198
199 struct mlx5_flow_handle *
200 mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
201 struct mlx5_flow_spec *spec,
202 struct mlx5_esw_flow_attr *attr)
203 {
204 struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
205 struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
206 struct mlx5_flow_table *fast_fdb;
207 struct mlx5_flow_table *fwd_fdb;
208 struct mlx5_flow_handle *rule;
209 void *misc;
210 int i;
211
212 fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
213 if (IS_ERR(fast_fdb)) {
214 rule = ERR_CAST(fast_fdb);
215 goto err_get_fast;
216 }
217
218 fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
219 if (IS_ERR(fwd_fdb)) {
220 rule = ERR_CAST(fwd_fdb);
221 goto err_get_fwd;
222 }
223
224 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
225 for (i = 0; i < attr->split_count; i++) {
226 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
227 dest[i].vport.num = attr->dests[i].rep->vport;
228 dest[i].vport.vhca_id =
229 MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
230 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
231 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
232 if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
233 dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
234 dest[i].vport.reformat_id = attr->dests[i].encap_id;
235 }
236 }
237 dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
238 	dest[i].ft = fwd_fdb;
239 i++;
240
241 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
242 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
243
244 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
245 MLX5_SET(fte_match_set_misc, misc,
246 source_eswitch_owner_vhca_id,
247 MLX5_CAP_GEN(attr->in_mdev, vhca_id));
248
249 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
250 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
251 if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
252 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
253 source_eswitch_owner_vhca_id);
254
255 if (attr->match_level == MLX5_MATCH_NONE)
256 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
257 else
258 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
259 MLX5_MATCH_MISC_PARAMETERS;
260
261 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
262
263 if (IS_ERR(rule))
264 goto add_err;
265
266 esw->offloads.num_flows++;
267
268 return rule;
269 add_err:
270 esw_put_prio_table(esw, attr->chain, attr->prio, 1);
271 err_get_fwd:
272 esw_put_prio_table(esw, attr->chain, attr->prio, 0);
273 err_get_fast:
274 return rule;
275 }
276
277 static void
278 __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
279 struct mlx5_flow_handle *rule,
280 struct mlx5_esw_flow_attr *attr,
281 bool fwd_rule)
282 {
283 bool split = (attr->split_count > 0);
284
285 mlx5_del_flow_rules(rule);
286 esw->offloads.num_flows--;
287
288 if (fwd_rule) {
289 esw_put_prio_table(esw, attr->chain, attr->prio, 1);
290 esw_put_prio_table(esw, attr->chain, attr->prio, 0);
291 } else {
292 esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
293 if (attr->dest_chain)
294 esw_put_prio_table(esw, attr->dest_chain, 1, 0);
295 }
296 }
297
298 void
299 mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
300 struct mlx5_flow_handle *rule,
301 struct mlx5_esw_flow_attr *attr)
302 {
303 __mlx5_eswitch_del_rule(esw, rule, attr, false);
304 }
305
306 void
307 mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
308 struct mlx5_flow_handle *rule,
309 struct mlx5_esw_flow_attr *attr)
310 {
311 __mlx5_eswitch_del_rule(esw, rule, attr, true);
312 }
313
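/* Per-flow vlan push/pop emulation: when the device does not support vlan
 * push/pop flow actions, push is emulated with per-vport vlan insert/strip
 * (refcounted per rep via vlan_refcount) and pop with a global vlan strip
 * policy applied to all VF vports (refcounted via vlan_push_pop_refcount).
 */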
314 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
315 {
316 struct mlx5_eswitch_rep *rep;
317 int vf_vport, err = 0;
318
319 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
320 for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
321 rep = &esw->offloads.vport_reps[vf_vport];
322 if (!rep->rep_if[REP_ETH].valid)
323 continue;
324
325 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
326 if (err)
327 goto out;
328 }
329
330 out:
331 return err;
332 }
333
334 static struct mlx5_eswitch_rep *
335 esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
336 {
337 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
338
339 in_rep = attr->in_rep;
340 out_rep = attr->dests[0].rep;
341
342 if (push)
343 vport = in_rep;
344 else if (pop)
345 vport = out_rep;
346 else
347 vport = in_rep;
348
349 return vport;
350 }
351
352 static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
353 bool push, bool pop, bool fwd)
354 {
355 struct mlx5_eswitch_rep *in_rep, *out_rep;
356
357 if ((push || pop) && !fwd)
358 goto out_notsupp;
359
360 in_rep = attr->in_rep;
361 out_rep = attr->dests[0].rep;
362
363 if (push && in_rep->vport == FDB_UPLINK_VPORT)
364 goto out_notsupp;
365
366 if (pop && out_rep->vport == FDB_UPLINK_VPORT)
367 goto out_notsupp;
368
369 	/* vport has vlan push configured, can't offload VF --> wire rules without it */
370 if (!push && !pop && fwd)
371 if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
372 goto out_notsupp;
373
374 /* protects against (1) setting rules with different vlans to push and
375 	 * (2) setting rules without vlans (attr->vlan_vid[0] == 0) and with vlans to push (!= 0)
376 */
377 if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
378 goto out_notsupp;
379
380 return 0;
381
382 out_notsupp:
383 return -EOPNOTSUPP;
384 }
385
386 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
387 struct mlx5_esw_flow_attr *attr)
388 {
389 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
390 struct mlx5_eswitch_rep *vport = NULL;
391 bool push, pop, fwd;
392 int err = 0;
393
394 	/* no-op if vlan push/pop is supported natively (non-emulation mode) */
395 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
396 return 0;
397
398 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
399 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
400 fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
401 !attr->dest_chain);
402
403 err = esw_add_vlan_action_check(attr, push, pop, fwd);
404 if (err)
405 return err;
406
407 attr->vlan_handled = false;
408
409 vport = esw_vlan_action_get_vport(attr, push, pop);
410
411 if (!push && !pop && fwd) {
412 /* tracks VF --> wire rules without vlan push action */
413 if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) {
414 vport->vlan_refcount++;
415 attr->vlan_handled = true;
416 }
417
418 return 0;
419 }
420
421 if (!push && !pop)
422 return 0;
423
424 if (!(offloads->vlan_push_pop_refcount)) {
425 /* it's the 1st vlan rule, apply global vlan pop policy */
426 err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
427 if (err)
428 goto out;
429 }
430 offloads->vlan_push_pop_refcount++;
431
432 if (push) {
433 if (vport->vlan_refcount)
434 goto skip_set_push;
435
436 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
437 SET_VLAN_INSERT | SET_VLAN_STRIP);
438 if (err)
439 goto out;
440 vport->vlan = attr->vlan_vid[0];
441 skip_set_push:
442 vport->vlan_refcount++;
443 }
444 out:
445 if (!err)
446 attr->vlan_handled = true;
447 return err;
448 }
449
450 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
451 struct mlx5_esw_flow_attr *attr)
452 {
453 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
454 struct mlx5_eswitch_rep *vport = NULL;
455 bool push, pop, fwd;
456 int err = 0;
457
458 	/* no-op if vlan push/pop is supported natively (non-emulation mode) */
459 if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
460 return 0;
461
462 if (!attr->vlan_handled)
463 return 0;
464
465 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
466 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
467 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
468
469 vport = esw_vlan_action_get_vport(attr, push, pop);
470
471 if (!push && !pop && fwd) {
472 /* tracks VF --> wire rules without vlan push action */
473 if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT)
474 vport->vlan_refcount--;
475
476 return 0;
477 }
478
479 if (push) {
480 vport->vlan_refcount--;
481 if (vport->vlan_refcount)
482 goto skip_unset_push;
483
484 vport->vlan = 0;
485 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
486 0, 0, SET_VLAN_STRIP);
487 if (err)
488 goto out;
489 }
490
491 skip_unset_push:
492 offloads->vlan_push_pop_refcount--;
493 if (offloads->vlan_push_pop_refcount)
494 return 0;
495
496 /* no more vlan rules, stop global vlan pop policy */
497 err = esw_set_global_vlan_pop(esw, 0);
498
499 out:
500 return err;
501 }
502
503 struct mlx5_flow_handle *
504 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
505 {
506 struct mlx5_flow_act flow_act = {0};
507 struct mlx5_flow_destination dest = {};
508 struct mlx5_flow_handle *flow_rule;
509 struct mlx5_flow_spec *spec;
510 void *misc;
511
512 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
513 if (!spec) {
514 flow_rule = ERR_PTR(-ENOMEM);
515 goto out;
516 }
517
518 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
519 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
520 MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
521
522 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
523 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
524 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
525
526 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
527 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
528 dest.vport.num = vport;
529 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
530
531 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
532 &flow_act, &dest, 1);
533 if (IS_ERR(flow_rule))
534 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
535 out:
536 kvfree(spec);
537 return flow_rule;
538 }
539 EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
540
541 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
542 {
543 mlx5_del_flow_rules(rule);
544 }
545
546 static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
547 struct mlx5_flow_spec *spec,
548 struct mlx5_flow_destination *dest)
549 {
550 void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
551 misc_parameters);
552
553 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
554 MLX5_CAP_GEN(peer_dev, vhca_id));
555
556 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
557
558 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
559 misc_parameters);
560 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
561 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
562 source_eswitch_owner_vhca_id);
563
564 dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
565 dest->vport.num = 0;
566 dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
567 dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
568 }
569
570 static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
571 struct mlx5_core_dev *peer_dev)
572 {
573 struct mlx5_flow_destination dest = {};
574 struct mlx5_flow_act flow_act = {0};
575 struct mlx5_flow_handle **flows;
576 struct mlx5_flow_handle *flow;
577 struct mlx5_flow_spec *spec;
578 /* total vports is the same for both e-switches */
579 int nvports = esw->total_vports;
580 void *misc;
581 int err, i;
582
583 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
584 if (!spec)
585 return -ENOMEM;
586
587 peer_miss_rules_setup(peer_dev, spec, &dest);
588
589 flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
590 if (!flows) {
591 err = -ENOMEM;
592 goto alloc_flows_err;
593 }
594
595 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
596 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
597 misc_parameters);
598
599 for (i = 1; i < nvports; i++) {
600 MLX5_SET(fte_match_set_misc, misc, source_port, i);
601 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
602 spec, &flow_act, &dest, 1);
603 if (IS_ERR(flow)) {
604 err = PTR_ERR(flow);
605 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
606 goto add_flow_err;
607 }
608 flows[i] = flow;
609 }
610
611 esw->fdb_table.offloads.peer_miss_rules = flows;
612
613 kvfree(spec);
614 return 0;
615
616 add_flow_err:
617 for (i--; i > 0; i--)
618 mlx5_del_flow_rules(flows[i]);
619 kvfree(flows);
620 alloc_flows_err:
621 kvfree(spec);
622 return err;
623 }
624
625 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
626 {
627 struct mlx5_flow_handle **flows;
628 int i;
629
630 flows = esw->fdb_table.offloads.peer_miss_rules;
631
632 for (i = 1; i < esw->total_vports; i++)
633 mlx5_del_flow_rules(flows[i]);
634
635 kvfree(flows);
636 }
637
638 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
639 {
640 struct mlx5_flow_act flow_act = {0};
641 struct mlx5_flow_destination dest = {};
642 struct mlx5_flow_handle *flow_rule = NULL;
643 struct mlx5_flow_spec *spec;
644 void *headers_c;
645 void *headers_v;
646 int err = 0;
647 u8 *dmac_c;
648 u8 *dmac_v;
649
650 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
651 if (!spec) {
652 err = -ENOMEM;
653 goto out;
654 }
655
656 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
657 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
658 outer_headers);
659 dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
660 outer_headers.dmac_47_16);
661 dmac_c[0] = 0x01;
662
663 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
664 dest.vport.num = 0;
665 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
666
667 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
668 &flow_act, &dest, 1);
669 if (IS_ERR(flow_rule)) {
670 err = PTR_ERR(flow_rule);
671 esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
672 goto out;
673 }
674
675 esw->fdb_table.offloads.miss_rule_uni = flow_rule;
676
677 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
678 outer_headers);
679 dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
680 outer_headers.dmac_47_16);
681 dmac_v[0] = 0x01;
682 flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
683 &flow_act, &dest, 1);
684 if (IS_ERR(flow_rule)) {
685 err = PTR_ERR(flow_rule);
686 esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
687 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
688 goto out;
689 }
690
691 esw->fdb_table.offloads.miss_rule_multi = flow_rule;
692
693 out:
694 kvfree(spec);
695 return err;
696 }
697
698 #define ESW_OFFLOADS_NUM_GROUPS 4
699
700 /* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS), and a
701  * virtual memory region of 16M (ESW_SIZE); this region is duplicated
702  * for each flow table pool. We can allocate up to 16M from each pool,
703  * and we keep track of how much we have used via put/get_sz_to_pool.
704  * Firmware doesn't report any of this for now.
705  * ESW_POOLS is expected to be sorted from large to small.
706  */
707 #define ESW_SIZE (16 * 1024 * 1024)
708 const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
709 64 * 1024, 4 * 1024 };
710
711 static int
712 get_sz_from_pool(struct mlx5_eswitch *esw)
713 {
714 int sz = 0, i;
715
716 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
717 if (esw->fdb_table.offloads.fdb_left[i]) {
718 --esw->fdb_table.offloads.fdb_left[i];
719 sz = ESW_POOLS[i];
720 break;
721 }
722 }
723
724 return sz;
725 }
726
727 static void
728 put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
729 {
730 int i;
731
732 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
733 if (sz >= ESW_POOLS[i]) {
734 ++esw->fdb_table.offloads.fdb_left[i];
735 break;
736 }
737 }
738 }
739
740 static struct mlx5_flow_table *
741 create_next_size_table(struct mlx5_eswitch *esw,
742 struct mlx5_flow_namespace *ns,
743 u16 table_prio,
744 int level,
745 u32 flags)
746 {
747 struct mlx5_flow_table *fdb;
748 int sz;
749
750 sz = get_sz_from_pool(esw);
751 if (!sz)
752 return ERR_PTR(-ENOSPC);
753
754 fdb = mlx5_create_auto_grouped_flow_table(ns,
755 table_prio,
756 sz,
757 ESW_OFFLOADS_NUM_GROUPS,
758 level,
759 flags);
760 if (IS_ERR(fdb)) {
761 esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
762 (int)PTR_ERR(fdb), table_prio, level, sz);
763 put_sz_to_pool(esw, sz);
764 }
765
766 return fdb;
767 }
768
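/* Get (and take a reference on) the flow table backing (chain, prio, level).
 * Tables are created lazily under fdb_prio_lock and sized from the ESW_POOLS
 * accounting; all lower levels of the same chain/prio are created and
 * referenced as well so fs_core can connect them. esw_put_prio_table() drops
 * the references and destroys a table once its rule count reaches zero.
 */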
769 static struct mlx5_flow_table *
770 esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
771 {
772 struct mlx5_core_dev *dev = esw->dev;
773 struct mlx5_flow_table *fdb = NULL;
774 struct mlx5_flow_namespace *ns;
775 int table_prio, l = 0;
776 u32 flags = 0;
777
778 if (chain == FDB_SLOW_PATH_CHAIN)
779 return esw->fdb_table.offloads.slow_fdb;
780
781 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
782
783 fdb = fdb_prio_table(esw, chain, prio, level).fdb;
784 if (fdb) {
785 /* take ref on earlier levels as well */
786 while (level >= 0)
787 fdb_prio_table(esw, chain, prio, level--).num_rules++;
788 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
789 return fdb;
790 }
791
792 ns = mlx5_get_fdb_sub_ns(dev, chain);
793 if (!ns) {
794 esw_warn(dev, "Failed to get FDB sub namespace\n");
795 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
796 return ERR_PTR(-EOPNOTSUPP);
797 }
798
799 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
800 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
801 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
802
803 table_prio = (chain * FDB_MAX_PRIO) + prio - 1;
804
805 /* create earlier levels for correct fs_core lookup when
806 * connecting tables
807 */
808 for (l = 0; l <= level; l++) {
809 if (fdb_prio_table(esw, chain, prio, l).fdb) {
810 fdb_prio_table(esw, chain, prio, l).num_rules++;
811 continue;
812 }
813
814 fdb = create_next_size_table(esw, ns, table_prio, l, flags);
815 if (IS_ERR(fdb)) {
816 l--;
817 goto err_create_fdb;
818 }
819
820 fdb_prio_table(esw, chain, prio, l).fdb = fdb;
821 fdb_prio_table(esw, chain, prio, l).num_rules = 1;
822 }
823
824 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
825 return fdb;
826
827 err_create_fdb:
828 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
829 if (l >= 0)
830 esw_put_prio_table(esw, chain, prio, l);
831
832 return fdb;
833 }
834
835 static void
836 esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
837 {
838 int l;
839
840 if (chain == FDB_SLOW_PATH_CHAIN)
841 return;
842
843 mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
844
845 for (l = level; l >= 0; l--) {
846 if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
847 continue;
848
849 put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
850 mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
851 fdb_prio_table(esw, chain, prio, l).fdb = NULL;
852 }
853
854 mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
855 }
856
857 static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
858 {
859 /* If lazy creation isn't supported, deref the fast path tables */
860 if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
861 esw_put_prio_table(esw, 0, 1, 1);
862 esw_put_prio_table(esw, 0, 1, 0);
863 }
864 }
865
866 #define MAX_PF_SQ 256
867 #define MAX_SQ_NVPORTS 32
868
869 static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
870 {
871 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
872 struct mlx5_flow_table_attr ft_attr = {};
873 struct mlx5_core_dev *dev = esw->dev;
874 u32 *flow_group_in, max_flow_counter;
875 struct mlx5_flow_namespace *root_ns;
876 struct mlx5_flow_table *fdb = NULL;
877 int table_size, ix, err = 0, i;
878 struct mlx5_flow_group *g;
879 u32 flags = 0, fdb_max;
880 void *match_criteria;
881 u8 *dmac;
882
883 esw_debug(esw->dev, "Create offloads FDB Tables\n");
884 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
885 if (!flow_group_in)
886 return -ENOMEM;
887
888 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
889 if (!root_ns) {
890 esw_warn(dev, "Failed to get FDB flow namespace\n");
891 err = -EOPNOTSUPP;
892 goto ns_err;
893 }
894
895 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
896 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
897 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
898
899 esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
900 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
901 max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
902 fdb_max);
903
904 for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
905 esw->fdb_table.offloads.fdb_left[i] =
906 ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
907
908 table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
909 esw->total_vports;
910
911 /* create the slow path fdb with encap set, so further table instances
912 * can be created at run time while VFs are probed if the FW allows that.
913 */
914 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
915 flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
916 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
917
918 ft_attr.flags = flags;
919 ft_attr.max_fte = table_size;
920 ft_attr.prio = FDB_SLOW_PATH;
921
922 fdb = mlx5_create_flow_table(root_ns, &ft_attr);
923 if (IS_ERR(fdb)) {
924 err = PTR_ERR(fdb);
925 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
926 goto slow_fdb_err;
927 }
928 esw->fdb_table.offloads.slow_fdb = fdb;
929
930 /* If lazy creation isn't supported, open the fast path tables now */
931 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
932 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
933 esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
934 esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
935 esw_get_prio_table(esw, 0, 1, 0);
936 esw_get_prio_table(esw, 0, 1, 1);
937 } else {
938 esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
939 esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
940 }
941
942 /* create send-to-vport group */
943 memset(flow_group_in, 0, inlen);
944 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
945 MLX5_MATCH_MISC_PARAMETERS);
946
947 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
948
949 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
950 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
951
952 ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
953 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
954 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
955
956 g = mlx5_create_flow_group(fdb, flow_group_in);
957 if (IS_ERR(g)) {
958 err = PTR_ERR(g);
959 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
960 goto send_vport_err;
961 }
962 esw->fdb_table.offloads.send_to_vport_grp = g;
963
964 /* create peer esw miss group */
965 memset(flow_group_in, 0, inlen);
966 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
967 MLX5_MATCH_MISC_PARAMETERS);
968
969 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
970 match_criteria);
971
972 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
973 misc_parameters.source_port);
974 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
975 misc_parameters.source_eswitch_owner_vhca_id);
976
977 MLX5_SET(create_flow_group_in, flow_group_in,
978 source_eswitch_owner_vhca_id_valid, 1);
979 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
980 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
981 ix + esw->total_vports - 1);
982 ix += esw->total_vports;
983
984 g = mlx5_create_flow_group(fdb, flow_group_in);
985 if (IS_ERR(g)) {
986 err = PTR_ERR(g);
987 esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
988 goto peer_miss_err;
989 }
990 esw->fdb_table.offloads.peer_miss_grp = g;
991
992 /* create miss group */
993 memset(flow_group_in, 0, inlen);
994 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
995 MLX5_MATCH_OUTER_HEADERS);
996 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
997 match_criteria);
998 dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
999 outer_headers.dmac_47_16);
1000 dmac[0] = 0x01;
1001
1002 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
1003 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);
1004
1005 g = mlx5_create_flow_group(fdb, flow_group_in);
1006 if (IS_ERR(g)) {
1007 err = PTR_ERR(g);
1008 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
1009 goto miss_err;
1010 }
1011 esw->fdb_table.offloads.miss_grp = g;
1012
1013 err = esw_add_fdb_miss_rule(esw);
1014 if (err)
1015 goto miss_rule_err;
1016
1017 esw->nvports = nvports;
1018 kvfree(flow_group_in);
1019 return 0;
1020
1021 miss_rule_err:
1022 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1023 miss_err:
1024 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1025 peer_miss_err:
1026 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1027 send_vport_err:
1028 esw_destroy_offloads_fast_fdb_tables(esw);
1029 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1030 slow_fdb_err:
1031 ns_err:
1032 kvfree(flow_group_in);
1033 return err;
1034 }
1035
1036 static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
1037 {
1038 if (!esw->fdb_table.offloads.slow_fdb)
1039 return;
1040
1041 esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
1042 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
1043 mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
1044 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
1045 mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
1046 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
1047
1048 mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
1049 esw_destroy_offloads_fast_fdb_tables(esw);
1050 }
1051
1052 static int esw_create_offloads_table(struct mlx5_eswitch *esw)
1053 {
1054 struct mlx5_flow_table_attr ft_attr = {};
1055 struct mlx5_core_dev *dev = esw->dev;
1056 struct mlx5_flow_table *ft_offloads;
1057 struct mlx5_flow_namespace *ns;
1058 int err = 0;
1059
1060 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
1061 if (!ns) {
1062 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
1063 return -EOPNOTSUPP;
1064 }
1065
1066 ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;
1067
1068 ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
1069 if (IS_ERR(ft_offloads)) {
1070 err = PTR_ERR(ft_offloads);
1071 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
1072 return err;
1073 }
1074
1075 esw->offloads.ft_offloads = ft_offloads;
1076 return 0;
1077 }
1078
1079 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
1080 {
1081 struct mlx5_esw_offload *offloads = &esw->offloads;
1082
1083 mlx5_destroy_flow_table(offloads->ft_offloads);
1084 }
1085
1086 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
1087 {
1088 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1089 struct mlx5_flow_group *g;
1090 struct mlx5_priv *priv = &esw->dev->priv;
1091 u32 *flow_group_in;
1092 void *match_criteria, *misc;
1093 int err = 0;
1094 int nvports = priv->sriov.num_vfs + 2;
1095
1096 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1097 if (!flow_group_in)
1098 return -ENOMEM;
1099
1100 /* create vport rx group */
1101 memset(flow_group_in, 0, inlen);
1102 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1103 MLX5_MATCH_MISC_PARAMETERS);
1104
1105 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
1106 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
1107 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1108
1109 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1110 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
1111
1112 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
1113
1114 if (IS_ERR(g)) {
1115 err = PTR_ERR(g);
1116 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
1117 goto out;
1118 }
1119
1120 esw->offloads.vport_rx_group = g;
1121 out:
1122 kvfree(flow_group_in);
1123 return err;
1124 }
1125
1126 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
1127 {
1128 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
1129 }
1130
1131 struct mlx5_flow_handle *
1132 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
1133 struct mlx5_flow_destination *dest)
1134 {
1135 struct mlx5_flow_act flow_act = {0};
1136 struct mlx5_flow_handle *flow_rule;
1137 struct mlx5_flow_spec *spec;
1138 void *misc;
1139
1140 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1141 if (!spec) {
1142 flow_rule = ERR_PTR(-ENOMEM);
1143 goto out;
1144 }
1145
1146 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
1147 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
1148
1149 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
1150 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
1151
1152 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
1153
1154 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1155 flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
1156 &flow_act, dest, 1);
1157 if (IS_ERR(flow_rule)) {
1158 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
1159 goto out;
1160 }
1161
1162 out:
1163 kvfree(spec);
1164 return flow_rule;
1165 }
1166
1167 static int esw_offloads_start(struct mlx5_eswitch *esw,
1168 struct netlink_ext_ack *extack)
1169 {
1170 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
1171
1172 if (esw->mode != SRIOV_LEGACY) {
1173 NL_SET_ERR_MSG_MOD(extack,
1174 "Can't set offloads mode, SRIOV legacy not enabled");
1175 return -EINVAL;
1176 }
1177
1178 mlx5_eswitch_disable_sriov(esw);
1179 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
1180 if (err) {
1181 NL_SET_ERR_MSG_MOD(extack,
1182 "Failed setting eswitch to offloads");
1183 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
1184 if (err1) {
1185 NL_SET_ERR_MSG_MOD(extack,
1186 "Failed setting eswitch back to legacy");
1187 }
1188 }
1189 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
1190 if (mlx5_eswitch_inline_mode_get(esw,
1191 num_vfs,
1192 &esw->offloads.inline_mode)) {
1193 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
1194 NL_SET_ERR_MSG_MOD(extack,
1195 "Inline mode is different between vports");
1196 }
1197 }
1198 return err;
1199 }
1200
1201 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
1202 {
1203 kfree(esw->offloads.vport_reps);
1204 }
1205
1206 int esw_offloads_init_reps(struct mlx5_eswitch *esw)
1207 {
1208 int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
1209 struct mlx5_core_dev *dev = esw->dev;
1210 struct mlx5_esw_offload *offloads;
1211 struct mlx5_eswitch_rep *rep;
1212 u8 hw_id[ETH_ALEN];
1213 int vport;
1214
1215 esw->offloads.vport_reps = kcalloc(total_vfs,
1216 sizeof(struct mlx5_eswitch_rep),
1217 GFP_KERNEL);
1218 if (!esw->offloads.vport_reps)
1219 return -ENOMEM;
1220
1221 offloads = &esw->offloads;
1222 mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
1223
1224 for (vport = 0; vport < total_vfs; vport++) {
1225 rep = &offloads->vport_reps[vport];
1226
1227 rep->vport = vport;
1228 ether_addr_copy(rep->hw_id, hw_id);
1229 }
1230
1231 offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
1232
1233 return 0;
1234 }
1235
1236 static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
1237 u8 rep_type)
1238 {
1239 struct mlx5_eswitch_rep *rep;
1240 int vport;
1241
1242 for (vport = nvports - 1; vport >= 0; vport--) {
1243 rep = &esw->offloads.vport_reps[vport];
1244 if (!rep->rep_if[rep_type].valid)
1245 continue;
1246
1247 rep->rep_if[rep_type].unload(rep);
1248 }
1249 }
1250
1251 static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
1252 {
1253 u8 rep_type = NUM_REP_TYPES;
1254
1255 while (rep_type-- > 0)
1256 esw_offloads_unload_reps_type(esw, nvports, rep_type);
1257 }
1258
1259 static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
1260 u8 rep_type)
1261 {
1262 struct mlx5_eswitch_rep *rep;
1263 int vport;
1264 int err;
1265
1266 for (vport = 0; vport < nvports; vport++) {
1267 rep = &esw->offloads.vport_reps[vport];
1268 if (!rep->rep_if[rep_type].valid)
1269 continue;
1270
1271 err = rep->rep_if[rep_type].load(esw->dev, rep);
1272 if (err)
1273 goto err_reps;
1274 }
1275
1276 return 0;
1277
1278 err_reps:
1279 esw_offloads_unload_reps_type(esw, vport, rep_type);
1280 return err;
1281 }
1282
1283 static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
1284 {
1285 u8 rep_type = 0;
1286 int err;
1287
1288 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
1289 err = esw_offloads_load_reps_type(esw, nvports, rep_type);
1290 if (err)
1291 goto err_reps;
1292 }
1293
1294 return err;
1295
1296 err_reps:
1297 while (rep_type-- > 0)
1298 esw_offloads_unload_reps_type(esw, nvports, rep_type);
1299 return err;
1300 }
1301
1302 #define ESW_OFFLOADS_DEVCOM_PAIR (0)
1303 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
1304
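/* Merged-eswitch pairing over devcom: on a PAIR event each eswitch installs
 * peer miss rules matching traffic sourced from the peer's vports and forwards
 * it to vport 0 of the peer device; UNPAIR cleans the peer FDB flows and
 * removes those miss rules again.
 */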
1305 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
1306 struct mlx5_eswitch *peer_esw)
1307 {
1308 int err;
1309
1310 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
1311 if (err)
1312 return err;
1313
1314 return 0;
1315 }
1316
1317 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
1318
1319 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
1320 {
1321 mlx5e_tc_clean_fdb_peer_flows(esw);
1322 esw_del_fdb_peer_miss_rules(esw);
1323 }
1324
1325 static int mlx5_esw_offloads_devcom_event(int event,
1326 void *my_data,
1327 void *event_data)
1328 {
1329 struct mlx5_eswitch *esw = my_data;
1330 struct mlx5_eswitch *peer_esw = event_data;
1331 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1332 int err;
1333
1334 switch (event) {
1335 case ESW_OFFLOADS_DEVCOM_PAIR:
1336 err = mlx5_esw_offloads_pair(esw, peer_esw);
1337 if (err)
1338 goto err_out;
1339
1340 err = mlx5_esw_offloads_pair(peer_esw, esw);
1341 if (err)
1342 goto err_pair;
1343
1344 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
1345 break;
1346
1347 case ESW_OFFLOADS_DEVCOM_UNPAIR:
1348 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
1349 break;
1350
1351 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
1352 mlx5_esw_offloads_unpair(peer_esw);
1353 mlx5_esw_offloads_unpair(esw);
1354 break;
1355 }
1356
1357 return 0;
1358
1359 err_pair:
1360 mlx5_esw_offloads_unpair(esw);
1361
1362 err_out:
1363 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
1364 event, err);
1365 return err;
1366 }
1367
1368 static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
1369 {
1370 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1371
1372 INIT_LIST_HEAD(&esw->offloads.peer_flows);
1373 mutex_init(&esw->offloads.peer_mutex);
1374
1375 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1376 return;
1377
1378 mlx5_devcom_register_component(devcom,
1379 MLX5_DEVCOM_ESW_OFFLOADS,
1380 mlx5_esw_offloads_devcom_event,
1381 esw);
1382
1383 mlx5_devcom_send_event(devcom,
1384 MLX5_DEVCOM_ESW_OFFLOADS,
1385 ESW_OFFLOADS_DEVCOM_PAIR, esw);
1386 }
1387
1388 static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
1389 {
1390 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
1391
1392 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
1393 return;
1394
1395 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
1396 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
1397
1398 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1399 }
1400
1401 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
1402 {
1403 int err;
1404
1405 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
1406
1407 err = esw_create_offloads_fdb_tables(esw, nvports);
1408 if (err)
1409 return err;
1410
1411 err = esw_create_offloads_table(esw);
1412 if (err)
1413 goto create_ft_err;
1414
1415 err = esw_create_vport_rx_group(esw);
1416 if (err)
1417 goto create_fg_err;
1418
1419 err = esw_offloads_load_reps(esw, nvports);
1420 if (err)
1421 goto err_reps;
1422
1423 esw_offloads_devcom_init(esw);
1424 return 0;
1425
1426 err_reps:
1427 esw_destroy_vport_rx_group(esw);
1428
1429 create_fg_err:
1430 esw_destroy_offloads_table(esw);
1431
1432 create_ft_err:
1433 esw_destroy_offloads_fdb_tables(esw);
1434
1435 return err;
1436 }
1437
1438 static int esw_offloads_stop(struct mlx5_eswitch *esw,
1439 struct netlink_ext_ack *extack)
1440 {
1441 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
1442
1443 mlx5_eswitch_disable_sriov(esw);
1444 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
1445 if (err) {
1446 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
1447 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
1448 if (err1) {
1449 NL_SET_ERR_MSG_MOD(extack,
1450 "Failed setting eswitch back to offloads");
1451 }
1452 }
1453
1454 return err;
1455 }
1456
1457 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
1458 {
1459 esw_offloads_devcom_cleanup(esw);
1460 esw_offloads_unload_reps(esw, nvports);
1461 esw_destroy_vport_rx_group(esw);
1462 esw_destroy_offloads_table(esw);
1463 esw_destroy_offloads_fdb_tables(esw);
1464 }
1465
1466 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
1467 {
1468 switch (mode) {
1469 case DEVLINK_ESWITCH_MODE_LEGACY:
1470 *mlx5_mode = SRIOV_LEGACY;
1471 break;
1472 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
1473 *mlx5_mode = SRIOV_OFFLOADS;
1474 break;
1475 default:
1476 return -EINVAL;
1477 }
1478
1479 return 0;
1480 }
1481
1482 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
1483 {
1484 switch (mlx5_mode) {
1485 case SRIOV_LEGACY:
1486 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
1487 break;
1488 case SRIOV_OFFLOADS:
1489 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
1490 break;
1491 default:
1492 return -EINVAL;
1493 }
1494
1495 return 0;
1496 }
1497
1498 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
1499 {
1500 switch (mode) {
1501 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
1502 *mlx5_mode = MLX5_INLINE_MODE_NONE;
1503 break;
1504 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
1505 *mlx5_mode = MLX5_INLINE_MODE_L2;
1506 break;
1507 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
1508 *mlx5_mode = MLX5_INLINE_MODE_IP;
1509 break;
1510 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
1511 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
1512 break;
1513 default:
1514 return -EINVAL;
1515 }
1516
1517 return 0;
1518 }
1519
1520 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
1521 {
1522 switch (mlx5_mode) {
1523 case MLX5_INLINE_MODE_NONE:
1524 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
1525 break;
1526 case MLX5_INLINE_MODE_L2:
1527 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
1528 break;
1529 case MLX5_INLINE_MODE_IP:
1530 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
1531 break;
1532 case MLX5_INLINE_MODE_TCP_UDP:
1533 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
1534 break;
1535 default:
1536 return -EINVAL;
1537 }
1538
1539 return 0;
1540 }
1541
1542 static int mlx5_devlink_eswitch_check(struct devlink *devlink)
1543 {
1544 struct mlx5_core_dev *dev = devlink_priv(devlink);
1545
1546 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1547 return -EOPNOTSUPP;
1548
1549 	if (!MLX5_ESWITCH_MANAGER(dev))
1550 return -EPERM;
1551
1552 if (dev->priv.eswitch->mode == SRIOV_NONE)
1553 return -EOPNOTSUPP;
1554
1555 return 0;
1556 }
1557
1558 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
1559 struct netlink_ext_ack *extack)
1560 {
1561 struct mlx5_core_dev *dev = devlink_priv(devlink);
1562 u16 cur_mlx5_mode, mlx5_mode = 0;
1563 int err;
1564
1565 err = mlx5_devlink_eswitch_check(devlink);
1566 if (err)
1567 return err;
1568
1569 cur_mlx5_mode = dev->priv.eswitch->mode;
1570
1571 if (esw_mode_from_devlink(mode, &mlx5_mode))
1572 return -EINVAL;
1573
1574 if (cur_mlx5_mode == mlx5_mode)
1575 return 0;
1576
1577 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
1578 return esw_offloads_start(dev->priv.eswitch, extack);
1579 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
1580 return esw_offloads_stop(dev->priv.eswitch, extack);
1581 else
1582 return -EINVAL;
1583 }
1584
1585 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
1586 {
1587 struct mlx5_core_dev *dev = devlink_priv(devlink);
1588 int err;
1589
1590 err = mlx5_devlink_eswitch_check(devlink);
1591 if (err)
1592 return err;
1593
1594 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
1595 }
1596
1597 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
1598 struct netlink_ext_ack *extack)
1599 {
1600 struct mlx5_core_dev *dev = devlink_priv(devlink);
1601 struct mlx5_eswitch *esw = dev->priv.eswitch;
1602 int err, vport;
1603 u8 mlx5_mode;
1604
1605 err = mlx5_devlink_eswitch_check(devlink);
1606 if (err)
1607 return err;
1608
1609 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
1610 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1611 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
1612 return 0;
1613 /* fall through */
1614 case MLX5_CAP_INLINE_MODE_L2:
1615 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
1616 return -EOPNOTSUPP;
1617 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1618 break;
1619 }
1620
1621 if (esw->offloads.num_flows > 0) {
1622 NL_SET_ERR_MSG_MOD(extack,
1623 "Can't set inline mode when flows are configured");
1624 return -EOPNOTSUPP;
1625 }
1626
1627 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
1628 if (err)
1629 goto out;
1630
1631 for (vport = 1; vport < esw->enabled_vports; vport++) {
1632 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
1633 if (err) {
1634 NL_SET_ERR_MSG_MOD(extack,
1635 "Failed to set min inline on vport");
1636 goto revert_inline_mode;
1637 }
1638 }
1639
1640 esw->offloads.inline_mode = mlx5_mode;
1641 return 0;
1642
1643 revert_inline_mode:
1644 while (--vport > 0)
1645 mlx5_modify_nic_vport_min_inline(dev,
1646 vport,
1647 esw->offloads.inline_mode);
1648 out:
1649 return err;
1650 }
1651
1652 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
1653 {
1654 struct mlx5_core_dev *dev = devlink_priv(devlink);
1655 struct mlx5_eswitch *esw = dev->priv.eswitch;
1656 int err;
1657
1658 err = mlx5_devlink_eswitch_check(devlink);
1659 if (err)
1660 return err;
1661
1662 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
1663 }
1664
1665 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
1666 {
1667 u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
1668 struct mlx5_core_dev *dev = esw->dev;
1669 int vport;
1670
1671 if (!MLX5_CAP_GEN(dev, vport_group_manager))
1672 return -EOPNOTSUPP;
1673
1674 if (esw->mode == SRIOV_NONE)
1675 return -EOPNOTSUPP;
1676
1677 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
1678 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
1679 mlx5_mode = MLX5_INLINE_MODE_NONE;
1680 goto out;
1681 case MLX5_CAP_INLINE_MODE_L2:
1682 mlx5_mode = MLX5_INLINE_MODE_L2;
1683 goto out;
1684 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
1685 goto query_vports;
1686 }
1687
1688 query_vports:
1689 for (vport = 1; vport <= nvfs; vport++) {
1690 mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
1691 if (vport > 1 && prev_mlx5_mode != mlx5_mode)
1692 return -EINVAL;
1693 prev_mlx5_mode = mlx5_mode;
1694 }
1695
1696 out:
1697 *mode = mlx5_mode;
1698 return 0;
1699 }
1700
1701 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
1702 struct netlink_ext_ack *extack)
1703 {
1704 struct mlx5_core_dev *dev = devlink_priv(devlink);
1705 struct mlx5_eswitch *esw = dev->priv.eswitch;
1706 int err;
1707
1708 err = mlx5_devlink_eswitch_check(devlink);
1709 if (err)
1710 return err;
1711
1712 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
1713 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
1714 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
1715 return -EOPNOTSUPP;
1716
1717 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
1718 return -EOPNOTSUPP;
1719
1720 if (esw->mode == SRIOV_LEGACY) {
1721 esw->offloads.encap = encap;
1722 return 0;
1723 }
1724
1725 if (esw->offloads.encap == encap)
1726 return 0;
1727
1728 if (esw->offloads.num_flows > 0) {
1729 NL_SET_ERR_MSG_MOD(extack,
1730 "Can't set encapsulation when flows are configured");
1731 return -EOPNOTSUPP;
1732 }
1733
1734 esw_destroy_offloads_fdb_tables(esw);
1735
1736 esw->offloads.encap = encap;
1737
1738 err = esw_create_offloads_fdb_tables(esw, esw->nvports);
1739
1740 if (err) {
1741 NL_SET_ERR_MSG_MOD(extack,
1742 "Failed re-creating fast FDB table");
1743 esw->offloads.encap = !encap;
1744 (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
1745 }
1746
1747 return err;
1748 }
1749
1750 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
1751 {
1752 struct mlx5_core_dev *dev = devlink_priv(devlink);
1753 struct mlx5_eswitch *esw = dev->priv.eswitch;
1754 int err;
1755
1756 err = mlx5_devlink_eswitch_check(devlink);
1757 if (err)
1758 return err;
1759
1760 *encap = esw->offloads.encap;
1761 return 0;
1762 }
1763
1764 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
1765 int vport_index,
1766 struct mlx5_eswitch_rep_if *__rep_if,
1767 u8 rep_type)
1768 {
1769 struct mlx5_esw_offload *offloads = &esw->offloads;
1770 struct mlx5_eswitch_rep_if *rep_if;
1771
1772 rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
1773
1774 rep_if->load = __rep_if->load;
1775 rep_if->unload = __rep_if->unload;
1776 rep_if->get_proto_dev = __rep_if->get_proto_dev;
1777 rep_if->priv = __rep_if->priv;
1778
1779 rep_if->valid = true;
1780 }
1781 EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);
1782
1783 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
1784 int vport_index, u8 rep_type)
1785 {
1786 struct mlx5_esw_offload *offloads = &esw->offloads;
1787 struct mlx5_eswitch_rep *rep;
1788
1789 rep = &offloads->vport_reps[vport_index];
1790
1791 if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
1792 rep->rep_if[rep_type].unload(rep);
1793
1794 rep->rep_if[rep_type].valid = false;
1795 }
1796 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);
1797
1798 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
1799 {
1800 #define UPLINK_REP_INDEX 0
1801 struct mlx5_esw_offload *offloads = &esw->offloads;
1802 struct mlx5_eswitch_rep *rep;
1803
1804 rep = &offloads->vport_reps[UPLINK_REP_INDEX];
1805 return rep->rep_if[rep_type].priv;
1806 }
1807
1808 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
1809 int vport,
1810 u8 rep_type)
1811 {
1812 struct mlx5_esw_offload *offloads = &esw->offloads;
1813 struct mlx5_eswitch_rep *rep;
1814
1815 if (vport == FDB_UPLINK_VPORT)
1816 vport = UPLINK_REP_INDEX;
1817
1818 rep = &offloads->vport_reps[vport];
1819
1820 if (rep->rep_if[rep_type].valid &&
1821 rep->rep_if[rep_type].get_proto_dev)
1822 return rep->rep_if[rep_type].get_proto_dev(rep);
1823 return NULL;
1824 }
1825 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
1826
1827 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
1828 {
1829 return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
1830 }
1831 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
1832
1833 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
1834 int vport)
1835 {
1836 return &esw->offloads.vport_reps[vport];
1837 }
1838 EXPORT_SYMBOL(mlx5_eswitch_vport_rep);