drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

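/* Add an offloaded flow rule to the fast path FDB table. The rule
 * matches on the source vport of the ingress representor
 * (attr->in_rep) through the misc match parameters; depending on
 * attr->action it forwards to the egress representor's vport,
 * attaches a flow counter, decaps a tunnel header, or a combination
 * of these. Returns the flow handle, or an ERR_PTR when the e-switch
 * is not in offloads mode or rule insertion fails.
 */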
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->encap)
		flow_act.encap_id = attr->encap->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	/* counter may still be NULL when the COUNT action was not
	 * requested; only destroy it if one was actually created.
	 */
	if (IS_ERR(rule) && counter)
		mlx5_fc_destroy(esw->dev, counter);

	return rule;
}

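/* Apply (or clear, when val == 0) the vlan strip setting on every
 * enabled VF vport that has a valid representor. Called when the
 * first vlan push/pop rule is installed and when the last one is
 * removed.
 */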
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

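/* Pick the vport rep whose vlan state is tracked for this rule: the
 * ingress rep for a push action, the egress rep for a pop action,
 * and the ingress rep when neither action is requested.
 */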
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

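/* Validate a vlan push/pop request against what the offload model
 * supports: push/pop must be paired with a forward action, push is
 * not allowed from the uplink and pop is not allowed towards it, and
 * a vport may only have a single vlan id pushed at a time.
 */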
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -ENOTSUPP;
}

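/* Account for the vlan push/pop actions of a rule being offloaded.
 * The first push/pop rule turns on the global vlan pop policy; a
 * push rule also programs the vlan to insert on the ingress vport,
 * with vlan_refcount tracking how many rules share it. On success
 * attr->vlan_handled is set so the deletion path knows to undo this.
 */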
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

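/* Undo the vlan accounting done in mlx5_eswitch_add_vlan_action when
 * an offloaded rule is deleted: drop the per-vport vlan refcount,
 * clear the pushed vlan once the last user is gone, and disable the
 * global vlan pop policy when no push/pop rules remain.
 */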
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

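/* Install a "send to vport" rule in the slow path FDB table: traffic
 * sent by the PF (source vport 0) from the given representor send
 * queue (sqn) is steered out to the matching VF vport, so packets the
 * representor re-injects reach their VF.
 */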
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}

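/* Tear down all send-to-vport rules installed for a representor and
 * free their tracking entries. Also used as the error-unwind path of
 * mlx5_eswitch_sqs2vport_start below.
 */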
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}

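/* Install one send-to-vport rule per representor send queue so that
 * re-injected traffic is steered to the representor's VF vport. On
 * any failure, rules added so far are rolled back via
 * mlx5_eswitch_sqs2vport_stop.
 */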
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}

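/* Install the FDB miss rule: an empty match in the last group of the
 * slow path table that forwards any packet not hit by an offloaded
 * rule to vport 0, i.e. up to the PF for software handling.
 */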
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

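/* The offloads FDB is split in two: an auto-grouped fast path table
 * sized for offloaded flows, and a slow path table holding the
 * send-to-vport group (nvports + MAX_PF_SQ entries) followed by the
 * miss group. Tunnel encap/decap is enabled on the fast path table
 * when both FW capabilities are present.
 */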
#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
#define ESW_OFFLOADS_NUM_GROUPS 4

static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria;
	int table_size, ix, err = 0;
	u32 flags = 0;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  ESW_OFFLOADS_NUM_ENTRIES,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto fast_fdb_err;
	}
	esw->fdb_table.fdb = fdb;

	table_size = nvports + MAX_PF_SQ + 1;
	fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

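/* Create the receive-side offloads steering table: one entry per
 * vport (num_vfs plus PF and uplink) holding the vport rx rules that
 * steer traffic into each representor's TIR.
 */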
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_core_dev *dev = esw->dev;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

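/* Create the single flow group of the offloads table: all of its
 * entries match on the misc source_port field, one entry per vport
 * (num_vfs plus PF and uplink).
 */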
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

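/* Add a rule in the offloads table steering all traffic whose misc
 * source_port matches the given vport into the given TIR (tirn), so
 * it is delivered to that vport's representor netdevice.
 */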
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

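/* Switch the e-switch from legacy to offloads (switchdev) mode by
 * disabling SRIOV handling and re-enabling it in SRIOV_OFFLOADS mode.
 * On failure, an attempt is made to fall back to legacy mode.
 */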
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	return err;
}

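/* Bring up the offloads mode infrastructure: the two FDB tables, the
 * offloads rx table and its vport group, then load every registered
 * vport representor. Any failure unwinds in reverse order.
 */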
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}
	return 0;

err_reps:
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_table(esw);
}

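/* Translate between the devlink eswitch mode uAPI values and the
 * driver's internal SRIOV mode values, in both directions.
 */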
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

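/* devlink handlers for getting/setting the e-switch mode (legacy vs.
 * switchdev offloads). Setting requires the device to be the vport
 * group manager with SRIOV already enabled (mode != SRIOV_NONE), and
 * is a no-op when the requested mode is already active.
 */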
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev;
	u16 cur_mlx5_mode, mlx5_mode = 0;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (cur_mlx5_mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

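/* Register a vport representor: copy the caller-supplied load/unload
 * callbacks, vport number, private data and HW id into the e-switch
 * rep array and mark the slot valid so offloads init will load it.
 * Unregister unloads the rep first when offloads mode is active and
 * the vport is enabled.
 */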
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	memset(rep, 0, sizeof(*rep));

	rep->load = __rep->load;
	rep->unload = __rep->unload;
	rep->vport = __rep->vport;
	rep->priv_data = __rep->priv_data;
	ether_addr_copy(rep->hw_id, __rep->hw_id);

	INIT_LIST_HEAD(&rep->vport_sqs_list);
	rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(esw, rep);

	rep->valid = false;
}