/*
 * Source: drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
 * (mirror_ubuntu-artful-kernel.git, gitweb export from git.proxmox.com)
 */
1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/mlx5/driver.h>
34 #include <linux/mlx5/device.h>
35 #include <linux/mlx5/mlx5_ifc.h>
36
37 #include "fs_core.h"
38 #include "fs_cmd.h"
39 #include "mlx5_core.h"
40 #include "eswitch.h"
41
42 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
43 struct mlx5_flow_table *ft)
44 {
45 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
46 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
47
48 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
49 ft->underlay_qpn == 0)
50 return 0;
51
52 MLX5_SET(set_flow_table_root_in, in, opcode,
53 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
54 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
55 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
56 if (ft->vport) {
57 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
58 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
59 }
60
61 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
62 ft->underlay_qpn != 0)
63 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, ft->underlay_qpn);
64
65 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
66 }
67
68 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
69 u16 vport,
70 enum fs_flow_table_op_mod op_mod,
71 enum fs_flow_table_type type, unsigned int level,
72 unsigned int log_size, struct mlx5_flow_table
73 *next_ft, unsigned int *table_id, u32 flags)
74 {
75 int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
76 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
77 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
78 int err;
79
80 MLX5_SET(create_flow_table_in, in, opcode,
81 MLX5_CMD_OP_CREATE_FLOW_TABLE);
82
83 MLX5_SET(create_flow_table_in, in, table_type, type);
84 MLX5_SET(create_flow_table_in, in, level, level);
85 MLX5_SET(create_flow_table_in, in, log_size, log_size);
86 if (vport) {
87 MLX5_SET(create_flow_table_in, in, vport_number, vport);
88 MLX5_SET(create_flow_table_in, in, other_vport, 1);
89 }
90
91 MLX5_SET(create_flow_table_in, in, decap_en, en_encap_decap);
92 MLX5_SET(create_flow_table_in, in, encap_en, en_encap_decap);
93
94 switch (op_mod) {
95 case FS_FT_OP_MOD_NORMAL:
96 if (next_ft) {
97 MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
98 MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
99 }
100 break;
101
102 case FS_FT_OP_MOD_LAG_DEMUX:
103 MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
104 if (next_ft)
105 MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
106 next_ft->id);
107 break;
108 }
109
110 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
111 if (!err)
112 *table_id = MLX5_GET(create_flow_table_out, out,
113 table_id);
114 return err;
115 }
116
117 int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
118 struct mlx5_flow_table *ft)
119 {
120 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
121 u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
122
123 MLX5_SET(destroy_flow_table_in, in, opcode,
124 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
125 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
126 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
127 if (ft->vport) {
128 MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
129 MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
130 }
131
132 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
133 }
134
135 int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
136 struct mlx5_flow_table *ft,
137 struct mlx5_flow_table *next_ft)
138 {
139 u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
140 u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
141
142 MLX5_SET(modify_flow_table_in, in, opcode,
143 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
144 MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
145 MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
146
147 if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
148 MLX5_SET(modify_flow_table_in, in, modify_field_select,
149 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
150 if (next_ft) {
151 MLX5_SET(modify_flow_table_in, in,
152 lag_master_next_table_id, next_ft->id);
153 } else {
154 MLX5_SET(modify_flow_table_in, in,
155 lag_master_next_table_id, 0);
156 }
157 } else {
158 if (ft->vport) {
159 MLX5_SET(modify_flow_table_in, in, vport_number,
160 ft->vport);
161 MLX5_SET(modify_flow_table_in, in, other_vport, 1);
162 }
163 MLX5_SET(modify_flow_table_in, in, modify_field_select,
164 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
165 if (next_ft) {
166 MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
167 MLX5_SET(modify_flow_table_in, in, table_miss_id,
168 next_ft->id);
169 } else {
170 MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
171 }
172 }
173
174 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
175 }
176
177 int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
178 struct mlx5_flow_table *ft,
179 u32 *in,
180 unsigned int *group_id)
181 {
182 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
183 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
184 int err;
185
186 MLX5_SET(create_flow_group_in, in, opcode,
187 MLX5_CMD_OP_CREATE_FLOW_GROUP);
188 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
189 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
190 if (ft->vport) {
191 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
192 MLX5_SET(create_flow_group_in, in, other_vport, 1);
193 }
194
195 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
196 if (!err)
197 *group_id = MLX5_GET(create_flow_group_out, out,
198 group_id);
199 return err;
200 }
201
202 int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
203 struct mlx5_flow_table *ft,
204 unsigned int group_id)
205 {
206 u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
207 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
208
209 MLX5_SET(destroy_flow_group_in, in, opcode,
210 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
211 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
212 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
213 MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
214 if (ft->vport) {
215 MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
216 MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
217 }
218
219 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
220 }
221
222 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
223 int opmod, int modify_mask,
224 struct mlx5_flow_table *ft,
225 unsigned group_id,
226 struct fs_fte *fte)
227 {
228 unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
229 fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
230 u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
231 struct mlx5_flow_rule *dst;
232 void *in_flow_context;
233 void *in_match_value;
234 void *in_dests;
235 u32 *in;
236 int err;
237
238 in = mlx5_vzalloc(inlen);
239 if (!in) {
240 mlx5_core_warn(dev, "failed to allocate inbox\n");
241 return -ENOMEM;
242 }
243
244 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
245 MLX5_SET(set_fte_in, in, op_mod, opmod);
246 MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
247 MLX5_SET(set_fte_in, in, table_type, ft->type);
248 MLX5_SET(set_fte_in, in, table_id, ft->id);
249 MLX5_SET(set_fte_in, in, flow_index, fte->index);
250 if (ft->vport) {
251 MLX5_SET(set_fte_in, in, vport_number, ft->vport);
252 MLX5_SET(set_fte_in, in, other_vport, 1);
253 }
254
255 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
256 MLX5_SET(flow_context, in_flow_context, group_id, group_id);
257 MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
258 MLX5_SET(flow_context, in_flow_context, action, fte->action);
259 MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
260 MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id);
261 in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
262 match_value);
263 memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
264
265 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
266 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
267 int list_size = 0;
268
269 list_for_each_entry(dst, &fte->node.children, node.list) {
270 unsigned int id;
271
272 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
273 continue;
274
275 MLX5_SET(dest_format_struct, in_dests, destination_type,
276 dst->dest_attr.type);
277 if (dst->dest_attr.type ==
278 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
279 id = dst->dest_attr.ft->id;
280 } else {
281 id = dst->dest_attr.tir_num;
282 }
283 MLX5_SET(dest_format_struct, in_dests, destination_id, id);
284 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
285 list_size++;
286 }
287
288 MLX5_SET(flow_context, in_flow_context, destination_list_size,
289 list_size);
290 }
291
292 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
293 int list_size = 0;
294
295 list_for_each_entry(dst, &fte->node.children, node.list) {
296 if (dst->dest_attr.type !=
297 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
298 continue;
299
300 MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
301 dst->dest_attr.counter->id);
302 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
303 list_size++;
304 }
305
306 MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
307 list_size);
308 }
309
310 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
311 kvfree(in);
312 return err;
313 }
314
/* Create a new flow table entry: op_mod 0 with an empty modify mask. */
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned group_id,
			struct fs_fte *fte)
{
	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
322
323 int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
324 struct mlx5_flow_table *ft,
325 unsigned group_id,
326 int modify_mask,
327 struct fs_fte *fte)
328 {
329 int opmod;
330 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
331 flow_table_properties_nic_receive.
332 flow_modify_en);
333 if (!atomic_mod_cap)
334 return -EOPNOTSUPP;
335 opmod = 1;
336
337 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
338 }
339
340 int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
341 struct mlx5_flow_table *ft,
342 unsigned int index)
343 {
344 u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
345 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
346
347 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
348 MLX5_SET(delete_fte_in, in, table_type, ft->type);
349 MLX5_SET(delete_fte_in, in, table_id, ft->id);
350 MLX5_SET(delete_fte_in, in, flow_index, index);
351 if (ft->vport) {
352 MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
353 MLX5_SET(delete_fte_in, in, other_vport, 1);
354 }
355
356 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
357 }
358
359 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
360 {
361 u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
362 u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
363 int err;
364
365 MLX5_SET(alloc_flow_counter_in, in, opcode,
366 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
367
368 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
369 if (!err)
370 *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
371 return err;
372 }
373
374 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
375 {
376 u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
377 u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
378
379 MLX5_SET(dealloc_flow_counter_in, in, opcode,
380 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
381 MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
382 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
383 }
384
385 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
386 u64 *packets, u64 *bytes)
387 {
388 u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
389 MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
390 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
391 void *stats;
392 int err = 0;
393
394 MLX5_SET(query_flow_counter_in, in, opcode,
395 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
396 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
397 MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
398 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
399 if (err)
400 return err;
401
402 stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
403 *packets = MLX5_GET64(traffic_counter, stats, packets);
404 *bytes = MLX5_GET64(traffic_counter, stats, octets);
405 return 0;
406 }
407
408 struct mlx5_cmd_fc_bulk {
409 u16 id;
410 int num;
411 int outlen;
412 u32 out[0];
413 };
414
415 struct mlx5_cmd_fc_bulk *
416 mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
417 {
418 struct mlx5_cmd_fc_bulk *b;
419 int outlen =
420 MLX5_ST_SZ_BYTES(query_flow_counter_out) +
421 MLX5_ST_SZ_BYTES(traffic_counter) * num;
422
423 b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
424 if (!b)
425 return NULL;
426
427 b->id = id;
428 b->num = num;
429 b->outlen = outlen;
430
431 return b;
432 }
433
/* Release a buffer obtained from mlx5_cmd_fc_bulk_alloc(). */
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}
438
439 int
440 mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
441 {
442 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
443
444 MLX5_SET(query_flow_counter_in, in, opcode,
445 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
446 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
447 MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
448 MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
449 return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
450 }
451
452 void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
453 struct mlx5_cmd_fc_bulk *b, u16 id,
454 u64 *packets, u64 *bytes)
455 {
456 int index = id - b->id;
457 void *stats;
458
459 if (index < 0 || index >= b->num) {
460 mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
461 id, b->id, b->id + b->num - 1);
462 return;
463 }
464
465 stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
466 flow_statistics[index]);
467 *packets = MLX5_GET64(traffic_counter, stats, packets);
468 *bytes = MLX5_GET64(traffic_counter, stats, octets);
469 }
470
471 int mlx5_encap_alloc(struct mlx5_core_dev *dev,
472 int header_type,
473 size_t size,
474 void *encap_header,
475 u32 *encap_id)
476 {
477 int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
478 u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
479 void *encap_header_in;
480 void *header;
481 int inlen;
482 int err;
483 u32 *in;
484
485 if (size > max_encap_size) {
486 mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
487 size, max_encap_size);
488 return -EINVAL;
489 }
490
491 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
492 GFP_KERNEL);
493 if (!in)
494 return -ENOMEM;
495
496 encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
497 header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
498 inlen = header - (void *)in + size;
499
500 memset(in, 0, inlen);
501 MLX5_SET(alloc_encap_header_in, in, opcode,
502 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
503 MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
504 MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
505 memcpy(header, encap_header, size);
506
507 memset(out, 0, sizeof(out));
508 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
509
510 *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
511 kfree(in);
512 return err;
513 }
514
515 void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
516 {
517 u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
518 u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
519
520 memset(in, 0, sizeof(in));
521 MLX5_SET(dealloc_encap_header_in, in, opcode,
522 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
523 MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
524
525 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
526 }
527
528 int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
529 u8 namespace, u8 num_actions,
530 void *modify_actions, u32 *modify_header_id)
531 {
532 u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
533 int max_actions, actions_size, inlen, err;
534 void *actions_in;
535 u8 table_type;
536 u32 *in;
537
538 switch (namespace) {
539 case MLX5_FLOW_NAMESPACE_FDB:
540 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
541 table_type = FS_FT_FDB;
542 break;
543 case MLX5_FLOW_NAMESPACE_KERNEL:
544 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
545 table_type = FS_FT_NIC_RX;
546 break;
547 default:
548 return -EOPNOTSUPP;
549 }
550
551 if (num_actions > max_actions) {
552 mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
553 num_actions, max_actions);
554 return -EOPNOTSUPP;
555 }
556
557 actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
558 inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
559
560 in = kzalloc(inlen, GFP_KERNEL);
561 if (!in)
562 return -ENOMEM;
563
564 MLX5_SET(alloc_modify_header_context_in, in, opcode,
565 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
566 MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
567 MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
568
569 actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
570 memcpy(actions_in, modify_actions, actions_size);
571
572 memset(out, 0, sizeof(out));
573 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
574
575 *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
576 kfree(in);
577 return err;
578 }
579
580 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
581 {
582 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
583 u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
584
585 memset(in, 0, sizeof(in));
586 MLX5_SET(dealloc_modify_header_context_in, in, opcode,
587 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
588 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
589 modify_header_id);
590
591 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
592 }