// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

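/*
 * Map a uAPI flow table type (as supplied by userspace through the ioctl
 * interface) onto the mlx5 core flow steering namespace used internally.
 * Returns -EINVAL for table types that have no matching namespace.
 */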
static int
mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
                             enum mlx5_flow_namespace_type *namespace)
{
        switch (table_type) {
        case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
                *namespace = MLX5_FLOW_NAMESPACE_BYPASS;
                break;
        case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
                *namespace = MLX5_FLOW_NAMESPACE_EGRESS;
                break;
        case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB:
                *namespace = MLX5_FLOW_NAMESPACE_FDB;
                break;
        case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
                *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
                break;
        case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX:
                *namespace = MLX5_FLOW_NAMESPACE_RDMA_TX;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

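/*
 * Attribute specification for the MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE enum
 * attribute. Only the NORMAL flow type carries inline data (a u16 priority);
 * the other flow types take no data.
 */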
static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
        [MLX5_IB_FLOW_TYPE_NORMAL] = {
                .type = UVERBS_ATTR_TYPE_PTR_IN,
                .u.ptr = {
                        .len = sizeof(u16), /* data is priority */
                        .min_len = sizeof(u16),
                }
        },
        [MLX5_IB_FLOW_TYPE_SNIFFER] = {
                .type = UVERBS_ATTR_TYPE_PTR_IN,
                UVERBS_ATTR_NO_DATA(),
        },
        [MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = {
                .type = UVERBS_ATTR_TYPE_PTR_IN,
                UVERBS_ATTR_NO_DATA(),
        },
        [MLX5_IB_FLOW_TYPE_MC_DEFAULT] = {
                .type = UVERBS_ATTR_TYPE_PTR_IN,
                UVERBS_ATTR_NO_DATA(),
        },
};

#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
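/*
 * MLX5_IB_METHOD_CREATE_FLOW: insert a raw flow steering rule.
 *
 * The handler resolves the rule destination (a DEVX object, the TIR of a
 * RAW_PACKET QP, or the port when no explicit destination is supplied),
 * optionally binds a DEVX flow counter at a caller-supplied offset, collects
 * up to MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS flow actions and an optional
 * flow tag, and then adds the rule through mlx5_ib_raw_fs_rule_add().
 */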
static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
        struct uverbs_attr_bundle *attrs)
{
        struct mlx5_flow_context flow_context = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
        struct mlx5_ib_flow_handler *flow_handler;
        struct mlx5_ib_flow_matcher *fs_matcher;
        struct ib_uobject **arr_flow_actions;
        struct ib_uflow_resources *uflow_res;
        struct mlx5_flow_act flow_act = {};
        void *devx_obj;
        int dest_id, dest_type;
        void *cmd_in;
        int inlen;
        bool dest_devx, dest_qp;
        struct ib_qp *qp = NULL;
        struct ib_uobject *uobj =
                uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
        struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
        int len, ret, i;
        u32 counter_id = 0;
        u32 *offset_attr;
        u32 offset = 0;

        if (!capable(CAP_NET_RAW))
                return -EPERM;

        dest_devx =
                uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
        dest_qp = uverbs_attr_is_valid(attrs,
                                       MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);

        fs_matcher = uverbs_attr_get_obj(attrs,
                                         MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
        if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS &&
            ((dest_devx && dest_qp) || (!dest_devx && !dest_qp)))
                return -EINVAL;

        /* Allow only DEVX object as dest when inserting to FDB */
        if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx)
                return -EINVAL;

        /* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
        if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
            ((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
                return -EINVAL;

        if (dest_devx) {
                devx_obj = uverbs_attr_get_obj(
                        attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
                if (IS_ERR(devx_obj))
                        return PTR_ERR(devx_obj);

                /* Verify that the given DEVX object is a flow
                 * steering destination.
                 */
                if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
                        return -EINVAL;
                /* Allow only flow table as dest when inserting to FDB or RDMA_RX */
                if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
                     fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
                    dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
                        return -EINVAL;
        } else if (dest_qp) {
                struct mlx5_ib_qp *mqp;

                qp = uverbs_attr_get_obj(attrs,
                                         MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
                if (IS_ERR(qp))
                        return PTR_ERR(qp);

                if (qp->qp_type != IB_QPT_RAW_PACKET)
                        return -EINVAL;

                mqp = to_mqp(qp);
                if (mqp->is_rss)
                        dest_id = mqp->rss_qp.tirn;
                else
                        dest_id = mqp->raw_packet_qp.rq.tirn;
                dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        } else {
                dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
        }

        len = uverbs_attr_get_uobjs_arr(attrs,
                MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
        if (len) {
                devx_obj = arr_flow_actions[0]->object;

                if (uverbs_attr_is_valid(attrs,
                                MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) {

                        int num_offsets = uverbs_attr_ptr_get_array_size(
                                attrs,
                                MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
                                sizeof(u32));

                        if (num_offsets != 1)
                                return -EINVAL;

                        offset_attr = uverbs_attr_get_alloced_ptr(
                                attrs,
                                MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET);
                        offset = *offset_attr;
                }

                if (!mlx5_ib_devx_is_flow_counter(devx_obj, offset,
                                                  &counter_id))
                        return -EINVAL;

                flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
        }

        if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
            fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
                return -EINVAL;

        cmd_in = uverbs_attr_get_alloced_ptr(
                attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
        inlen = uverbs_attr_get_len(attrs,
                                    MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);

        uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
        if (!uflow_res)
                return -ENOMEM;

        len = uverbs_attr_get_uobjs_arr(attrs,
                MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
        for (i = 0; i < len; i++) {
                struct mlx5_ib_flow_action *maction =
                        to_mflow_act(arr_flow_actions[i]->object);

                ret = parse_flow_flow_action(maction, false, &flow_act);
                if (ret)
                        goto err_out;
                flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE,
                                   arr_flow_actions[i]->object);
        }

        ret = uverbs_copy_from(&flow_context.flow_tag, attrs,
                               MLX5_IB_ATTR_CREATE_FLOW_TAG);
        if (!ret) {
                if (flow_context.flow_tag >= BIT(24)) {
                        ret = -EINVAL;
                        goto err_out;
                }
                flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
        }

        flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher,
                                               &flow_context,
                                               &flow_act,
                                               counter_id,
                                               cmd_in, inlen,
                                               dest_id, dest_type);
        if (IS_ERR(flow_handler)) {
                ret = PTR_ERR(flow_handler);
                goto err_out;
        }

        ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);

        return 0;
err_out:
        ib_uverbs_flow_resources_free(uflow_res);
        return ret;
}

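/*
 * Destroy callback for the flow matcher uobject: refuse destruction while
 * flow rules still hold a reference (non-zero usecnt) unless the removal is
 * forced, otherwise free the matcher.
 */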
static int flow_matcher_cleanup(struct ib_uobject *uobject,
                                enum rdma_remove_reason why,
                                struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_flow_matcher *obj = uobject->object;
        int ret;

        ret = ib_destroy_usecnt(&obj->usecnt, why, uobject);
        if (ret)
                return ret;

        kfree(obj);
        return 0;
}

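/*
 * Derive the steering namespace for a new flow matcher from the creation
 * attributes: MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE takes precedence, the legacy
 * MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS (egress flag) is still honoured, and
 * the default is the NIC RX bypass namespace.
 */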
static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
                              struct mlx5_ib_flow_matcher *obj)
{
        enum mlx5_ib_uapi_flow_table_type ft_type =
                MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX;
        u32 flags;
        int err;

        /* New users should use MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE; the older
         * MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS attribute is kept only to
         * avoid breaking existing userspace, and the two may not be combined.
         */
        if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE) &&
            uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS))
                return -EINVAL;

        if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE)) {
                err = uverbs_get_const(&ft_type, attrs,
                                       MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE);
                if (err)
                        return err;

                err = mlx5_ib_ft_type_to_namespace(ft_type, &obj->ns_type);
                if (err)
                        return err;

                return 0;
        }

        if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) {
                err = uverbs_get_flags32(&flags, attrs,
                                         MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
                                         IB_FLOW_ATTR_FLAGS_EGRESS);
                if (err)
                        return err;

                if (flags) {
                        mlx5_ib_ft_type_to_namespace(
                                MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
                                &obj->ns_type);
                        return 0;
                }
        }

        obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;

        return 0;
}

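/*
 * MLX5_IB_METHOD_FLOW_MATCHER_CREATE: allocate a flow matcher object from the
 * userspace-provided match mask, flow type (with optional priority), match
 * criteria enable bits and steering namespace selection.
 */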
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
        struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj = uverbs_attr_get_uobject(
                attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
        struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
        struct mlx5_ib_flow_matcher *obj;
        int err;

        obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        obj->mask_len = uverbs_attr_get_len(
                attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
        err = uverbs_copy_from(&obj->matcher_mask,
                               attrs,
                               MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
        if (err)
                goto end;

        obj->flow_type = uverbs_attr_get_enum_id(
                attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);

        if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
                err = uverbs_copy_from(&obj->priority,
                                       attrs,
                                       MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
                if (err)
                        goto end;
        }

        err = uverbs_copy_from(&obj->match_criteria_enable,
                               attrs,
                               MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
        if (err)
                goto end;

        err = mlx5_ib_matcher_ns(attrs, obj);
        if (err)
                goto end;

        uobj->object = obj;
        obj->mdev = dev->mdev;
        atomic_set(&obj->usecnt, 0);
        return 0;

end:
        kfree(obj);
        return err;
}

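/*
 * Release the mlx5 core resource backing a raw flow action: the modify-header
 * or packet-reformat context. Decap actions have no core resource to free.
 */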
void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
        switch (maction->flow_action_raw.sub_type) {
        case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
                mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
                                           maction->flow_action_raw.modify_hdr);
                break;
        case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
                mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
                                             maction->flow_action_raw.pkt_reformat);
                break;
        case MLX5_IB_FLOW_ACTION_DECAP:
                break;
        default:
                break;
        }
}

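/*
 * Allocate a modify-header context in mlx5 core for the requested flow table
 * type and wrap it in an mlx5_ib_flow_action of sub-type MODIFY_HEADER.
 */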
static struct ib_flow_action *
mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
                             enum mlx5_ib_uapi_flow_table_type ft_type,
                             u8 num_actions, void *in)
{
        enum mlx5_flow_namespace_type namespace;
        struct mlx5_ib_flow_action *maction;
        int ret;

        ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
        if (ret)
                return ERR_PTR(-EINVAL);

        maction = kzalloc(sizeof(*maction), GFP_KERNEL);
        if (!maction)
                return ERR_PTR(-ENOMEM);

        maction->flow_action_raw.modify_hdr =
                mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in);

        if (IS_ERR(maction->flow_action_raw.modify_hdr)) {
                ret = PTR_ERR(maction->flow_action_raw.modify_hdr);
                kfree(maction);
                return ERR_PTR(ret);
        }
        maction->flow_action_raw.sub_type =
                MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
        maction->flow_action_raw.dev = dev;

        return &maction->ib_action;
}

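/* Modify-header is usable if either the NIC RX or NIC TX flow table supports it. */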
static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
{
        return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
                                         max_modify_header_actions) ||
               MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, max_modify_header_actions);
}

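/*
 * MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER: create a flow action from
 * a userspace-provided array of PRM set/add/copy actions for the given flow
 * table type.
 */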
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
        struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj = uverbs_attr_get_uobject(
                attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
        struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
        enum mlx5_ib_uapi_flow_table_type ft_type;
        struct ib_flow_action *action;
        int num_actions;
        void *in;
        int ret;

        if (!mlx5_ib_modify_header_supported(mdev))
                return -EOPNOTSUPP;

        in = uverbs_attr_get_alloced_ptr(attrs,
                MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);

        num_actions = uverbs_attr_ptr_get_array_size(
                attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
                MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto));
        if (num_actions < 0)
                return num_actions;

        ret = uverbs_get_const(&ft_type, attrs,
                               MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
        if (ret)
                return ret;
        action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
        if (IS_ERR(action))
                return PTR_ERR(action);

        uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev,
                                       IB_FLOW_ACTION_UNSPECIFIED);

        return 0;
}

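/*
 * Check whether the device supports the requested packet reformat type on the
 * requested flow table type, based on the relevant flow table capability bits.
 */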
static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
                                                      u8 packet_reformat_type,
                                                      u8 ft_type)
{
        switch (packet_reformat_type) {
        case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
                if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
                        return MLX5_CAP_FLOWTABLE(ibdev->mdev,
                                                  encap_general_header);
                break;
        case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
                if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
                        return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
                                                         reformat_l2_to_l3_tunnel);
                break;
        case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
                if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
                        return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
                                                         reformat_l3_tunnel_to_l2);
                break;
        case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
                if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
                        return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
                break;
        default:
                break;
        }

        return false;
}

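/* Translate the uAPI packet reformat type into its PRM encoding. */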
static int mlx5_ib_dv_to_prm_packet_reforamt_type(u8 dv_prt, u8 *prm_prt)
{
        switch (dv_prt) {
        case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
                *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
                break;
        case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
                *prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
                break;
        case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
                *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

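/*
 * Allocate a packet reformat context in mlx5 core for the given namespace and
 * reformat type, and attach it to the flow action as sub-type PACKET_REFORMAT.
 */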
static int mlx5_ib_flow_action_create_packet_reformat_ctx(
        struct mlx5_ib_dev *dev,
        struct mlx5_ib_flow_action *maction,
        u8 ft_type, u8 dv_prt,
        void *in, size_t len)
{
        enum mlx5_flow_namespace_type namespace;
        u8 prm_prt;
        int ret;

        ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
        if (ret)
                return ret;

        ret = mlx5_ib_dv_to_prm_packet_reforamt_type(dv_prt, &prm_prt);
        if (ret)
                return ret;

        maction->flow_action_raw.pkt_reformat =
                mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
                                           in, namespace);
        if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
                ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
                return ret;
        }

        maction->flow_action_raw.sub_type =
                MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
        maction->flow_action_raw.dev = dev;

        return 0;
}

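/*
 * MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT: create an encap/decap
 * flow action. L2 tunnel decap needs no reformat data; all other types take
 * a data buffer used to allocate a packet reformat context.
 */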
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
        struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
                MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
        struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
        enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
        enum mlx5_ib_uapi_flow_table_type ft_type;
        struct mlx5_ib_flow_action *maction;
        int ret;

        ret = uverbs_get_const(&ft_type, attrs,
                               MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
        if (ret)
                return ret;

        ret = uverbs_get_const(&dv_prt, attrs,
                               MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
        if (ret)
                return ret;

        if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
                return -EOPNOTSUPP;

        maction = kzalloc(sizeof(*maction), GFP_KERNEL);
        if (!maction)
                return -ENOMEM;

        if (dv_prt ==
            MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
                maction->flow_action_raw.sub_type =
                        MLX5_IB_FLOW_ACTION_DECAP;
                maction->flow_action_raw.dev = mdev;
        } else {
                void *in;
                int len;

                in = uverbs_attr_get_alloced_ptr(attrs,
                        MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
                if (IS_ERR(in)) {
                        ret = PTR_ERR(in);
                        goto free_maction;
                }

                len = uverbs_attr_get_len(attrs,
                        MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);

                ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
                        maction, ft_type, dv_prt, in, len);
                if (ret)
                        goto free_maction;
        }

        uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev,
                                       IB_FLOW_ACTION_UNSPECIFIED);
        return 0;

free_maction:
        kfree(maction);
        return ret;
}

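/*
 * Ioctl method and attribute declarations: the DECLARE_UVERBS_NAMED_METHOD()
 * and ADD_UVERBS_METHODS() blocks below describe the attributes each method
 * accepts and bind the handlers above to the FLOW, FLOW_ACTION and
 * FLOW_MATCHER uverbs objects.
 */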
DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_CREATE_FLOW,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
                        UVERBS_OBJECT_FLOW,
                        UVERBS_ACCESS_NEW,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(
                MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
                UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
                UA_MANDATORY,
                UA_ALLOC_AND_COPY),
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
                        MLX5_IB_OBJECT_FLOW_MATCHER,
                        UVERBS_ACCESS_READ,
                        UA_MANDATORY),
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
                        UVERBS_OBJECT_QP,
                        UVERBS_ACCESS_READ),
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
                        MLX5_IB_OBJECT_DEVX_OBJ,
                        UVERBS_ACCESS_READ),
        UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
                             UVERBS_OBJECT_FLOW_ACTION,
                             UVERBS_ACCESS_READ, 1,
                             MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS,
                             UA_OPTIONAL),
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG,
                           UVERBS_ATTR_TYPE(u32),
                           UA_OPTIONAL),
        UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
                             MLX5_IB_OBJECT_DEVX_OBJ,
                             UVERBS_ACCESS_READ, 1, 1,
                             UA_OPTIONAL),
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
                           UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
                           UA_OPTIONAL,
                           UA_ALLOC_AND_COPY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
        MLX5_IB_METHOD_DESTROY_FLOW,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
                        UVERBS_OBJECT_FLOW,
                        UVERBS_ACCESS_DESTROY,
                        UA_MANDATORY));

ADD_UVERBS_METHODS(mlx5_ib_fs,
                   UVERBS_OBJECT_FLOW,
                   &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
                   &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
                        UVERBS_OBJECT_FLOW_ACTION,
                        UVERBS_ACCESS_NEW,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
                           UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
                                   set_add_copy_action_in_auto)),
                           UA_MANDATORY,
                           UA_ALLOC_AND_COPY),
        UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
                             enum mlx5_ib_uapi_flow_table_type,
                             UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
                        UVERBS_OBJECT_FLOW_ACTION,
                        UVERBS_ACCESS_NEW,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
                           UVERBS_ATTR_MIN_SIZE(1),
                           UA_ALLOC_AND_COPY,
                           UA_OPTIONAL),
        UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
                             enum mlx5_ib_uapi_flow_action_packet_reformat_type,
                             UA_MANDATORY),
        UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
                             enum mlx5_ib_uapi_flow_table_type,
                             UA_MANDATORY));

ADD_UVERBS_METHODS(
        mlx5_ib_flow_actions,
        UVERBS_OBJECT_FLOW_ACTION,
        &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
        &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));

DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
                        MLX5_IB_OBJECT_FLOW_MATCHER,
                        UVERBS_ACCESS_NEW,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(
                MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
                UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
                UA_MANDATORY),
        UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
                            mlx5_ib_flow_type,
                            UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
                           UVERBS_ATTR_TYPE(u8),
                           UA_MANDATORY),
        UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
                             enum ib_flow_flags,
                             UA_OPTIONAL),
        UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE,
                             enum mlx5_ib_uapi_flow_table_type,
                             UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
        MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
                        MLX5_IB_OBJECT_FLOW_MATCHER,
                        UVERBS_ACCESS_DESTROY,
                        UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
                            UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
                            &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
                            &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));

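/*
 * Top-level uAPI definition table exposing the flow matcher object and the
 * extra FLOW and FLOW_ACTION methods defined in this file.
 */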
const struct uapi_definition mlx5_ib_flow_defs[] = {
        UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
                MLX5_IB_OBJECT_FLOW_MATCHER),
        UAPI_DEF_CHAIN_OBJ_TREE(
                UVERBS_OBJECT_FLOW,
                &mlx5_ib_fs),
        UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
                                &mlx5_ib_flow_actions),
        {},
};