1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
4 */
5
6#include <netinet/in.h>
7#include <sys/queue.h>
8#include <stdalign.h>
9#include <stdint.h>
10#include <string.h>
11
12/* Verbs header. */
13/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
14#ifdef PEDANTIC
15#pragma GCC diagnostic ignored "-Wpedantic"
16#endif
17#include <infiniband/verbs.h>
18#ifdef PEDANTIC
19#pragma GCC diagnostic error "-Wpedantic"
20#endif
21
22#include <rte_common.h>
23#include <rte_ether.h>
24#include <rte_ethdev_driver.h>
25#include <rte_flow.h>
26#include <rte_flow_driver.h>
27#include <rte_malloc.h>
28#include <rte_ip.h>
29
30#include "mlx5.h"
31#include "mlx5_defs.h"
32#include "mlx5_flow.h"
33#include "mlx5_glue.h"
34#include "mlx5_prm.h"
35#include "mlx5_rxtx.h"
36
37/* Dev ops structure defined in mlx5.c */
38extern const struct eth_dev_ops mlx5_dev_ops;
39extern const struct eth_dev_ops mlx5_dev_ops_isolate;
40
41/** Device flow drivers. */
42#ifdef HAVE_IBV_FLOW_DV_SUPPORT
43extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
44#endif
45extern const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops;
46extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
47
48const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;
49
50const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
51 [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
52#ifdef HAVE_IBV_FLOW_DV_SUPPORT
53 [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
54#endif
55 [MLX5_FLOW_TYPE_TCF] = &mlx5_flow_tcf_drv_ops,
56 [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
57 [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
58};
59
60enum mlx5_expansion {
61 MLX5_EXPANSION_ROOT,
62 MLX5_EXPANSION_ROOT_OUTER,
63 MLX5_EXPANSION_ROOT_ETH_VLAN,
64 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
65 MLX5_EXPANSION_OUTER_ETH,
66 MLX5_EXPANSION_OUTER_ETH_VLAN,
67 MLX5_EXPANSION_OUTER_VLAN,
68 MLX5_EXPANSION_OUTER_IPV4,
69 MLX5_EXPANSION_OUTER_IPV4_UDP,
70 MLX5_EXPANSION_OUTER_IPV4_TCP,
71 MLX5_EXPANSION_OUTER_IPV6,
72 MLX5_EXPANSION_OUTER_IPV6_UDP,
73 MLX5_EXPANSION_OUTER_IPV6_TCP,
74 MLX5_EXPANSION_VXLAN,
75 MLX5_EXPANSION_VXLAN_GPE,
76 MLX5_EXPANSION_GRE,
77 MLX5_EXPANSION_MPLS,
78 MLX5_EXPANSION_ETH,
79 MLX5_EXPANSION_ETH_VLAN,
80 MLX5_EXPANSION_VLAN,
81 MLX5_EXPANSION_IPV4,
82 MLX5_EXPANSION_IPV4_UDP,
83 MLX5_EXPANSION_IPV4_TCP,
84 MLX5_EXPANSION_IPV6,
85 MLX5_EXPANSION_IPV6_UDP,
86 MLX5_EXPANSION_IPV6_TCP,
87};
88
89/** Supported expansion of items. */
90static const struct rte_flow_expand_node mlx5_support_expansion[] = {
91 [MLX5_EXPANSION_ROOT] = {
92 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
93 MLX5_EXPANSION_IPV4,
94 MLX5_EXPANSION_IPV6),
95 .type = RTE_FLOW_ITEM_TYPE_END,
96 },
97 [MLX5_EXPANSION_ROOT_OUTER] = {
98 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
99 MLX5_EXPANSION_OUTER_IPV4,
100 MLX5_EXPANSION_OUTER_IPV6),
101 .type = RTE_FLOW_ITEM_TYPE_END,
102 },
103 [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
104 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
105 .type = RTE_FLOW_ITEM_TYPE_END,
106 },
107 [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
108 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
109 .type = RTE_FLOW_ITEM_TYPE_END,
110 },
111 [MLX5_EXPANSION_OUTER_ETH] = {
112 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
113 MLX5_EXPANSION_OUTER_IPV6,
114 MLX5_EXPANSION_MPLS),
115 .type = RTE_FLOW_ITEM_TYPE_ETH,
116 .rss_types = 0,
117 },
118 [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
119 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
120 .type = RTE_FLOW_ITEM_TYPE_ETH,
121 .rss_types = 0,
122 },
123 [MLX5_EXPANSION_OUTER_VLAN] = {
124 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
125 MLX5_EXPANSION_OUTER_IPV6),
126 .type = RTE_FLOW_ITEM_TYPE_VLAN,
127 },
128 [MLX5_EXPANSION_OUTER_IPV4] = {
129 .next = RTE_FLOW_EXPAND_RSS_NEXT
130 (MLX5_EXPANSION_OUTER_IPV4_UDP,
131 MLX5_EXPANSION_OUTER_IPV4_TCP,
132 MLX5_EXPANSION_GRE),
133 .type = RTE_FLOW_ITEM_TYPE_IPV4,
134 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
135 ETH_RSS_NONFRAG_IPV4_OTHER,
136 },
137 [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
138 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
139 MLX5_EXPANSION_VXLAN_GPE),
140 .type = RTE_FLOW_ITEM_TYPE_UDP,
141 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
142 },
143 [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
144 .type = RTE_FLOW_ITEM_TYPE_TCP,
145 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
146 },
147 [MLX5_EXPANSION_OUTER_IPV6] = {
148 .next = RTE_FLOW_EXPAND_RSS_NEXT
149 (MLX5_EXPANSION_OUTER_IPV6_UDP,
150 MLX5_EXPANSION_OUTER_IPV6_TCP),
151 .type = RTE_FLOW_ITEM_TYPE_IPV6,
152 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
153 ETH_RSS_NONFRAG_IPV6_OTHER,
154 },
155 [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
156 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
157 MLX5_EXPANSION_VXLAN_GPE),
158 .type = RTE_FLOW_ITEM_TYPE_UDP,
159 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
160 },
161 [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
162 .type = RTE_FLOW_ITEM_TYPE_TCP,
163 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
164 },
165 [MLX5_EXPANSION_VXLAN] = {
166 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
167 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
168 },
169 [MLX5_EXPANSION_VXLAN_GPE] = {
170 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
171 MLX5_EXPANSION_IPV4,
172 MLX5_EXPANSION_IPV6),
173 .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
174 },
175 [MLX5_EXPANSION_GRE] = {
176 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
177 .type = RTE_FLOW_ITEM_TYPE_GRE,
178 },
179 [MLX5_EXPANSION_MPLS] = {
180 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
181 MLX5_EXPANSION_IPV6),
182 .type = RTE_FLOW_ITEM_TYPE_MPLS,
183 },
184 [MLX5_EXPANSION_ETH] = {
185 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
186 MLX5_EXPANSION_IPV6),
187 .type = RTE_FLOW_ITEM_TYPE_ETH,
188 },
189 [MLX5_EXPANSION_ETH_VLAN] = {
190 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
191 .type = RTE_FLOW_ITEM_TYPE_ETH,
192 },
193 [MLX5_EXPANSION_VLAN] = {
194 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
195 MLX5_EXPANSION_IPV6),
196 .type = RTE_FLOW_ITEM_TYPE_VLAN,
197 },
198 [MLX5_EXPANSION_IPV4] = {
199 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
200 MLX5_EXPANSION_IPV4_TCP),
201 .type = RTE_FLOW_ITEM_TYPE_IPV4,
202 .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
203 ETH_RSS_NONFRAG_IPV4_OTHER,
204 },
205 [MLX5_EXPANSION_IPV4_UDP] = {
206 .type = RTE_FLOW_ITEM_TYPE_UDP,
207 .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
208 },
209 [MLX5_EXPANSION_IPV4_TCP] = {
210 .type = RTE_FLOW_ITEM_TYPE_TCP,
211 .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
212 },
213 [MLX5_EXPANSION_IPV6] = {
214 .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
215 MLX5_EXPANSION_IPV6_TCP),
216 .type = RTE_FLOW_ITEM_TYPE_IPV6,
217 .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
218 ETH_RSS_NONFRAG_IPV6_OTHER,
219 },
220 [MLX5_EXPANSION_IPV6_UDP] = {
221 .type = RTE_FLOW_ITEM_TYPE_UDP,
222 .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
223 },
224 [MLX5_EXPANSION_IPV6_TCP] = {
225 .type = RTE_FLOW_ITEM_TYPE_TCP,
226 .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
227 },
228};
229
230static const struct rte_flow_ops mlx5_flow_ops = {
231 .validate = mlx5_flow_validate,
232 .create = mlx5_flow_create,
233 .destroy = mlx5_flow_destroy,
234 .flush = mlx5_flow_flush,
235 .isolate = mlx5_flow_isolate,
236 .query = mlx5_flow_query,
237};
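238
239/*
240 * Note: these callbacks back the generic rte_flow API for mlx5 ports; rte_flow
241 * calls made on a port are expected to be dispatched to the functions above
242 * (validate/create/destroy/flush/isolate/query) through the device flow ops.
243 */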
238
239/* Convert FDIR request to Generic flow. */
240struct mlx5_fdir {
241 struct rte_flow_attr attr;
242 struct rte_flow_item items[4];
243 struct rte_flow_item_eth l2;
244 struct rte_flow_item_eth l2_mask;
245 union {
246 struct rte_flow_item_ipv4 ipv4;
247 struct rte_flow_item_ipv6 ipv6;
248 } l3;
249 union {
250 struct rte_flow_item_ipv4 ipv4;
251 struct rte_flow_item_ipv6 ipv6;
252 } l3_mask;
253 union {
254 struct rte_flow_item_udp udp;
255 struct rte_flow_item_tcp tcp;
256 } l4;
257 union {
258 struct rte_flow_item_udp udp;
259 struct rte_flow_item_tcp tcp;
260 } l4_mask;
261 struct rte_flow_action actions[2];
262 struct rte_flow_action_queue queue;
263};
264
265/* Map of Verbs to Flow priority with 8 Verbs priorities. */
266static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
267 { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
268};
269
270/* Map of Verbs to Flow priority with 16 Verbs priorities. */
271static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
272 { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
273 { 9, 10, 11 }, { 12, 13, 14 },
274};
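/*
 * The maps are indexed as [base priority][subpriority]; e.g. with 16 Verbs
 * priorities (priority_map_5), a flow with base priority 1 and subpriority 2
 * lands at Verbs priority priority_map_5[1][2] = 5.
 * See mlx5_flow_adjust_priority() below.
 */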
275
276/* Tunnel information. */
277struct mlx5_flow_tunnel_info {
278 uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
279 uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
280};
281
282static struct mlx5_flow_tunnel_info tunnels_info[] = {
283 {
284 .tunnel = MLX5_FLOW_LAYER_VXLAN,
285 .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
286 },
287 {
288 .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
289 .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
290 },
291 {
292 .tunnel = MLX5_FLOW_LAYER_GRE,
293 .ptype = RTE_PTYPE_TUNNEL_GRE,
294 },
295 {
296 .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
297 .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
298 },
299 {
300 .tunnel = MLX5_FLOW_LAYER_MPLS,
301 .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
302 },
303};
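/*
 * The per-queue tunnel counters in flow_drv_rxq_flags_set()/_trim() pick the
 * first entry whose tunnel bits are all present in the flow layers, so the
 * more specific MPLS-over-UDP entry must stay ahead of the plain MPLS entry.
 */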
304
305/**
306 * Discover the maximum number of priority available.
307 *
308 * @param[in] dev
309 * Pointer to the Ethernet device structure.
310 *
311 * @return
312 * number of supported flow priorities on success, a negative errno
313 * value otherwise and rte_errno is set.
314 */
315int
316mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
317{
318 struct mlx5_priv *priv = dev->data->dev_private;
319 struct {
320 struct ibv_flow_attr attr;
321 struct ibv_flow_spec_eth eth;
322 struct ibv_flow_spec_action_drop drop;
323 } flow_attr = {
324 .attr = {
325 .num_of_specs = 2,
326 .port = (uint8_t)priv->ibv_port,
327 },
328 .eth = {
329 .type = IBV_FLOW_SPEC_ETH,
330 .size = sizeof(struct ibv_flow_spec_eth),
331 },
332 .drop = {
333 .size = sizeof(struct ibv_flow_spec_action_drop),
334 .type = IBV_FLOW_SPEC_ACTION_DROP,
335 },
336 };
337 struct ibv_flow *flow;
338 struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
339 uint16_t vprio[] = { 8, 16 };
340 int i;
341 int priority = 0;
342
343 if (!drop) {
344 rte_errno = ENOTSUP;
345 return -rte_errno;
346 }
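	/*
	 * Probe the supported range by creating a throw-away drop flow at the
	 * highest priority of each candidate range (8 or 16 Verbs priorities);
	 * the last value that succeeds is the number of priorities exposed.
	 */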
347 for (i = 0; i != RTE_DIM(vprio); i++) {
348 flow_attr.attr.priority = vprio[i] - 1;
349 flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
350 if (!flow)
351 break;
352 claim_zero(mlx5_glue->destroy_flow(flow));
353 priority = vprio[i];
354 }
355 mlx5_hrxq_drop_release(dev);
356 switch (priority) {
357 case 8:
358 priority = RTE_DIM(priority_map_3);
359 break;
360 case 16:
361 priority = RTE_DIM(priority_map_5);
362 break;
363 default:
364 rte_errno = ENOTSUP;
365 DRV_LOG(ERR,
366 "port %u verbs maximum priority: %d expected 8/16",
367 dev->data->port_id, priority);
368 return -rte_errno;
369 }
370 DRV_LOG(INFO, "port %u flow maximum priority: %d",
371 dev->data->port_id, priority);
372 return priority;
373}
374
375/**
376 * Adjust flow priority based on the highest layer and the requested priority.
377 *
378 * @param[in] dev
379 * Pointer to the Ethernet device structure.
380 * @param[in] priority
381 * The rule base priority.
382 * @param[in] subpriority
383 * The priority based on the items.
384 *
385 * @return
386 * The new priority.
387 */
388uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
389 uint32_t subpriority)
390{
391 uint32_t res = 0;
392 struct mlx5_priv *priv = dev->data->dev_private;
393
394 switch (priv->config.flow_prio) {
395 case RTE_DIM(priority_map_3):
396 res = priority_map_3[priority][subpriority];
397 break;
398 case RTE_DIM(priority_map_5):
399 res = priority_map_5[priority][subpriority];
400 break;
401 }
402 return res;
403}
404
405/**
406 * Verify the @p item specifications (spec, last, mask) are compatible with the
407 * NIC capabilities.
408 *
409 * @param[in] item
410 * Item specification.
411 * @param[in] mask
412 * @p item->mask or flow default bit-masks.
413 * @param[in] nic_mask
414 * Bit-masks covering supported fields by the NIC to compare with user mask.
415 * @param[in] size
416 * Bit-masks size in bytes.
417 * @param[out] error
418 * Pointer to error structure.
419 *
420 * @return
421 * 0 on success, a negative errno value otherwise and rte_errno is set.
422 */
423int
424mlx5_flow_item_acceptable(const struct rte_flow_item *item,
425 const uint8_t *mask,
426 const uint8_t *nic_mask,
427 unsigned int size,
428 struct rte_flow_error *error)
429{
430 unsigned int i;
431
432 assert(nic_mask);
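	/*
	 * The user mask must be a subset of what the NIC can match on: any bit
	 * set in mask[] but cleared in nic_mask[] is rejected.
	 */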
433 for (i = 0; i < size; ++i)
434 if ((nic_mask[i] | mask[i]) != nic_mask[i])
435 return rte_flow_error_set(error, ENOTSUP,
436 RTE_FLOW_ERROR_TYPE_ITEM,
437 item,
438 "mask enables non supported"
439 " bits");
440 if (!item->spec && (item->mask || item->last))
441 return rte_flow_error_set(error, EINVAL,
442 RTE_FLOW_ERROR_TYPE_ITEM, item,
443 "mask/last without a spec is not"
444 " supported");
445 if (item->spec && item->last) {
446 uint8_t spec[size];
447 uint8_t last[size];
448 unsigned int i;
449 int ret;
450
451 for (i = 0; i < size; ++i) {
452 spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
453 last[i] = ((const uint8_t *)item->last)[i] & mask[i];
454 }
455 ret = memcmp(spec, last, size);
456 if (ret != 0)
457 return rte_flow_error_set(error, EINVAL,
458 RTE_FLOW_ERROR_TYPE_ITEM,
459 item,
460 "range is not valid");
461 }
462 return 0;
463}
464
465/**
466 * Adjust the hash fields according to the @p flow information.
467 *
468 * @param[in] dev_flow
469 * Pointer to the mlx5_flow.
470 * @param[in] tunnel
471 * 1 when the hash field is for a tunnel item.
472 * @param[in] layer_types
473 * ETH_RSS_* types.
474 * @param[in] hash_fields
475 * Item hash fields.
476 *
477 * @return
478 * The hash fields that should be used.
479 */
480uint64_t
481mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
482 int tunnel __rte_unused, uint64_t layer_types,
483 uint64_t hash_fields)
484{
485 struct rte_flow *flow = dev_flow->flow;
486#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
487 int rss_request_inner = flow->rss.level >= 2;
488
489 /* Check RSS hash level for tunnel. */
490 if (tunnel && rss_request_inner)
491 hash_fields |= IBV_RX_HASH_INNER;
492 else if (tunnel || rss_request_inner)
493 return 0;
494#endif
495 /* Check if requested layer matches RSS hash fields. */
496 if (!(flow->rss.types & layer_types))
497 return 0;
498 return hash_fields;
499}
500
501/**
502 * Look up and set the tunnel ptype in the Rx queue data. Only a single ptype
503 * can be used; if several different tunnel rules are used on this queue, the
504 * tunnel ptype is cleared.
505 *
506 * @param rxq_ctrl
507 * Rx queue to update.
508 */
509static void
510flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
511{
512 unsigned int i;
513 uint32_t tunnel_ptype = 0;
514
515 /* Look up for the ptype to use. */
516 for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
517 if (!rxq_ctrl->flow_tunnels_n[i])
518 continue;
519 if (!tunnel_ptype) {
520 tunnel_ptype = tunnels_info[i].ptype;
521 } else {
522 tunnel_ptype = 0;
523 break;
524 }
525 }
526 rxq_ctrl->rxq.tunnel = tunnel_ptype;
527}
528
529/**
530 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
531 * flow.
532 *
533 * @param[in] dev
534 * Pointer to the Ethernet device structure.
535 * @param[in] dev_flow
536 * Pointer to device flow structure.
537 */
538static void
539flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
540{
541 struct mlx5_priv *priv = dev->data->dev_private;
542 struct rte_flow *flow = dev_flow->flow;
543 const int mark = !!(flow->actions &
544 (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
545 const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
546 unsigned int i;
547
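	/*
	 * Walk every Rx queue used by the flow: flag MARK usage and count the
	 * tunnel types so the queue's reported ptype stays accurate.
	 */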
548 for (i = 0; i != flow->rss.queue_num; ++i) {
549 int idx = (*flow->queue)[i];
550 struct mlx5_rxq_ctrl *rxq_ctrl =
551 container_of((*priv->rxqs)[idx],
552 struct mlx5_rxq_ctrl, rxq);
553
554 if (mark) {
555 rxq_ctrl->rxq.mark = 1;
556 rxq_ctrl->flow_mark_n++;
557 }
558 if (tunnel) {
559 unsigned int j;
560
561 /* Increase the counter matching the flow. */
562 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
563 if ((tunnels_info[j].tunnel &
564 dev_flow->layers) ==
565 tunnels_info[j].tunnel) {
566 rxq_ctrl->flow_tunnels_n[j]++;
567 break;
568 }
569 }
570 flow_rxq_tunnel_ptype_update(rxq_ctrl);
571 }
572 }
573}
574
575/**
576 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
577 *
578 * @param[in] dev
579 * Pointer to the Ethernet device structure.
580 * @param[in] flow
581 * Pointer to flow structure.
582 */
583static void
584flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
585{
586 struct mlx5_flow *dev_flow;
587
588 LIST_FOREACH(dev_flow, &flow->dev_flows, next)
589 flow_drv_rxq_flags_set(dev, dev_flow);
590}
591
592/**
593 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
594 * device flow if no other flow uses it with the same kind of request.
595 *
596 * @param dev
597 * Pointer to Ethernet device.
598 * @param[in] dev_flow
599 * Pointer to the device flow.
600 */
601static void
602flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
603{
604 struct mlx5_priv *priv = dev->data->dev_private;
605 struct rte_flow *flow = dev_flow->flow;
606 const int mark = !!(flow->actions &
607 (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
608 const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
609 unsigned int i;
610
611 assert(dev->data->dev_started);
612 for (i = 0; i != flow->rss.queue_num; ++i) {
613 int idx = (*flow->queue)[i];
614 struct mlx5_rxq_ctrl *rxq_ctrl =
615 container_of((*priv->rxqs)[idx],
616 struct mlx5_rxq_ctrl, rxq);
617
618 if (mark) {
619 rxq_ctrl->flow_mark_n--;
620 rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
621 }
622 if (tunnel) {
623 unsigned int j;
624
625 /* Decrease the counter matching the flow. */
626 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
627 if ((tunnels_info[j].tunnel &
628 dev_flow->layers) ==
629 tunnels_info[j].tunnel) {
630 rxq_ctrl->flow_tunnels_n[j]--;
631 break;
632 }
633 }
634 flow_rxq_tunnel_ptype_update(rxq_ctrl);
635 }
636 }
637}
638
639/**
640 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
641 * @p flow if no other flow uses it with the same kind of request.
642 *
643 * @param dev
644 * Pointer to Ethernet device.
645 * @param[in] flow
646 * Pointer to the flow.
647 */
648static void
649flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
650{
651 struct mlx5_flow *dev_flow;
652
653 LIST_FOREACH(dev_flow, &flow->dev_flows, next)
654 flow_drv_rxq_flags_trim(dev, dev_flow);
655}
656
657/**
658 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
659 *
660 * @param dev
661 * Pointer to Ethernet device.
662 */
663static void
664flow_rxq_flags_clear(struct rte_eth_dev *dev)
665{
666 struct mlx5_priv *priv = dev->data->dev_private;
667 unsigned int i;
668
669 for (i = 0; i != priv->rxqs_n; ++i) {
670 struct mlx5_rxq_ctrl *rxq_ctrl;
671 unsigned int j;
672
673 if (!(*priv->rxqs)[i])
674 continue;
675 rxq_ctrl = container_of((*priv->rxqs)[i],
676 struct mlx5_rxq_ctrl, rxq);
677 rxq_ctrl->flow_mark_n = 0;
678 rxq_ctrl->rxq.mark = 0;
679 for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
680 rxq_ctrl->flow_tunnels_n[j] = 0;
681 rxq_ctrl->rxq.tunnel = 0;
682 }
683}
684
685/*
686 * Validate the flag action.
687 *
688 * @param[in] action_flags
689 * Bit-fields that holds the actions detected until now.
690 * @param[in] attr
691 * Attributes of flow that includes this action.
692 * @param[out] error
693 * Pointer to error structure.
694 *
695 * @return
696 * 0 on success, a negative errno value otherwise and rte_errno is set.
697 */
698int
699mlx5_flow_validate_action_flag(uint64_t action_flags,
700 const struct rte_flow_attr *attr,
701 struct rte_flow_error *error)
702{
703
704 if (action_flags & MLX5_FLOW_ACTION_DROP)
705 return rte_flow_error_set(error, EINVAL,
706 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
707 "can't drop and flag in same flow");
708 if (action_flags & MLX5_FLOW_ACTION_MARK)
709 return rte_flow_error_set(error, EINVAL,
710 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
711 "can't mark and flag in same flow");
712 if (action_flags & MLX5_FLOW_ACTION_FLAG)
713 return rte_flow_error_set(error, EINVAL,
714 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
715 "can't have 2 flag"
716 " actions in same flow");
717 if (attr->egress)
718 return rte_flow_error_set(error, ENOTSUP,
719 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
720 "flag action not supported for "
721 "egress");
722 return 0;
723}
724
725/*
726 * Validate the mark action.
727 *
728 * @param[in] action
729 * Pointer to the queue action.
730 * @param[in] action_flags
731 * Bit-fields that holds the actions detected until now.
732 * @param[in] attr
733 * Attributes of flow that includes this action.
734 * @param[out] error
735 * Pointer to error structure.
736 *
737 * @return
738 * 0 on success, a negative errno value otherwise and rte_errno is set.
739 */
740int
741mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
742 uint64_t action_flags,
743 const struct rte_flow_attr *attr,
744 struct rte_flow_error *error)
745{
746 const struct rte_flow_action_mark *mark = action->conf;
747
748 if (!mark)
749 return rte_flow_error_set(error, EINVAL,
750 RTE_FLOW_ERROR_TYPE_ACTION,
751 action,
752 "configuration cannot be null");
753 if (mark->id >= MLX5_FLOW_MARK_MAX)
754 return rte_flow_error_set(error, EINVAL,
755 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
756 &mark->id,
757 "mark id must in 0 <= id < "
758 RTE_STR(MLX5_FLOW_MARK_MAX));
759 if (action_flags & MLX5_FLOW_ACTION_DROP)
760 return rte_flow_error_set(error, EINVAL,
761 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
762 "can't drop and mark in same flow");
763 if (action_flags & MLX5_FLOW_ACTION_FLAG)
764 return rte_flow_error_set(error, EINVAL,
765 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
766 "can't flag and mark in same flow");
767 if (action_flags & MLX5_FLOW_ACTION_MARK)
768 return rte_flow_error_set(error, EINVAL,
769 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
770 "can't have 2 mark actions in same"
771 " flow");
772 if (attr->egress)
773 return rte_flow_error_set(error, ENOTSUP,
774 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
775 "mark action not supported for "
776 "egress");
777 return 0;
778}
779
780/*
781 * Validate the drop action.
782 *
783 * @param[in] action_flags
784 * Bit-fields that holds the actions detected until now.
785 * @param[in] attr
786 * Attributes of flow that includes this action.
787 * @param[out] error
788 * Pointer to error structure.
789 *
790 * @return
791 * 0 on success, a negative errno value otherwise and rte_errno is set.
792 */
793int
794mlx5_flow_validate_action_drop(uint64_t action_flags,
795 const struct rte_flow_attr *attr,
796 struct rte_flow_error *error)
797{
798 if (action_flags & MLX5_FLOW_ACTION_FLAG)
799 return rte_flow_error_set(error, EINVAL,
800 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
801 "can't drop and flag in same flow");
802 if (action_flags & MLX5_FLOW_ACTION_MARK)
803 return rte_flow_error_set(error, EINVAL,
804 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
805 "can't drop and mark in same flow");
806 if (action_flags & MLX5_FLOW_FATE_ACTIONS)
807 return rte_flow_error_set(error, EINVAL,
808 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
809 "can't have 2 fate actions in"
810 " same flow");
811 if (attr->egress)
812 return rte_flow_error_set(error, ENOTSUP,
813 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
814 "drop action not supported for "
815 "egress");
816 return 0;
817}
818
819/*
820 * Validate the queue action.
821 *
822 * @param[in] action
823 * Pointer to the queue action.
824 * @param[in] action_flags
825 * Bit-fields that holds the actions detected until now.
826 * @param[in] dev
827 * Pointer to the Ethernet device structure.
828 * @param[in] attr
829 * Attributes of flow that includes this action.
830 * @param[out] error
831 * Pointer to error structure.
832 *
833 * @return
834 * 0 on success, a negative errno value otherwise and rte_errno is set.
835 */
836int
837mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
838 uint64_t action_flags,
839 struct rte_eth_dev *dev,
840 const struct rte_flow_attr *attr,
841 struct rte_flow_error *error)
842{
843 struct mlx5_priv *priv = dev->data->dev_private;
844 const struct rte_flow_action_queue *queue = action->conf;
845
846 if (action_flags & MLX5_FLOW_FATE_ACTIONS)
847 return rte_flow_error_set(error, EINVAL,
848 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
849 "can't have 2 fate actions in"
850 " same flow");
851 if (!priv->rxqs_n)
852 return rte_flow_error_set(error, EINVAL,
853 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
854 NULL, "No Rx queues configured");
855 if (queue->index >= priv->rxqs_n)
856 return rte_flow_error_set(error, EINVAL,
857 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
858 &queue->index,
859 "queue index out of range");
860 if (!(*priv->rxqs)[queue->index])
861 return rte_flow_error_set(error, EINVAL,
862 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
863 &queue->index,
864 "queue is not configured");
865 if (attr->egress)
866 return rte_flow_error_set(error, ENOTSUP,
867 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
868 "queue action not supported for "
869 "egress");
870 return 0;
871}
872
873/*
874 * Validate the rss action.
875 *
876 * @param[in] action
877 * Pointer to the queue action.
878 * @param[in] action_flags
879 * Bit-fields that holds the actions detected until now.
880 * @param[in] dev
881 * Pointer to the Ethernet device structure.
882 * @param[in] attr
883 * Attributes of flow that includes this action.
884 * @param[in] item_flags
885 * Items that were detected.
886 * @param[out] error
887 * Pointer to error structure.
888 *
889 * @return
890 * 0 on success, a negative errno value otherwise and rte_errno is set.
891 */
892int
893mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
894 uint64_t action_flags,
895 struct rte_eth_dev *dev,
896 const struct rte_flow_attr *attr,
897 uint64_t item_flags,
898 struct rte_flow_error *error)
899{
900 struct mlx5_priv *priv = dev->data->dev_private;
901 const struct rte_flow_action_rss *rss = action->conf;
902 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
903 unsigned int i;
904
905 if (action_flags & MLX5_FLOW_FATE_ACTIONS)
906 return rte_flow_error_set(error, EINVAL,
907 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
908 "can't have 2 fate actions"
909 " in same flow");
910 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
911 rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
912 return rte_flow_error_set(error, ENOTSUP,
913 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
914 &rss->func,
915 "RSS hash function not supported");
916#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
917 if (rss->level > 2)
918#else
919 if (rss->level > 1)
920#endif
921 return rte_flow_error_set(error, ENOTSUP,
922 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
923 &rss->level,
924 "tunnel RSS is not supported");
925 /* allow RSS key_len 0 in case of NULL (default) RSS key. */
926 if (rss->key_len == 0 && rss->key != NULL)
927 return rte_flow_error_set(error, ENOTSUP,
928 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
929 &rss->key_len,
930 "RSS hash key length 0");
931 if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
932 return rte_flow_error_set(error, ENOTSUP,
933 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
934 &rss->key_len,
935 "RSS hash key too small");
936 if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
937 return rte_flow_error_set(error, ENOTSUP,
938 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
939 &rss->key_len,
940 "RSS hash key too large");
941 if (rss->queue_num > priv->config.ind_table_max_size)
942 return rte_flow_error_set(error, ENOTSUP,
943 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
944 &rss->queue_num,
945 "number of queues too large");
946 if (rss->types & MLX5_RSS_HF_MASK)
947 return rte_flow_error_set(error, ENOTSUP,
948 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
949 &rss->types,
950 "some RSS protocols are not"
951 " supported");
952 if (!priv->rxqs_n)
953 return rte_flow_error_set(error, EINVAL,
954 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
955 NULL, "No Rx queues configured");
956 if (!rss->queue_num)
957 return rte_flow_error_set(error, EINVAL,
958 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
959 NULL, "No queues configured");
960 for (i = 0; i != rss->queue_num; ++i) {
961 if (!(*priv->rxqs)[rss->queue[i]])
962 return rte_flow_error_set
963 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
964 &rss->queue[i], "queue is not configured");
965 }
966 if (attr->egress)
967 return rte_flow_error_set(error, ENOTSUP,
968 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
969 "rss action not supported for "
970 "egress");
971 if (rss->level > 1 && !tunnel)
972 return rte_flow_error_set(error, EINVAL,
973 RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
974 "inner RSS is not supported for "
975 "non-tunnel flows");
976 return 0;
977}
978
979/*
980 * Validate the count action.
981 *
982 * @param[in] dev
983 * Pointer to the Ethernet device structure.
984 * @param[in] attr
985 * Attributes of flow that includes this action.
986 * @param[out] error
987 * Pointer to error structure.
988 *
989 * @return
990 * 0 on success, a negative errno value otherwise and rte_errno is set.
991 */
992int
993mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
994 const struct rte_flow_attr *attr,
995 struct rte_flow_error *error)
996{
997 if (attr->egress)
998 return rte_flow_error_set(error, ENOTSUP,
999 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1000 "count action not supported for "
1001 "egress");
1002 return 0;
1003}
1004
1005/**
1006 * Verify the @p attributes will be correctly understood by the NIC and store
1007 * them in the @p flow if everything is correct.
1008 *
1009 * @param[in] dev
1010 * Pointer to the Ethernet device structure.
1011 * @param[in] attributes
1012 * Pointer to flow attributes
1013 * @param[out] error
1014 * Pointer to error structure.
1015 *
1016 * @return
1017 * 0 on success, a negative errno value otherwise and rte_errno is set.
1018 */
1019int
1020mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
1021 const struct rte_flow_attr *attributes,
1022 struct rte_flow_error *error)
1023{
1024 struct mlx5_priv *priv = dev->data->dev_private;
1025 uint32_t priority_max = priv->config.flow_prio - 1;
1026
1027 if (attributes->group)
1028 return rte_flow_error_set(error, ENOTSUP,
1029 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1030 NULL, "groups is not supported");
1031 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1032 attributes->priority >= priority_max)
1033 return rte_flow_error_set(error, ENOTSUP,
1034 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1035 NULL, "priority out of range");
1036 if (attributes->egress)
1037 return rte_flow_error_set(error, ENOTSUP,
1038 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1039 "egress is not supported");
1040 if (attributes->transfer)
1041 return rte_flow_error_set(error, ENOTSUP,
1042 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1043 NULL, "transfer is not supported");
1044 if (!attributes->ingress)
1045 return rte_flow_error_set(error, EINVAL,
1046 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1047 NULL,
1048 "ingress attribute is mandatory");
1049 return 0;
1050}
1051
1052/**
1053 * Validate Ethernet item.
1054 *
1055 * @param[in] item
1056 * Item specification.
1057 * @param[in] item_flags
1058 * Bit-fields that holds the items detected until now.
1059 * @param[out] error
1060 * Pointer to error structure.
1061 *
1062 * @return
1063 * 0 on success, a negative errno value otherwise and rte_errno is set.
1064 */
1065int
1066mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
1067 uint64_t item_flags,
1068 struct rte_flow_error *error)
1069{
1070 const struct rte_flow_item_eth *mask = item->mask;
1071 const struct rte_flow_item_eth nic_mask = {
1072 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1073 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1074 .type = RTE_BE16(0xffff),
1075 };
1076 int ret;
1077 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1078 const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1079 MLX5_FLOW_LAYER_OUTER_L2;
1080
1081 if (item_flags & ethm)
1082 return rte_flow_error_set(error, ENOTSUP,
1083 RTE_FLOW_ERROR_TYPE_ITEM, item,
1084 "multiple L2 layers not supported");
1085 if (!mask)
1086 mask = &rte_flow_item_eth_mask;
1087 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1088 (const uint8_t *)&nic_mask,
1089 sizeof(struct rte_flow_item_eth),
1090 error);
1091 return ret;
1092}
1093
1094/**
1095 * Validate VLAN item.
1096 *
1097 * @param[in] item
1098 * Item specification.
1099 * @param[in] item_flags
1100 * Bit-fields that holds the items detected until now.
1101 * @param[out] error
1102 * Pointer to error structure.
1103 *
1104 * @return
1105 * 0 on success, a negative errno value otherwise and rte_errno is set.
1106 */
1107int
1108mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
1109 uint64_t item_flags,
1110 struct rte_flow_error *error)
1111{
1112 const struct rte_flow_item_vlan *spec = item->spec;
1113 const struct rte_flow_item_vlan *mask = item->mask;
1114 const struct rte_flow_item_vlan nic_mask = {
1115 .tci = RTE_BE16(0x0fff),
1116 .inner_type = RTE_BE16(0xffff),
1117 };
1118 uint16_t vlan_tag = 0;
1119 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1120 int ret;
1121 const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1122 MLX5_FLOW_LAYER_INNER_L4) :
1123 (MLX5_FLOW_LAYER_OUTER_L3 |
1124 MLX5_FLOW_LAYER_OUTER_L4);
1125 const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1126 MLX5_FLOW_LAYER_OUTER_VLAN;
1127
1128 if (item_flags & vlanm)
1129 return rte_flow_error_set(error, EINVAL,
1130 RTE_FLOW_ERROR_TYPE_ITEM, item,
1131 "multiple VLAN layers not supported");
1132 else if ((item_flags & l34m) != 0)
1133 return rte_flow_error_set(error, EINVAL,
1134 RTE_FLOW_ERROR_TYPE_ITEM, item,
1135 "L2 layer cannot follow L3/L4 layer");
1136 if (!mask)
1137 mask = &rte_flow_item_vlan_mask;
1138 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1139 (const uint8_t *)&nic_mask,
1140 sizeof(struct rte_flow_item_vlan),
1141 error);
1142 if (ret)
1143 return ret;
1144 if (spec) {
1145 vlan_tag = spec->tci;
1146 vlan_tag &= mask->tci;
1147 }
1148 /*
1149 * From verbs perspective an empty VLAN is equivalent
1150 * to a packet without VLAN layer.
1151 */
1152 if (!vlan_tag)
1153 return rte_flow_error_set(error, EINVAL,
1154 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1155 item->spec,
1156 "VLAN cannot be empty");
1157 return 0;
1158}
1159
1160/**
1161 * Validate IPV4 item.
1162 *
1163 * @param[in] item
1164 * Item specification.
1165 * @param[in] item_flags
1166 * Bit-fields that holds the items detected until now.
1167 * @param[in] acc_mask
1168 * Acceptable mask, if NULL default internal default mask
1169 * will be used to check whether item fields are supported.
1170 * @param[out] error
1171 * Pointer to error structure.
1172 *
1173 * @return
1174 * 0 on success, a negative errno value otherwise and rte_errno is set.
1175 */
1176int
1177mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
1178 uint64_t item_flags,
1179 const struct rte_flow_item_ipv4 *acc_mask,
1180 struct rte_flow_error *error)
1181{
1182 const struct rte_flow_item_ipv4 *mask = item->mask;
1183 const struct rte_flow_item_ipv4 nic_mask = {
1184 .hdr = {
1185 .src_addr = RTE_BE32(0xffffffff),
1186 .dst_addr = RTE_BE32(0xffffffff),
1187 .type_of_service = 0xff,
1188 .next_proto_id = 0xff,
1189 },
1190 };
1191 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1192 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1193 MLX5_FLOW_LAYER_OUTER_L3;
1194 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1195 MLX5_FLOW_LAYER_OUTER_L4;
1196 int ret;
1197
1198 if (item_flags & l3m)
1199 return rte_flow_error_set(error, ENOTSUP,
1200 RTE_FLOW_ERROR_TYPE_ITEM, item,
1201 "multiple L3 layers not supported");
1202 else if (item_flags & l4m)
1203 return rte_flow_error_set(error, EINVAL,
1204 RTE_FLOW_ERROR_TYPE_ITEM, item,
1205 "L3 cannot follow an L4 layer.");
1206 if (!mask)
1207 mask = &rte_flow_item_ipv4_mask;
1208 else if (mask->hdr.next_proto_id != 0 &&
1209 mask->hdr.next_proto_id != 0xff)
1210 return rte_flow_error_set(error, EINVAL,
1211 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1212 "partial mask is not supported"
1213 " for protocol");
1214 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1215 acc_mask ? (const uint8_t *)acc_mask
1216 : (const uint8_t *)&nic_mask,
1217 sizeof(struct rte_flow_item_ipv4),
1218 error);
1219 if (ret < 0)
1220 return ret;
1221 return 0;
1222}
1223
1224/**
1225 * Validate IPV6 item.
1226 *
1227 * @param[in] item
1228 * Item specification.
1229 * @param[in] item_flags
1230 * Bit-fields that holds the items detected until now.
1231 * @param[in] acc_mask
1232 * Acceptable mask, if NULL default internal default mask
1233 * will be used to check whether item fields are supported.
1234 * @param[out] error
1235 * Pointer to error structure.
1236 *
1237 * @return
1238 * 0 on success, a negative errno value otherwise and rte_errno is set.
1239 */
1240int
1241mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
1242 uint64_t item_flags,
1243 const struct rte_flow_item_ipv6 *acc_mask,
1244 struct rte_flow_error *error)
1245{
1246 const struct rte_flow_item_ipv6 *mask = item->mask;
1247 const struct rte_flow_item_ipv6 nic_mask = {
1248 .hdr = {
1249 .src_addr =
1250 "\xff\xff\xff\xff\xff\xff\xff\xff"
1251 "\xff\xff\xff\xff\xff\xff\xff\xff",
1252 .dst_addr =
1253 "\xff\xff\xff\xff\xff\xff\xff\xff"
1254 "\xff\xff\xff\xff\xff\xff\xff\xff",
1255 .vtc_flow = RTE_BE32(0xffffffff),
1256 .proto = 0xff,
1257 .hop_limits = 0xff,
1258 },
1259 };
1260 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1261 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1262 MLX5_FLOW_LAYER_OUTER_L3;
1263 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1264 MLX5_FLOW_LAYER_OUTER_L4;
1265 int ret;
1266
1267 if (item_flags & l3m)
1268 return rte_flow_error_set(error, ENOTSUP,
1269 RTE_FLOW_ERROR_TYPE_ITEM, item,
1270 "multiple L3 layers not supported");
1271 else if (item_flags & l4m)
1272 return rte_flow_error_set(error, EINVAL,
1273 RTE_FLOW_ERROR_TYPE_ITEM, item,
1274 "L3 cannot follow an L4 layer.");
1275 if (!mask)
1276 mask = &rte_flow_item_ipv6_mask;
1277 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1278 acc_mask ? (const uint8_t *)acc_mask
1279 : (const uint8_t *)&nic_mask,
1280 sizeof(struct rte_flow_item_ipv6),
1281 error);
1282 if (ret < 0)
1283 return ret;
1284 return 0;
1285}
1286
1287/**
1288 * Validate UDP item.
1289 *
1290 * @param[in] item
1291 * Item specification.
1292 * @param[in] item_flags
1293 * Bit-fields that holds the items detected until now.
1294 * @param[in] target_protocol
1295 * The next protocol in the previous item.
1296 * @param[in] flow_mask
1297 * mlx5 flow-specific (TCF, DV, verbs, etc.) supported header fields mask.
1298 * @param[out] error
1299 * Pointer to error structure.
1300 *
1301 * @return
1302 * 0 on success, a negative errno value otherwise and rte_errno is set.
1303 */
1304int
1305mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
1306 uint64_t item_flags,
1307 uint8_t target_protocol,
1308 struct rte_flow_error *error)
11fdf7f2 1309{
11fdf7f2 1310 const struct rte_flow_item_udp *mask = item->mask;
1311 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1312 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1313 MLX5_FLOW_LAYER_OUTER_L3;
1314 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1315 MLX5_FLOW_LAYER_OUTER_L4;
1316 int ret;
1317
1318 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
1319 return rte_flow_error_set(error, EINVAL,
1320 RTE_FLOW_ERROR_TYPE_ITEM, item,
1321 "protocol filtering not compatible"
1322 " with UDP layer");
1323 if (!(item_flags & l3m))
1324 return rte_flow_error_set(error, EINVAL,
1325 RTE_FLOW_ERROR_TYPE_ITEM, item,
1326 "L3 is mandatory to filter on L4");
1327 if (item_flags & l4m)
1328 return rte_flow_error_set(error, EINVAL,
1329 RTE_FLOW_ERROR_TYPE_ITEM, item,
1330 "multiple L4 layers not supported");
1331 if (!mask)
1332 mask = &rte_flow_item_udp_mask;
1333 ret = mlx5_flow_item_acceptable
1334 (item, (const uint8_t *)mask,
1335 (const uint8_t *)&rte_flow_item_udp_mask,
1336 sizeof(struct rte_flow_item_udp), error);
1337 if (ret < 0)
1338 return ret;
1339 return 0;
1340}
1341
1342/**
1343 * Validate TCP item.
1344 *
1345 * @param[in] item
1346 * Item specification.
1347 * @param[in] item_flags
1348 * Bit-fields that holds the items detected until now.
1349 * @param[in] target_protocol
1350 * The next protocol in the previous item.
1351 * @param[out] error
1352 * Pointer to error structure.
1353 *
1354 * @return
1355 * 0 on success, a negative errno value otherwise and rte_errno is set.
1356 */
1357int
1358mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
1359 uint64_t item_flags,
1360 uint8_t target_protocol,
1361 const struct rte_flow_item_tcp *flow_mask,
1362 struct rte_flow_error *error)
11fdf7f2 1363{
11fdf7f2 1364 const struct rte_flow_item_tcp *mask = item->mask;
1365 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1366 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1367 MLX5_FLOW_LAYER_OUTER_L3;
1368 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1369 MLX5_FLOW_LAYER_OUTER_L4;
1370 int ret;
1371
1372 assert(flow_mask);
1373 if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
1374 return rte_flow_error_set(error, EINVAL,
1375 RTE_FLOW_ERROR_TYPE_ITEM, item,
1376 "protocol filtering not compatible"
1377 " with TCP layer");
1378 if (!(item_flags & l3m))
1379 return rte_flow_error_set(error, EINVAL,
1380 RTE_FLOW_ERROR_TYPE_ITEM, item,
1381 "L3 is mandatory to filter on L4");
1382 if (item_flags & l4m)
1383 return rte_flow_error_set(error, EINVAL,
1384 RTE_FLOW_ERROR_TYPE_ITEM, item,
1385 "multiple L4 layers not supported");
1386 if (!mask)
1387 mask = &rte_flow_item_tcp_mask;
1388 ret = mlx5_flow_item_acceptable
1389 (item, (const uint8_t *)mask,
1390 (const uint8_t *)flow_mask,
1391 sizeof(struct rte_flow_item_tcp), error);
1392 if (ret < 0)
1393 return ret;
1394 return 0;
1395}
1396
1397/**
1398 * Validate VXLAN item.
1399 *
1400 * @param[in] item
1401 * Item specification.
1402 * @param[in] item_flags
1403 * Bit-fields that holds the items detected until now.
1404 * @param[in] target_protocol
1405 * The next protocol in the previous item.
1406 * @param[out] error
1407 * Pointer to error structure.
1408 *
1409 * @return
1410 * 0 on success, a negative errno value otherwise and rte_errno is set.
1411 */
1412int
1413mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
1414 uint64_t item_flags,
1415 struct rte_flow_error *error)
1416{
1417 const struct rte_flow_item_vxlan *spec = item->spec;
1418 const struct rte_flow_item_vxlan *mask = item->mask;
1419 int ret;
1420 union vni {
1421 uint32_t vlan_id;
1422 uint8_t vni[4];
1423 } id = { .vlan_id = 0, };
1424 uint32_t vlan_id = 0;
1425
1426
1427 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1428 return rte_flow_error_set(error, ENOTSUP,
1429 RTE_FLOW_ERROR_TYPE_ITEM, item,
1430 "multiple tunnel layers not"
1431 " supported");
11fdf7f2
TL
1432 /*
1433 * Verify only UDPv4 is present as defined in
1434 * https://tools.ietf.org/html/rfc7348
1435 */
1436 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1437 return rte_flow_error_set(error, EINVAL,
1438 RTE_FLOW_ERROR_TYPE_ITEM, item,
1439 "no outer UDP layer found");
1440 if (!mask)
1441 mask = &rte_flow_item_vxlan_mask;
1442 ret = mlx5_flow_item_acceptable
1443 (item, (const uint8_t *)mask,
1444 (const uint8_t *)&rte_flow_item_vxlan_mask,
1445 sizeof(struct rte_flow_item_vxlan),
1446 error);
1447 if (ret < 0)
1448 return ret;
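	/*
	 * The 24-bit VNI is copied into bytes 1..3 of the 32-bit overlay union
	 * so it can be masked and compared as a single integer.
	 */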
1449 if (spec) {
1450 memcpy(&id.vni[1], spec->vni, 3);
1451 vlan_id = id.vlan_id;
1452 memcpy(&id.vni[1], mask->vni, 3);
1453 vlan_id &= id.vlan_id;
1454 }
1455 /*
1456 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if
1457 * only this layer is defined in the Verbs specification it is
1458 * interpreted as a wildcard and all packets will match this
1459 * rule, and if it follows a full stack layer (ex: eth / ipv4 /
1460 * udp), all packets matching the layers before will also
1461 * match this rule. To avoid such a situation, VNI 0 is
1462 * currently refused.
1463 */
1464 if (!vlan_id)
1465 return rte_flow_error_set(error, ENOTSUP,
1466 RTE_FLOW_ERROR_TYPE_ITEM, item,
11fdf7f2 1467 "VXLAN vni cannot be 0");
1468 if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1469 return rte_flow_error_set(error, ENOTSUP,
1470 RTE_FLOW_ERROR_TYPE_ITEM, item,
11fdf7f2 1471 "VXLAN tunnel must be fully defined");
9f95a23c 1472 return 0;
1473}
1474
1475/**
1476 * Validate VXLAN_GPE item.
1477 *
1478 * @param[in] item
1479 * Item specification.
1480 * @param[in] item_flags
1481 * Bit-fields that holds the items detected until now.
1482 * @param[in] priv
1483 * Pointer to the private data structure.
1484 * @param[in] target_protocol
1485 * The next protocol in the previous item.
1486 * @param[out] error
1487 * Pointer to error structure.
1488 *
1489 * @return
1490 * 0 on success, a negative errno value otherwise and rte_errno is set.
1491 */
1492int
1493mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
1494 uint64_t item_flags,
1495 struct rte_eth_dev *dev,
1496 struct rte_flow_error *error)
1497{
1498 struct mlx5_priv *priv = dev->data->dev_private;
1499 const struct rte_flow_item_vxlan_gpe *spec = item->spec;
1500 const struct rte_flow_item_vxlan_gpe *mask = item->mask;
1501 int ret;
1502 union vni {
1503 uint32_t vlan_id;
1504 uint8_t vni[4];
1505 } id = { .vlan_id = 0, };
1506 uint32_t vlan_id = 0;
1507
1508 if (!priv->config.l3_vxlan_en)
1509 return rte_flow_error_set(error, ENOTSUP,
1510 RTE_FLOW_ERROR_TYPE_ITEM, item,
1511 "L3 VXLAN is not enabled by device"
1512 " parameter and/or not configured in"
1513 " firmware");
1514 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1515 return rte_flow_error_set(error, ENOTSUP,
1516 RTE_FLOW_ERROR_TYPE_ITEM, item,
1517 "multiple tunnel layers not"
1518 " supported");
11fdf7f2
TL
1519 /*
1520 * Verify only UDPv4 is present as defined in
1521 * https://tools.ietf.org/html/rfc7348
1522 */
1523 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1524 return rte_flow_error_set(error, EINVAL,
1525 RTE_FLOW_ERROR_TYPE_ITEM, item,
1526 "no outer UDP layer found");
1527 if (!mask)
1528 mask = &rte_flow_item_vxlan_gpe_mask;
1529 ret = mlx5_flow_item_acceptable
1530 (item, (const uint8_t *)mask,
1531 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
1532 sizeof(struct rte_flow_item_vxlan_gpe),
1533 error);
1534 if (ret < 0)
1535 return ret;
1536 if (spec) {
1537 if (spec->protocol)
1538 return rte_flow_error_set(error, ENOTSUP,
1539 RTE_FLOW_ERROR_TYPE_ITEM,
1540 item,
1541 "VxLAN-GPE protocol"
1542 " not supported");
1543 memcpy(&id.vni[1], spec->vni, 3);
1544 vlan_id = id.vlan_id;
1545 memcpy(&id.vni[1], mask->vni, 3);
1546 vlan_id &= id.vlan_id;
1547 }
1548 /*
1549 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this
1550 * layer is defined in the Verbs specification it is interpreted as a
1551 * wildcard and all packets will match this rule, and if it follows a full
1552 * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
1553 * before will also match this rule. To avoid such a situation, VNI 0
1554 * is currently refused.
1555 */
1556 if (!vlan_id)
1557 return rte_flow_error_set(error, ENOTSUP,
1558 RTE_FLOW_ERROR_TYPE_ITEM, item,
11fdf7f2 1559 "VXLAN-GPE vni cannot be 0");
1560 if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1561 return rte_flow_error_set(error, ENOTSUP,
1562 RTE_FLOW_ERROR_TYPE_ITEM, item,
1563 "VXLAN-GPE tunnel must be fully"
1564 " defined");
1565 return 0;
1566}
1567
1568/**
1569 * Validate GRE item.
1570 *
1571 * @param[in] item
1572 * Item specification.
1573 * @param[in] item_flags
1574 * Bit flags to mark detected items.
1575 * @param[in] target_protocol
1576 * The next protocol in the previous item.
1577 * @param[out] error
1578 * Pointer to error structure.
1579 *
1580 * @return
1581 * 0 on success, a negative errno value otherwise and rte_errno is set.
1582 */
1583int
1584mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
1585 uint64_t item_flags,
1586 uint8_t target_protocol,
1587 struct rte_flow_error *error)
1588{
1589 const struct rte_flow_item_gre *spec __rte_unused = item->spec;
1590 const struct rte_flow_item_gre *mask = item->mask;
1591 int ret;
1592
1593 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
1594 return rte_flow_error_set(error, EINVAL,
1595 RTE_FLOW_ERROR_TYPE_ITEM, item,
11fdf7f2
TL
1596 "protocol filtering not compatible"
1597 " with this GRE layer");
1598 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1599 return rte_flow_error_set(error, ENOTSUP,
1600 RTE_FLOW_ERROR_TYPE_ITEM, item,
1601 "multiple tunnel layers not"
1602 " supported");
1603 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
1604 return rte_flow_error_set(error, ENOTSUP,
1605 RTE_FLOW_ERROR_TYPE_ITEM, item,
1606 "L3 Layer is missing");
1607 if (!mask)
1608 mask = &rte_flow_item_gre_mask;
1609 ret = mlx5_flow_item_acceptable
1610 (item, (const uint8_t *)mask,
1611 (const uint8_t *)&rte_flow_item_gre_mask,
1612 sizeof(struct rte_flow_item_gre), error);
1613 if (ret < 0)
1614 return ret;
1615#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
1616 if (spec && (spec->protocol & mask->protocol))
1617 return rte_flow_error_set(error, ENOTSUP,
1618 RTE_FLOW_ERROR_TYPE_ITEM, item,
1619 "without MPLS support the"
1620 " specification cannot be used for"
1621 " filtering");
1622#endif
1623 return 0;
1624}
1625
1626/**
1627 * Validate MPLS item.
1628 *
1629 * @param[in] dev
1630 * Pointer to the rte_eth_dev structure.
1631 * @param[in] item
1632 * Item specification.
1633 * @param[in] item_flags
1634 * Bit-fields that holds the items detected until now.
1635 * @param[in] prev_layer
1636 * The protocol layer indicated in previous item.
1637 * @param[out] error
1638 * Pointer to error structure.
1639 *
1640 * @return
1641 * 0 on success, a negative errno value otherwise and rte_errno is set.
1642 */
1643int
1644mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
1645 const struct rte_flow_item *item __rte_unused,
1646 uint64_t item_flags __rte_unused,
1647 uint64_t prev_layer __rte_unused,
1648 struct rte_flow_error *error)
1649{
1650#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1651 const struct rte_flow_item_mpls *mask = item->mask;
1652 struct mlx5_priv *priv = dev->data->dev_private;
1653 int ret;
1654
1655 if (!priv->config.mpls_en)
1656 return rte_flow_error_set(error, ENOTSUP,
1657 RTE_FLOW_ERROR_TYPE_ITEM, item,
1658 "MPLS not supported or"
1659 " disabled in firmware"
1660 " configuration.");
1661 /* MPLS over IP, UDP, GRE is allowed */
1662 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
1663 MLX5_FLOW_LAYER_OUTER_L4_UDP |
1664 MLX5_FLOW_LAYER_GRE)))
1665 return rte_flow_error_set(error, EINVAL,
1666 RTE_FLOW_ERROR_TYPE_ITEM, item,
1667 "protocol filtering not compatible"
1668 " with MPLS layer");
1669 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
1670 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
1671 !(item_flags & MLX5_FLOW_LAYER_GRE))
1672 return rte_flow_error_set(error, ENOTSUP,
1673 RTE_FLOW_ERROR_TYPE_ITEM, item,
1674 "multiple tunnel layers not"
1675 " supported");
1676 if (!mask)
1677 mask = &rte_flow_item_mpls_mask;
1678 ret = mlx5_flow_item_acceptable
1679 (item, (const uint8_t *)mask,
1680 (const uint8_t *)&rte_flow_item_mpls_mask,
1681 sizeof(struct rte_flow_item_mpls), error);
1682 if (ret < 0)
1683 return ret;
1684 return 0;
1685#endif
1686 return rte_flow_error_set(error, ENOTSUP,
1687 RTE_FLOW_ERROR_TYPE_ITEM, item,
1688 "MPLS is not supported by Verbs, please"
1689 " update.");
1690}
1691
1692static int
1693flow_null_validate(struct rte_eth_dev *dev __rte_unused,
1694 const struct rte_flow_attr *attr __rte_unused,
1695 const struct rte_flow_item items[] __rte_unused,
1696 const struct rte_flow_action actions[] __rte_unused,
1697 struct rte_flow_error *error __rte_unused)
1698{
1699 rte_errno = ENOTSUP;
1700 return -rte_errno;
11fdf7f2
TL
1701}
1702
1703static struct mlx5_flow *
1704flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
1705 const struct rte_flow_item items[] __rte_unused,
1706 const struct rte_flow_action actions[] __rte_unused,
1707 struct rte_flow_error *error __rte_unused)
1708{
1709 rte_errno = ENOTSUP;
1710 return NULL;
1711}
1712
1713static int
1714flow_null_translate(struct rte_eth_dev *dev __rte_unused,
1715 struct mlx5_flow *dev_flow __rte_unused,
1716 const struct rte_flow_attr *attr __rte_unused,
1717 const struct rte_flow_item items[] __rte_unused,
1718 const struct rte_flow_action actions[] __rte_unused,
1719 struct rte_flow_error *error __rte_unused)
1720{
1721 rte_errno = ENOTSUP;
1722 return -rte_errno;
1723}
1724
1725static int
1726flow_null_apply(struct rte_eth_dev *dev __rte_unused,
1727 struct rte_flow *flow __rte_unused,
1728 struct rte_flow_error *error __rte_unused)
1729{
1730 rte_errno = ENOTSUP;
1731 return -rte_errno;
1732}
1733
1734static void
1735flow_null_remove(struct rte_eth_dev *dev __rte_unused,
1736 struct rte_flow *flow __rte_unused)
1737{
1738}
1739
1740static void
1741flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
1742 struct rte_flow *flow __rte_unused)
1743{
1744}
1745
11fdf7f2 1746static int
1747flow_null_query(struct rte_eth_dev *dev __rte_unused,
1748 struct rte_flow *flow __rte_unused,
1749 const struct rte_flow_action *actions __rte_unused,
1750 void *data __rte_unused,
1751 struct rte_flow_error *error __rte_unused)
11fdf7f2 1752{
1753 rte_errno = ENOTSUP;
1754 return -rte_errno;
1755}
1756
 1757/* Void driver to protect against NULL pointer dereference. */
1758const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
1759 .validate = flow_null_validate,
1760 .prepare = flow_null_prepare,
1761 .translate = flow_null_translate,
1762 .apply = flow_null_apply,
1763 .remove = flow_null_remove,
1764 .destroy = flow_null_destroy,
1765 .query = flow_null_query,
1766};
1767
11fdf7f2 1768/**
1769 * Select flow driver type according to flow attributes and device
1770 * configuration.
1771 *
1772 * @param[in] dev
1773 * Pointer to the dev structure.
1774 * @param[in] attr
1775 * Pointer to the flow attributes.
1776 *
1777 * @return
9f95a23c 1778 *   Flow driver type on success, MLX5_FLOW_TYPE_MAX otherwise.
11fdf7f2 1779 */
1780static enum mlx5_flow_drv_type
1781flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
1782{
1783 struct mlx5_priv *priv = dev->data->dev_private;
1784 enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
11fdf7f2 1785
1786 if (attr->transfer && !priv->config.dv_esw_en)
1787 type = MLX5_FLOW_TYPE_TCF;
1788 else
1789 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
1790 MLX5_FLOW_TYPE_VERBS;
1791 return type;
1792}
1793
1794#define flow_get_drv_ops(type) flow_drv_ops[type]
1795
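/*
 * Illustrative sketch only (not from the original sources): how the driver
 * type and the ops table above combine. flow_drv_validate() below is the
 * real wrapper doing exactly this.
 *
 *	enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
 *	const struct mlx5_flow_driver_ops *fops = flow_get_drv_ops(type);
 *
 *	return fops->validate(dev, attr, items, actions, error);
 */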
11fdf7f2 1796/**
 1797 * Flow driver validation API. This abstracts calling driver-specific
 1798 * functions. The flow driver type is selected from the flow attributes.
1799 *
1800 * @param[in] dev
1801 * Pointer to the dev structure.
1802 * @param[in] attr
1803 * Pointer to the flow attributes.
1804 * @param[in] items
1805 * Pointer to the list of items.
11fdf7f2 1806 * @param[in] actions
9f95a23c 1807 * Pointer to the list of actions.
11fdf7f2 1808 * @param[out] error
9f95a23c 1809 * Pointer to the error structure.
1810 *
1811 * @return
9f95a23c 1812 * 0 on success, a negative errno value otherwise and rte_errno is set.
11fdf7f2 1813 */
1814static inline int
1815flow_drv_validate(struct rte_eth_dev *dev,
1816 const struct rte_flow_attr *attr,
1817 const struct rte_flow_item items[],
11fdf7f2 1818 const struct rte_flow_action actions[],
1819 struct rte_flow_error *error)
1820{
1821 const struct mlx5_flow_driver_ops *fops;
1822 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
11fdf7f2 1823
1824 fops = flow_get_drv_ops(type);
1825 return fops->validate(dev, attr, items, actions, error);
1826}
1827
1828/**
 1829 * Flow driver preparation API. This abstracts calling driver-specific
 1830 * functions. The parent flow (rte_flow) should have its driver type
 1831 * (drv_type) set. It calculates the size of memory required for the device
 1832 * flow, allocates it, initializes the device flow and returns the pointer.
 1833 *
 1834 * @note
 1835 * This function only initializes the driver-specific part (dv, tcf or verbs)
 1836 * of struct mlx5_flow. It is the caller's responsibility to initialize the
 1837 * rest, e.g. adding the returned device flow to the flow->dev_flows list and
 1838 * setting the backward reference to the parent flow must be done outside of
 1839 * this function. The layers field is not filled either.
11fdf7f2 1840 *
11fdf7f2 1841 * @param[in] attr
1842 * Pointer to the flow attributes.
1843 * @param[in] items
1844 * Pointer to the list of items.
11fdf7f2 1845 * @param[in] actions
9f95a23c 1846 * Pointer to the list of actions.
11fdf7f2 1847 * @param[out] error
9f95a23c 1848 * Pointer to the error structure.
1849 *
1850 * @return
9f95a23c 1851 * Pointer to device flow on success, otherwise NULL and rte_errno is set.
11fdf7f2 1852 */
1853static inline struct mlx5_flow *
1854flow_drv_prepare(const struct rte_flow *flow,
1855 const struct rte_flow_attr *attr,
1856 const struct rte_flow_item items[],
1857 const struct rte_flow_action actions[],
1858 struct rte_flow_error *error)
11fdf7f2 1859{
1860 const struct mlx5_flow_driver_ops *fops;
1861 enum mlx5_flow_drv_type type = flow->drv_type;
11fdf7f2 1862
1863 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
1864 fops = flow_get_drv_ops(type);
1865 return fops->prepare(attr, items, actions, error);
1866}
1867
1868/**
 1869 * Flow driver translation API. This abstracts calling driver-specific
 1870 * functions. The parent flow (rte_flow) should have its driver type
 1871 * (drv_type) set. It translates a generic flow into a driver flow.
 1872 * flow_drv_prepare() must precede.
1873 *
1874 * @note
1875 * dev_flow->layers could be filled as a result of parsing during translation
1876 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
1877 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
1878 * flow->actions could be overwritten even though all the expanded dev_flows
1879 * have the same actions.
1880 *
1881 * @param[in] dev
1882 * Pointer to the rte dev structure.
1883 * @param[in, out] dev_flow
1884 * Pointer to the mlx5 flow.
1885 * @param[in] attr
1886 * Pointer to the flow attributes.
1887 * @param[in] items
1888 * Pointer to the list of items.
11fdf7f2 1889 * @param[in] actions
9f95a23c 1890 * Pointer to the list of actions.
11fdf7f2 1891 * @param[out] error
9f95a23c 1892 * Pointer to the error structure.
1893 *
1894 * @return
9f95a23c 1895 * 0 on success, a negative errno value otherwise and rte_errno is set.
11fdf7f2 1896 */
1897static inline int
1898flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
1899 const struct rte_flow_attr *attr,
1900 const struct rte_flow_item items[],
1901 const struct rte_flow_action actions[],
1902 struct rte_flow_error *error)
11fdf7f2 1903{
1904 const struct mlx5_flow_driver_ops *fops;
1905 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
11fdf7f2 1906
1907 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
1908 fops = flow_get_drv_ops(type);
1909 return fops->translate(dev, dev_flow, attr, items, actions, error);
1910}
1911
1912/**
 1913 * Flow driver apply API. This abstracts calling driver-specific functions.
 1914 * Parent flow (rte_flow) should have driver type (drv_type) set. It applies
 1915 * translated driver flows onto the device. flow_drv_translate() must precede.
1916 *
1917 * @param[in] dev
1918 * Pointer to Ethernet device structure.
1919 * @param[in, out] flow
11fdf7f2 1920 * Pointer to flow structure.
1921 * @param[out] error
1922 * Pointer to error structure.
1923 *
1924 * @return
1925 * 0 on success, a negative errno value otherwise and rte_errno is set.
1926 */
1927static inline int
1928flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1929 struct rte_flow_error *error)
1930{
1931 const struct mlx5_flow_driver_ops *fops;
1932 enum mlx5_flow_drv_type type = flow->drv_type;
11fdf7f2 1933
1934 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
1935 fops = flow_get_drv_ops(type);
1936 return fops->apply(dev, flow, error);
1937}
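/*
 * Illustrative sketch only (not part of the original driver): the call order
 * implied by the wrappers above, i.e. validate, prepare, translate and
 * finally apply. The caller must have set flow->drv_type (e.g. with
 * flow_get_drv_type()) and initialized flow->dev_flows with LIST_INIT()
 * beforehand; flow_list_create() below is the real user of this sequence.
 */
static int __rte_unused
flow_drv_example_sequence(struct rte_eth_dev *dev, struct rte_flow *flow,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item items[],
			  const struct rte_flow_action actions[],
			  struct rte_flow_error *error)
{
	struct mlx5_flow *dev_flow;
	int ret;

	ret = flow_drv_validate(dev, attr, items, actions, error);
	if (ret < 0)
		return ret;
	dev_flow = flow_drv_prepare(flow, attr, items, actions, error);
	if (!dev_flow)
		return -rte_errno;
	/* Caller responsibilities documented in flow_drv_prepare(). */
	dev_flow->flow = flow;
	LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
	ret = flow_drv_translate(dev, dev_flow, attr, items, actions, error);
	if (ret < 0)
		return ret;
	return flow_drv_apply(dev, flow, error);
}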
1938
1939/**
 1940 * Flow driver remove API. This abstracts calling driver-specific functions.
 1941 * Parent flow (rte_flow) should have driver type (drv_type) set. It removes
 1942 * the flow from the device. All the resources of the flow must be freed by
 1943 * calling flow_drv_destroy().
11fdf7f2 1944 *
9f95a23c 1945 * @param[in] dev
11fdf7f2 1946 * Pointer to Ethernet device.
1947 * @param[in, out] flow
1948 * Pointer to flow structure.
11fdf7f2 1949 */
1950static inline void
1951flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
11fdf7f2 1952{
1953 const struct mlx5_flow_driver_ops *fops;
1954 enum mlx5_flow_drv_type type = flow->drv_type;
11fdf7f2 1955
1956 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
1957 fops = flow_get_drv_ops(type);
1958 fops->remove(dev, flow);
1959}
1960
1961/**
 1962 * Flow driver destroy API. This abstracts calling driver-specific functions.
 1963 * Parent flow (rte_flow) should have driver type (drv_type) set. It removes
 1964 * the flow from the device and releases all of its resources.
11fdf7f2 1965 *
9f95a23c 1966 * @param[in] dev
11fdf7f2 1967 * Pointer to Ethernet device.
1968 * @param[in, out] flow
1969 * Pointer to flow structure.
11fdf7f2 1970 */
1971static inline void
1972flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
11fdf7f2 1973{
1974 const struct mlx5_flow_driver_ops *fops;
1975 enum mlx5_flow_drv_type type = flow->drv_type;
11fdf7f2 1976
1977 assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
1978 fops = flow_get_drv_ops(type);
1979 fops->destroy(dev, flow);
1980}
1981
1982/**
1983 * Validate a flow supported by the NIC.
1984 *
1985 * @see rte_flow_validate()
1986 * @see rte_flow_ops
1987 */
1988int
1989mlx5_flow_validate(struct rte_eth_dev *dev,
1990 const struct rte_flow_attr *attr,
1991 const struct rte_flow_item items[],
1992 const struct rte_flow_action actions[],
1993 struct rte_flow_error *error)
1994{
9f95a23c 1995 int ret;
11fdf7f2 1996
9f95a23c 1997 ret = flow_drv_validate(dev, attr, items, actions, error);
1998 if (ret < 0)
1999 return ret;
2000 return 0;
2001}
2002
2003/**
9f95a23c 2004 * Get RSS action from the action list.
11fdf7f2 2005 *
2006 * @param[in] actions
2007 * Pointer to the list of actions.
2008 *
2009 * @return
 2010 *   Pointer to the RSS action if it exists, NULL otherwise.
11fdf7f2 2011 */
 2012static const struct rte_flow_action_rss *
2013flow_get_rss_action(const struct rte_flow_action actions[])
11fdf7f2 2014{
2015 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2016 switch (actions->type) {
2017 case RTE_FLOW_ACTION_TYPE_RSS:
2018 return (const struct rte_flow_action_rss *)
2019 actions->conf;
2020 default:
2021 break;
2022 }
2023 }
9f95a23c 2024 return NULL;
2025}
2026
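/**
 * Select the expansion graph root for rte_flow_expand_rss().
 *
 * @param[in] pattern
 *   Flow pattern (list terminated by the END pattern item).
 * @param[in] rss_level
 *   Encapsulation level of the RSS action (2 or higher requests inner RSS).
 *
 * @return
 *   Index of the root node to use in mlx5_support_expansion[].
 */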
2027static unsigned int
2028find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
11fdf7f2 2029{
2030 const struct rte_flow_item *item;
2031 unsigned int has_vlan = 0;
2032
2033 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2034 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2035 has_vlan = 1;
2036 break;
2037 }
2038 }
2039 if (has_vlan)
2040 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
2041 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
2042 return rss_level < 2 ? MLX5_EXPANSION_ROOT :
2043 MLX5_EXPANSION_ROOT_OUTER;
2044}
2045
2046/**
2047 * Create a flow and add it to @p list.
2048 *
2049 * @param dev
2050 * Pointer to Ethernet device.
2051 * @param list
2052 * Pointer to a TAILQ flow list.
2053 * @param[in] attr
2054 * Flow rule attributes.
2055 * @param[in] items
2056 * Pattern specification (list terminated by the END pattern item).
2057 * @param[in] actions
2058 * Associated actions (list terminated by the END action).
2059 * @param[out] error
2060 * Perform verbose error reporting if not NULL.
2061 *
2062 * @return
2063 * A flow on success, NULL otherwise and rte_errno is set.
2064 */
2065static struct rte_flow *
2066flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
2067 const struct rte_flow_attr *attr,
2068 const struct rte_flow_item items[],
2069 const struct rte_flow_action actions[],
2070 struct rte_flow_error *error)
2071{
2072 struct rte_flow *flow = NULL;
2073 struct mlx5_flow *dev_flow;
2074 const struct rte_flow_action_rss *rss;
2075 union {
2076 struct rte_flow_expand_rss buf;
2077 uint8_t buffer[2048];
2078 } expand_buffer;
2079 struct rte_flow_expand_rss *buf = &expand_buffer.buf;
11fdf7f2 2080 int ret;
2081 uint32_t i;
2082 uint32_t flow_size;
11fdf7f2 2083
9f95a23c 2084 ret = flow_drv_validate(dev, attr, items, actions, error);
2085 if (ret < 0)
2086 return NULL;
2087 flow_size = sizeof(struct rte_flow);
2088 rss = flow_get_rss_action(actions);
2089 if (rss)
2090 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
2091 sizeof(void *));
2092 else
2093 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
2094 flow = rte_calloc(__func__, 1, flow_size, 0);
2095 flow->drv_type = flow_get_drv_type(dev, attr);
2096 flow->ingress = attr->ingress;
2097 flow->transfer = attr->transfer;
2098 assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
2099 flow->drv_type < MLX5_FLOW_TYPE_MAX);
2100 flow->queue = (void *)(flow + 1);
2101 LIST_INIT(&flow->dev_flows);
2102 if (rss && rss->types) {
2103 unsigned int graph_root;
2104
2105 graph_root = find_graph_root(items, rss->level);
2106 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
2107 items, rss->types,
2108 mlx5_support_expansion,
2109 graph_root);
2110 assert(ret > 0 &&
2111 (unsigned int)ret < sizeof(expand_buffer.buffer));
2112 } else {
2113 buf->entries = 1;
2114 buf->entry[0].pattern = (void *)(uintptr_t)items;
11fdf7f2 2115 }
2116 for (i = 0; i < buf->entries; ++i) {
2117 dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern,
2118 actions, error);
2119 if (!dev_flow)
2120 goto error;
2121 dev_flow->flow = flow;
2122 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
2123 ret = flow_drv_translate(dev, dev_flow, attr,
2124 buf->entry[i].pattern,
2125 actions, error);
2126 if (ret < 0)
2127 goto error;
11fdf7f2 2128 }
11fdf7f2 2129 if (dev->data->dev_started) {
2130 ret = flow_drv_apply(dev, flow, error);
2131 if (ret < 0)
2132 goto error;
2133 }
2134 TAILQ_INSERT_TAIL(list, flow, next);
9f95a23c 2135 flow_rxq_flags_set(dev, flow);
11fdf7f2 2136 return flow;
2137error:
2138 ret = rte_errno; /* Save rte_errno before cleanup. */
2139 assert(flow);
2140 flow_drv_destroy(dev, flow);
2141 rte_free(flow);
2142 rte_errno = ret; /* Restore rte_errno. */
2143 return NULL;
2144}
2145
2146/**
2147 * Create a flow.
2148 *
2149 * @see rte_flow_create()
2150 * @see rte_flow_ops
2151 */
2152struct rte_flow *
2153mlx5_flow_create(struct rte_eth_dev *dev,
2154 const struct rte_flow_attr *attr,
2155 const struct rte_flow_item items[],
2156 const struct rte_flow_action actions[],
2157 struct rte_flow_error *error)
2158{
2159 struct mlx5_priv *priv = (struct mlx5_priv *)dev->data->dev_private;
2160
2161 return flow_list_create(dev, &priv->flows,
2162 attr, items, actions, error);
2163}
2164
2165/**
2166 * Destroy a flow in a list.
2167 *
2168 * @param dev
2169 * Pointer to Ethernet device.
2170 * @param list
2171 * Pointer to a TAILQ flow list.
2172 * @param[in] flow
2173 * Flow to destroy.
2174 */
2175static void
2176flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
2177 struct rte_flow *flow)
11fdf7f2 2178{
2179 /*
2180 * Update RX queue flags only if port is started, otherwise it is
2181 * already clean.
2182 */
2183 if (dev->data->dev_started)
2184 flow_rxq_flags_trim(dev, flow);
2185 flow_drv_destroy(dev, flow);
2186 TAILQ_REMOVE(list, flow, next);
2187 rte_free(flow->fdir);
2188 rte_free(flow);
2189}
2190
2191/**
2192 * Destroy all flows.
2193 *
2194 * @param dev
2195 * Pointer to Ethernet device.
2196 * @param list
2197 * Pointer to a TAILQ flow list.
2198 */
2199void
2200mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
2201{
2202 while (!TAILQ_EMPTY(list)) {
2203 struct rte_flow *flow;
2204
2205 flow = TAILQ_FIRST(list);
9f95a23c 2206 flow_list_destroy(dev, list, flow);
2207 }
2208}
2209
2210/**
2211 * Remove all flows.
2212 *
2213 * @param dev
2214 * Pointer to Ethernet device.
2215 * @param list
2216 * Pointer to a TAILQ flow list.
2217 */
2218void
2219mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
2220{
2221 struct rte_flow *flow;
2222
2223 TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
2224 flow_drv_remove(dev, flow);
2225 flow_rxq_flags_clear(dev);
2226}
2227
2228/**
2229 * Add all flows.
2230 *
2231 * @param dev
2232 * Pointer to Ethernet device.
2233 * @param list
2234 * Pointer to a TAILQ flow list.
2235 *
2236 * @return
2237 * 0 on success, a negative errno value otherwise and rte_errno is set.
2238 */
2239int
2240mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
2241{
2242 struct rte_flow *flow;
2243 struct rte_flow_error error;
2244 int ret = 0;
2245
2246 TAILQ_FOREACH(flow, list, next) {
9f95a23c 2247 ret = flow_drv_apply(dev, flow, &error);
2248 if (ret < 0)
2249 goto error;
9f95a23c 2250 flow_rxq_flags_set(dev, flow);
2251 }
2252 return 0;
2253error:
2254 ret = rte_errno; /* Save rte_errno before cleanup. */
2255 mlx5_flow_stop(dev, list);
2256 rte_errno = ret; /* Restore rte_errno. */
2257 return -rte_errno;
2258}
2259
2260/**
 2261 * Verify the flow list is empty.
2262 *
2263 * @param dev
2264 * Pointer to Ethernet device.
2265 *
2266 * @return the number of flows not released.
2267 */
2268int
2269mlx5_flow_verify(struct rte_eth_dev *dev)
2270{
9f95a23c 2271 struct mlx5_priv *priv = dev->data->dev_private;
2272 struct rte_flow *flow;
2273 int ret = 0;
2274
2275 TAILQ_FOREACH(flow, &priv->flows, next) {
2276 DRV_LOG(DEBUG, "port %u flow %p still referenced",
2277 dev->data->port_id, (void *)flow);
2278 ++ret;
2279 }
2280 return ret;
2281}
2282
2283/**
2284 * Enable a control flow configured from the control plane.
2285 *
2286 * @param dev
2287 * Pointer to Ethernet device.
2288 * @param eth_spec
2289 * An Ethernet flow spec to apply.
2290 * @param eth_mask
2291 * An Ethernet flow mask to apply.
2292 * @param vlan_spec
2293 * A VLAN flow spec to apply.
2294 * @param vlan_mask
2295 * A VLAN flow mask to apply.
2296 *
2297 * @return
2298 * 0 on success, a negative errno value otherwise and rte_errno is set.
2299 */
2300int
2301mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
2302 struct rte_flow_item_eth *eth_spec,
2303 struct rte_flow_item_eth *eth_mask,
2304 struct rte_flow_item_vlan *vlan_spec,
2305 struct rte_flow_item_vlan *vlan_mask)
2306{
9f95a23c 2307 struct mlx5_priv *priv = dev->data->dev_private;
2308 const struct rte_flow_attr attr = {
2309 .ingress = 1,
2310 .priority = MLX5_FLOW_PRIO_RSVD,
2311 };
2312 struct rte_flow_item items[] = {
2313 {
2314 .type = RTE_FLOW_ITEM_TYPE_ETH,
2315 .spec = eth_spec,
2316 .last = NULL,
2317 .mask = eth_mask,
2318 },
2319 {
2320 .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
9f95a23c 2321 RTE_FLOW_ITEM_TYPE_END,
2322 .spec = vlan_spec,
2323 .last = NULL,
2324 .mask = vlan_mask,
2325 },
2326 {
2327 .type = RTE_FLOW_ITEM_TYPE_END,
2328 },
2329 };
2330 uint16_t queue[priv->reta_idx_n];
2331 struct rte_flow_action_rss action_rss = {
2332 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
2333 .level = 0,
2334 .types = priv->rss_conf.rss_hf,
2335 .key_len = priv->rss_conf.rss_key_len,
2336 .queue_num = priv->reta_idx_n,
2337 .key = priv->rss_conf.rss_key,
2338 .queue = queue,
2339 };
2340 struct rte_flow_action actions[] = {
2341 {
2342 .type = RTE_FLOW_ACTION_TYPE_RSS,
2343 .conf = &action_rss,
2344 },
2345 {
2346 .type = RTE_FLOW_ACTION_TYPE_END,
2347 },
2348 };
2349 struct rte_flow *flow;
2350 struct rte_flow_error error;
2351 unsigned int i;
2352
2353 if (!priv->reta_idx_n || !priv->rxqs_n) {
2354 return 0;
2355 }
2356 for (i = 0; i != priv->reta_idx_n; ++i)
2357 queue[i] = (*priv->reta_idx)[i];
2358 flow = flow_list_create(dev, &priv->ctrl_flows,
2359 &attr, items, actions, &error);
2360 if (!flow)
2361 return -rte_errno;
2362 return 0;
2363}
2364
2365/**
 2366 * Enable a control flow configured from the control plane.
2367 *
2368 * @param dev
2369 * Pointer to Ethernet device.
2370 * @param eth_spec
2371 * An Ethernet flow spec to apply.
2372 * @param eth_mask
2373 * An Ethernet flow mask to apply.
2374 *
2375 * @return
2376 * 0 on success, a negative errno value otherwise and rte_errno is set.
2377 */
2378int
2379mlx5_ctrl_flow(struct rte_eth_dev *dev,
2380 struct rte_flow_item_eth *eth_spec,
2381 struct rte_flow_item_eth *eth_mask)
2382{
2383 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
2384}
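/*
 * Illustrative sketch only (not part of the original driver): requesting a
 * unicast control flow through mlx5_ctrl_flow(). The helper name and the
 * calling context are hypothetical; mlx5_traffic_enable() in mlx5_trigger.c
 * is the real caller installing such control flows.
 */
static int __rte_unused
flow_example_enable_unicast(struct rte_eth_dev *dev,
			    const struct ether_addr *mac)
{
	struct rte_flow_item_eth unicast = {
		.type = 0,
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};

	/* Match on the destination MAC only, any EtherType. */
	memcpy(unicast.dst.addr_bytes, mac->addr_bytes, ETHER_ADDR_LEN);
	return mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
}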
2385
2386/**
2387 * Destroy a flow.
2388 *
2389 * @see rte_flow_destroy()
2390 * @see rte_flow_ops
2391 */
2392int
2393mlx5_flow_destroy(struct rte_eth_dev *dev,
2394 struct rte_flow *flow,
2395 struct rte_flow_error *error __rte_unused)
2396{
9f95a23c 2397 struct mlx5_priv *priv = dev->data->dev_private;
11fdf7f2 2398
9f95a23c 2399 flow_list_destroy(dev, &priv->flows, flow);
2400 return 0;
2401}
2402
2403/**
2404 * Destroy all flows.
2405 *
2406 * @see rte_flow_flush()
2407 * @see rte_flow_ops
2408 */
2409int
2410mlx5_flow_flush(struct rte_eth_dev *dev,
2411 struct rte_flow_error *error __rte_unused)
2412{
9f95a23c 2413 struct mlx5_priv *priv = dev->data->dev_private;
2414
2415 mlx5_flow_list_flush(dev, &priv->flows);
2416 return 0;
2417}
2418
2419/**
2420 * Isolated mode.
2421 *
2422 * @see rte_flow_isolate()
2423 * @see rte_flow_ops
2424 */
2425int
2426mlx5_flow_isolate(struct rte_eth_dev *dev,
2427 int enable,
2428 struct rte_flow_error *error)
2429{
9f95a23c 2430 struct mlx5_priv *priv = dev->data->dev_private;
2431
2432 if (dev->data->dev_started) {
2433 rte_flow_error_set(error, EBUSY,
2434 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2435 NULL,
2436 "port must be stopped first");
2437 return -rte_errno;
2438 }
2439 priv->isolated = !!enable;
2440 if (enable)
2441 dev->dev_ops = &mlx5_dev_ops_isolate;
2442 else
2443 dev->dev_ops = &mlx5_dev_ops;
2444 return 0;
2445}
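/*
 * Illustrative usage note (not part of the original driver): an application
 * requests isolated mode through the generic rte_flow API before starting
 * the port, e.g.:
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_isolate(port_id, 1, &err))
 *		rte_panic("cannot enter isolated mode: %s\n", err.message);
 *	rte_eth_dev_start(port_id);
 */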
2446
2447/**
9f95a23c 2448 * Query a flow.
11fdf7f2 2449 *
2450 * @see rte_flow_query()
2451 * @see rte_flow_ops
2452 */
2453static int
2454flow_drv_query(struct rte_eth_dev *dev,
2455 struct rte_flow *flow,
2456 const struct rte_flow_action *actions,
2457 void *data,
2458 struct rte_flow_error *error)
2459{
2460 const struct mlx5_flow_driver_ops *fops;
2461 enum mlx5_flow_drv_type ftype = flow->drv_type;
11fdf7f2 2462
2463 assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
2464 fops = flow_get_drv_ops(ftype);
2465
2466 return fops->query(dev, flow, actions, data, error);
2467}
2468
2469/**
9f95a23c 2470 * Query a flow.
2471 *
2472 * @see rte_flow_query()
2473 * @see rte_flow_ops
2474 */
2475int
9f95a23c 2476mlx5_flow_query(struct rte_eth_dev *dev,
2477 struct rte_flow *flow,
2478 const struct rte_flow_action *actions,
2479 void *data,
2480 struct rte_flow_error *error)
2481{
9f95a23c 2482 int ret;
11fdf7f2 2483
2484 ret = flow_drv_query(dev, flow, actions, data, error);
2485 if (ret < 0)
2486 return ret;
2487 return 0;
2488}
2489
2490/**
2491 * Convert a flow director filter to a generic flow.
2492 *
2493 * @param dev
2494 * Pointer to Ethernet device.
2495 * @param fdir_filter
2496 * Flow director filter to add.
2497 * @param attributes
2498 * Generic flow parameters structure.
2499 *
2500 * @return
2501 * 0 on success, a negative errno value otherwise and rte_errno is set.
2502 */
2503static int
9f95a23c 2504flow_fdir_filter_convert(struct rte_eth_dev *dev,
2505 const struct rte_eth_fdir_filter *fdir_filter,
2506 struct mlx5_fdir *attributes)
2507{
9f95a23c 2508 struct mlx5_priv *priv = dev->data->dev_private;
2509 const struct rte_eth_fdir_input *input = &fdir_filter->input;
2510 const struct rte_eth_fdir_masks *mask =
2511 &dev->data->dev_conf.fdir_conf.mask;
2512
2513 /* Validate queue number. */
2514 if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
2515 DRV_LOG(ERR, "port %u invalid queue number %d",
2516 dev->data->port_id, fdir_filter->action.rx_queue);
2517 rte_errno = EINVAL;
2518 return -rte_errno;
2519 }
2520 attributes->attr.ingress = 1;
2521 attributes->items[0] = (struct rte_flow_item) {
2522 .type = RTE_FLOW_ITEM_TYPE_ETH,
2523 .spec = &attributes->l2,
2524 .mask = &attributes->l2_mask,
2525 };
2526 switch (fdir_filter->action.behavior) {
2527 case RTE_ETH_FDIR_ACCEPT:
2528 attributes->actions[0] = (struct rte_flow_action){
2529 .type = RTE_FLOW_ACTION_TYPE_QUEUE,
2530 .conf = &attributes->queue,
2531 };
2532 break;
2533 case RTE_ETH_FDIR_REJECT:
2534 attributes->actions[0] = (struct rte_flow_action){
2535 .type = RTE_FLOW_ACTION_TYPE_DROP,
2536 };
2537 break;
2538 default:
2539 DRV_LOG(ERR, "port %u invalid behavior %d",
2540 dev->data->port_id,
2541 fdir_filter->action.behavior);
2542 rte_errno = ENOTSUP;
2543 return -rte_errno;
2544 }
2545 attributes->queue.index = fdir_filter->action.rx_queue;
2546 /* Handle L3. */
2547 switch (fdir_filter->input.flow_type) {
2548 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2549 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2550 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2551 attributes->l3.ipv4.hdr = (struct ipv4_hdr){
2552 .src_addr = input->flow.ip4_flow.src_ip,
2553 .dst_addr = input->flow.ip4_flow.dst_ip,
2554 .time_to_live = input->flow.ip4_flow.ttl,
2555 .type_of_service = input->flow.ip4_flow.tos,
2556 };
2557 attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
2558 .src_addr = mask->ipv4_mask.src_ip,
2559 .dst_addr = mask->ipv4_mask.dst_ip,
2560 .time_to_live = mask->ipv4_mask.ttl,
2561 .type_of_service = mask->ipv4_mask.tos,
2562 .next_proto_id = mask->ipv4_mask.proto,
2563 };
2564 attributes->items[1] = (struct rte_flow_item){
2565 .type = RTE_FLOW_ITEM_TYPE_IPV4,
2566 .spec = &attributes->l3,
2567 .mask = &attributes->l3_mask,
2568 };
2569 break;
2570 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2571 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2572 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2573 attributes->l3.ipv6.hdr = (struct ipv6_hdr){
2574 .hop_limits = input->flow.ipv6_flow.hop_limits,
2575 .proto = input->flow.ipv6_flow.proto,
2576 };
2577
2578 memcpy(attributes->l3.ipv6.hdr.src_addr,
2579 input->flow.ipv6_flow.src_ip,
2580 RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
2581 memcpy(attributes->l3.ipv6.hdr.dst_addr,
2582 input->flow.ipv6_flow.dst_ip,
 2583	       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
2584 memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
2585 mask->ipv6_mask.src_ip,
2586 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
2587 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
2588 mask->ipv6_mask.dst_ip,
 2589	       RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
2590 attributes->items[1] = (struct rte_flow_item){
2591 .type = RTE_FLOW_ITEM_TYPE_IPV6,
2592 .spec = &attributes->l3,
2593 .mask = &attributes->l3_mask,
2594 };
2595 break;
2596 default:
 2597		DRV_LOG(ERR, "port %u invalid flow type %d",
2598 dev->data->port_id, fdir_filter->input.flow_type);
2599 rte_errno = ENOTSUP;
2600 return -rte_errno;
2601 }
2602 /* Handle L4. */
2603 switch (fdir_filter->input.flow_type) {
2604 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2605 attributes->l4.udp.hdr = (struct udp_hdr){
2606 .src_port = input->flow.udp4_flow.src_port,
2607 .dst_port = input->flow.udp4_flow.dst_port,
2608 };
2609 attributes->l4_mask.udp.hdr = (struct udp_hdr){
2610 .src_port = mask->src_port_mask,
2611 .dst_port = mask->dst_port_mask,
2612 };
2613 attributes->items[2] = (struct rte_flow_item){
2614 .type = RTE_FLOW_ITEM_TYPE_UDP,
2615 .spec = &attributes->l4,
2616 .mask = &attributes->l4_mask,
2617 };
2618 break;
2619 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2620 attributes->l4.tcp.hdr = (struct tcp_hdr){
2621 .src_port = input->flow.tcp4_flow.src_port,
2622 .dst_port = input->flow.tcp4_flow.dst_port,
2623 };
2624 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
2625 .src_port = mask->src_port_mask,
2626 .dst_port = mask->dst_port_mask,
2627 };
2628 attributes->items[2] = (struct rte_flow_item){
2629 .type = RTE_FLOW_ITEM_TYPE_TCP,
2630 .spec = &attributes->l4,
2631 .mask = &attributes->l4_mask,
2632 };
2633 break;
2634 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2635 attributes->l4.udp.hdr = (struct udp_hdr){
2636 .src_port = input->flow.udp6_flow.src_port,
2637 .dst_port = input->flow.udp6_flow.dst_port,
2638 };
2639 attributes->l4_mask.udp.hdr = (struct udp_hdr){
2640 .src_port = mask->src_port_mask,
2641 .dst_port = mask->dst_port_mask,
2642 };
2643 attributes->items[2] = (struct rte_flow_item){
2644 .type = RTE_FLOW_ITEM_TYPE_UDP,
2645 .spec = &attributes->l4,
2646 .mask = &attributes->l4_mask,
2647 };
2648 break;
2649 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2650 attributes->l4.tcp.hdr = (struct tcp_hdr){
2651 .src_port = input->flow.tcp6_flow.src_port,
2652 .dst_port = input->flow.tcp6_flow.dst_port,
2653 };
2654 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
2655 .src_port = mask->src_port_mask,
2656 .dst_port = mask->dst_port_mask,
2657 };
2658 attributes->items[2] = (struct rte_flow_item){
2659 .type = RTE_FLOW_ITEM_TYPE_TCP,
2660 .spec = &attributes->l4,
2661 .mask = &attributes->l4_mask,
2662 };
2663 break;
2664 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2665 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2666 break;
2667 default:
 2668		DRV_LOG(ERR, "port %u invalid flow type %d",
2669 dev->data->port_id, fdir_filter->input.flow_type);
2670 rte_errno = ENOTSUP;
2671 return -rte_errno;
2672 }
2673 return 0;
2674}
2675
2676#define FLOW_FDIR_CMP(f1, f2, fld) \
2677 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld))
2678
2679/**
 2680 * Compare two FDIR flows. If the items and actions are identical, the two
 2681 * flows are regarded as the same.
2682 *
2683 * @param dev
2684 * Pointer to Ethernet device.
2685 * @param f1
2686 * FDIR flow to compare.
2687 * @param f2
2688 * FDIR flow to compare.
2689 *
2690 * @return
2691 * Zero on match, 1 otherwise.
2692 */
2693static int
2694flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
2695{
2696 if (FLOW_FDIR_CMP(f1, f2, attr) ||
2697 FLOW_FDIR_CMP(f1, f2, l2) ||
2698 FLOW_FDIR_CMP(f1, f2, l2_mask) ||
2699 FLOW_FDIR_CMP(f1, f2, l3) ||
2700 FLOW_FDIR_CMP(f1, f2, l3_mask) ||
2701 FLOW_FDIR_CMP(f1, f2, l4) ||
2702 FLOW_FDIR_CMP(f1, f2, l4_mask) ||
2703 FLOW_FDIR_CMP(f1, f2, actions[0].type))
2704 return 1;
2705 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
2706 FLOW_FDIR_CMP(f1, f2, queue))
2707 return 1;
2708 return 0;
2709}
2710
2711/**
2712 * Search device flow list to find out a matched FDIR flow.
2713 *
2714 * @param dev
2715 * Pointer to Ethernet device.
2716 * @param fdir_flow
2717 * FDIR flow to lookup.
2718 *
2719 * @return
2720 * Pointer of flow if found, NULL otherwise.
2721 */
2722static struct rte_flow *
2723flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
2724{
2725 struct mlx5_priv *priv = dev->data->dev_private;
2726 struct rte_flow *flow = NULL;
2727
2728 assert(fdir_flow);
2729 TAILQ_FOREACH(flow, &priv->flows, next) {
2730 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
2731 DRV_LOG(DEBUG, "port %u found FDIR flow %p",
2732 dev->data->port_id, (void *)flow);
2733 break;
2734 }
2735 }
2736 return flow;
2737}
2738
2739/**
2740 * Add new flow director filter and store it in list.
2741 *
2742 * @param dev
2743 * Pointer to Ethernet device.
2744 * @param fdir_filter
2745 * Flow director filter to add.
2746 *
2747 * @return
2748 * 0 on success, a negative errno value otherwise and rte_errno is set.
2749 */
2750static int
9f95a23c 2751flow_fdir_filter_add(struct rte_eth_dev *dev,
2752 const struct rte_eth_fdir_filter *fdir_filter)
2753{
2754 struct mlx5_priv *priv = dev->data->dev_private;
2755 struct mlx5_fdir *fdir_flow;
2756 struct rte_flow *flow;
2757 int ret;
2758
2759 fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
2760 if (!fdir_flow) {
2761 rte_errno = ENOMEM;
2762 return -rte_errno;
2763 }
2764 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
11fdf7f2 2765 if (ret)
2766 goto error;
2767 flow = flow_fdir_filter_lookup(dev, fdir_flow);
11fdf7f2 2768 if (flow) {
2769 rte_errno = EEXIST;
2770 goto error;
11fdf7f2 2771 }
2772 flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
2773 fdir_flow->items, fdir_flow->actions, NULL);
2774 if (!flow)
2775 goto error;
2776 assert(!flow->fdir);
2777 flow->fdir = fdir_flow;
2778 DRV_LOG(DEBUG, "port %u created FDIR flow %p",
2779 dev->data->port_id, (void *)flow);
2780 return 0;
2781error:
2782 rte_free(fdir_flow);
2783 return -rte_errno;
2784}
2785
2786/**
2787 * Delete specific filter.
2788 *
2789 * @param dev
2790 * Pointer to Ethernet device.
2791 * @param fdir_filter
2792 * Filter to be deleted.
2793 *
2794 * @return
2795 * 0 on success, a negative errno value otherwise and rte_errno is set.
2796 */
2797static int
2798flow_fdir_filter_delete(struct rte_eth_dev *dev,
2799 const struct rte_eth_fdir_filter *fdir_filter)
11fdf7f2 2800{
2801 struct mlx5_priv *priv = dev->data->dev_private;
2802 struct rte_flow *flow;
2803 struct mlx5_fdir fdir_flow = {
2804 .attr.group = 0,
2805 };
2806 int ret;
2807
2808 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
2809 if (ret)
2810 return -rte_errno;
2811 flow = flow_fdir_filter_lookup(dev, &fdir_flow);
2812 if (!flow) {
2813 rte_errno = ENOENT;
2814 return -rte_errno;
2815 }
2816 flow_list_destroy(dev, &priv->flows, flow);
2817 DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
2818 dev->data->port_id, (void *)flow);
2819 return 0;
2820}
2821
2822/**
2823 * Update queue for specific filter.
2824 *
2825 * @param dev
2826 * Pointer to Ethernet device.
2827 * @param fdir_filter
2828 * Filter to be updated.
2829 *
2830 * @return
2831 * 0 on success, a negative errno value otherwise and rte_errno is set.
2832 */
2833static int
9f95a23c 2834flow_fdir_filter_update(struct rte_eth_dev *dev,
2835 const struct rte_eth_fdir_filter *fdir_filter)
2836{
2837 int ret;
2838
9f95a23c 2839 ret = flow_fdir_filter_delete(dev, fdir_filter);
2840 if (ret)
2841 return ret;
9f95a23c 2842 return flow_fdir_filter_add(dev, fdir_filter);
2843}
2844
2845/**
2846 * Flush all filters.
2847 *
2848 * @param dev
2849 * Pointer to Ethernet device.
2850 */
2851static void
9f95a23c 2852flow_fdir_filter_flush(struct rte_eth_dev *dev)
11fdf7f2 2853{
9f95a23c 2854 struct mlx5_priv *priv = dev->data->dev_private;
2855
2856 mlx5_flow_list_flush(dev, &priv->flows);
2857}
2858
2859/**
2860 * Get flow director information.
2861 *
2862 * @param dev
2863 * Pointer to Ethernet device.
2864 * @param[out] fdir_info
2865 * Resulting flow director information.
2866 */
2867static void
9f95a23c 2868flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
2869{
2870 struct rte_eth_fdir_masks *mask =
2871 &dev->data->dev_conf.fdir_conf.mask;
2872
2873 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
2874 fdir_info->guarant_spc = 0;
2875 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
2876 fdir_info->max_flexpayload = 0;
2877 fdir_info->flow_types_mask[0] = 0;
2878 fdir_info->flex_payload_unit = 0;
2879 fdir_info->max_flex_payload_segment_num = 0;
2880 fdir_info->flex_payload_limit = 0;
2881 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
2882}
2883
2884/**
2885 * Deal with flow director operations.
2886 *
2887 * @param dev
2888 * Pointer to Ethernet device.
2889 * @param filter_op
2890 * Operation to perform.
2891 * @param arg
2892 * Pointer to operation-specific structure.
2893 *
2894 * @return
2895 * 0 on success, a negative errno value otherwise and rte_errno is set.
2896 */
2897static int
9f95a23c 2898flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
2899 void *arg)
2900{
2901 enum rte_fdir_mode fdir_mode =
2902 dev->data->dev_conf.fdir_conf.mode;
2903
2904 if (filter_op == RTE_ETH_FILTER_NOP)
2905 return 0;
2906 if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
2907 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
2908 DRV_LOG(ERR, "port %u flow director mode %d not supported",
2909 dev->data->port_id, fdir_mode);
2910 rte_errno = EINVAL;
2911 return -rte_errno;
2912 }
2913 switch (filter_op) {
2914 case RTE_ETH_FILTER_ADD:
9f95a23c 2915 return flow_fdir_filter_add(dev, arg);
11fdf7f2 2916 case RTE_ETH_FILTER_UPDATE:
9f95a23c 2917 return flow_fdir_filter_update(dev, arg);
11fdf7f2 2918 case RTE_ETH_FILTER_DELETE:
9f95a23c 2919 return flow_fdir_filter_delete(dev, arg);
11fdf7f2 2920 case RTE_ETH_FILTER_FLUSH:
9f95a23c 2921 flow_fdir_filter_flush(dev);
2922 break;
2923 case RTE_ETH_FILTER_INFO:
9f95a23c 2924 flow_fdir_info_get(dev, arg);
2925 break;
2926 default:
2927 DRV_LOG(DEBUG, "port %u unknown operation %u",
2928 dev->data->port_id, filter_op);
2929 rte_errno = EINVAL;
2930 return -rte_errno;
2931 }
2932 return 0;
2933}
2934
2935/**
2936 * Manage filter operations.
2937 *
2938 * @param dev
2939 * Pointer to Ethernet device structure.
2940 * @param filter_type
2941 * Filter type.
2942 * @param filter_op
2943 * Operation to perform.
2944 * @param arg
2945 * Pointer to operation-specific structure.
2946 *
2947 * @return
2948 * 0 on success, a negative errno value otherwise and rte_errno is set.
2949 */
2950int
2951mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
2952 enum rte_filter_type filter_type,
2953 enum rte_filter_op filter_op,
2954 void *arg)
2955{
2956 switch (filter_type) {
2957 case RTE_ETH_FILTER_GENERIC:
2958 if (filter_op != RTE_ETH_FILTER_GET) {
2959 rte_errno = EINVAL;
2960 return -rte_errno;
2961 }
2962 *(const void **)arg = &mlx5_flow_ops;
2963 return 0;
2964 case RTE_ETH_FILTER_FDIR:
9f95a23c 2965 return flow_fdir_ctrl_func(dev, filter_op, arg);
2966 default:
2967 DRV_LOG(ERR, "port %u filter type (%d) not supported",
2968 dev->data->port_id, filter_type);
2969 rte_errno = ENOTSUP;
2970 return -rte_errno;
2971 }
2972 return 0;
2973}