ceph/src/spdk/dpdk/drivers/net/ice/ice_fdir_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
3 */
4
5 #include <stdio.h>
6 #include <rte_flow.h>
7 #include <rte_hash.h>
8 #include <rte_hash_crc.h>
9 #include "base/ice_fdir.h"
10 #include "base/ice_flow.h"
11 #include "base/ice_type.h"
12 #include "ice_ethdev.h"
13 #include "ice_rxtx.h"
14 #include "ice_generic_flow.h"
15
16 #define ICE_FDIR_IPV6_TC_OFFSET 20
17 #define ICE_IPV6_TC_MASK (0xFF << ICE_FDIR_IPV6_TC_OFFSET)
18
19 #define ICE_FDIR_MAX_QREGION_SIZE 128
20
21 #define ICE_FDIR_INSET_ETH_IPV4 (\
22 ICE_INSET_DMAC | \
23 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
24 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
25
26 #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
27 ICE_FDIR_INSET_ETH_IPV4 | \
28 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
29
30 #define ICE_FDIR_INSET_ETH_IPV4_TCP (\
31 ICE_FDIR_INSET_ETH_IPV4 | \
32 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
33
34 #define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
35 ICE_FDIR_INSET_ETH_IPV4 | \
36 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
37
38 #define ICE_FDIR_INSET_ETH_IPV6 (\
39 ICE_INSET_DMAC | \
40 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
41 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
42
43 #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
44 ICE_FDIR_INSET_ETH_IPV6 | \
45 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
46
47 #define ICE_FDIR_INSET_ETH_IPV6_TCP (\
48 ICE_FDIR_INSET_ETH_IPV6 | \
49 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
50
51 #define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
52 ICE_FDIR_INSET_ETH_IPV6 | \
53 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
54
55 #define ICE_FDIR_INSET_VXLAN_IPV4 (\
56 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
57
58 #define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
59 ICE_FDIR_INSET_VXLAN_IPV4 | \
60 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
61
62 #define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
63 ICE_FDIR_INSET_VXLAN_IPV4 | \
64 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
65
66 #define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
67 ICE_FDIR_INSET_VXLAN_IPV4 | \
68 ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)
69
70 #define ICE_FDIR_INSET_GTPU (\
71 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)
72
73 #define ICE_FDIR_INSET_GTPU_EH (\
74 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
75 ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
76
77 static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
78 {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
79 {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
80 {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
81 {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
82 {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
83 {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
84 {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
85 {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
86 {pattern_eth_ipv4_udp_vxlan_ipv4,
87 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
88 {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
89 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
90 {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
91 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
92 {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
93 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
94 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
95 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
96 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
97 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
98 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
99 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
100 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
101 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
102 };
103
104 static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
105 {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
106 {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
107 {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
108 {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
109 {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
110 {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
111 {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
112 {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
113 {pattern_eth_ipv4_udp_vxlan_ipv4,
114 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
115 {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
116 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
117 {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
118 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
119 {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
120 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
121 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
122 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
123 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
124 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
125 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
126 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
127 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
128 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
129 {pattern_eth_ipv4_gtpu, ICE_FDIR_INSET_GTPU, ICE_INSET_NONE},
130 {pattern_eth_ipv4_gtpu_eh, ICE_FDIR_INSET_GTPU_EH, ICE_INSET_NONE},
131 };
132
133 static struct ice_flow_parser ice_fdir_parser_os;
134 static struct ice_flow_parser ice_fdir_parser_comms;
135
136 static int
137 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);
138
139 static const struct rte_memzone *
140 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
141 {
142 const struct rte_memzone *mz;
143
144 mz = rte_memzone_lookup(name);
145 if (mz)
146 return mz;
147
148 return rte_memzone_reserve_aligned(name, len, socket_id,
149 RTE_MEMZONE_IOVA_CONTIG,
150 ICE_RING_BASE_ALIGN);
151 }
152
153 #define ICE_FDIR_MZ_NAME "FDIR_MEMZONE"
154
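/* Allocate the per-flow-type FDIR profile table (hw->fdir_prof) and one
 * ice_fd_hw_prof entry per filter ptype; everything is freed again on failure.
 */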
155 static int
156 ice_fdir_prof_alloc(struct ice_hw *hw)
157 {
158 enum ice_fltr_ptype ptype, fltr_ptype;
159
160 if (!hw->fdir_prof) {
161 hw->fdir_prof = (struct ice_fd_hw_prof **)
162 ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
163 sizeof(*hw->fdir_prof));
164 if (!hw->fdir_prof)
165 return -ENOMEM;
166 }
167 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
168 ptype < ICE_FLTR_PTYPE_MAX;
169 ptype++) {
170 if (!hw->fdir_prof[ptype]) {
171 hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
172 ice_malloc(hw, sizeof(**hw->fdir_prof));
173 if (!hw->fdir_prof[ptype])
174 goto fail_mem;
175 }
176 }
177 return 0;
178
179 fail_mem:
180 for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
181 fltr_ptype < ptype;
182 fltr_ptype++) {
183 rte_free(hw->fdir_prof[fltr_ptype]);
184 hw->fdir_prof[fltr_ptype] = NULL;
185 }
186
187 rte_free(hw->fdir_prof);
188 hw->fdir_prof = NULL;
189
190 return -ENOMEM;
191 }
192
193 static int
194 ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
195 struct ice_fdir_counter_pool_container *container,
196 uint32_t index_start,
197 uint32_t len)
198 {
199 struct ice_fdir_counter_pool *pool;
200 uint32_t i;
201 int ret = 0;
202
203 pool = rte_zmalloc("ice_fdir_counter_pool",
204 sizeof(*pool) +
205 sizeof(struct ice_fdir_counter) * len,
206 0);
207 if (!pool) {
208 PMD_INIT_LOG(ERR,
209 "Failed to allocate memory for fdir counter pool");
210 return -ENOMEM;
211 }
212
213 TAILQ_INIT(&pool->counter_list);
214 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
215
216 for (i = 0; i < len; i++) {
217 struct ice_fdir_counter *counter = &pool->counters[i];
218
219 counter->hw_index = index_start + i;
220 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
221 }
222
223 if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
224 PMD_INIT_LOG(ERR, "FDIR counter pool is full");
225 ret = -EINVAL;
226 goto free_pool;
227 }
228
229 container->pools[container->index_free++] = pool;
230 return 0;
231
232 free_pool:
233 rte_free(pool);
234 return ret;
235 }
236
237 static int
238 ice_fdir_counter_init(struct ice_pf *pf)
239 {
240 struct ice_hw *hw = ICE_PF_TO_HW(pf);
241 struct ice_fdir_info *fdir_info = &pf->fdir;
242 struct ice_fdir_counter_pool_container *container =
243 &fdir_info->counter;
244 uint32_t cnt_index, len;
245 int ret;
246
247 TAILQ_INIT(&container->pool_list);
248
249 cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
250 len = ICE_FDIR_COUNTERS_PER_BLOCK;
251
252 ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
253 if (ret) {
254 PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
255 return ret;
256 }
257
258 return 0;
259 }
260
261 static int
262 ice_fdir_counter_release(struct ice_pf *pf)
263 {
264 struct ice_fdir_info *fdir_info = &pf->fdir;
265 struct ice_fdir_counter_pool_container *container =
266 &fdir_info->counter;
267 uint8_t i;
268
269 for (i = 0; i < container->index_free; i++) {
270 rte_free(container->pools[i]);
271 container->pools[i] = NULL;
272 }
273
274 TAILQ_INIT(&container->pool_list);
275 container->index_free = 0;
276
277 return 0;
278 }
279
280 static struct ice_fdir_counter *
281 ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
282 *container,
283 uint32_t id)
284 {
285 struct ice_fdir_counter_pool *pool;
286 struct ice_fdir_counter *counter;
287 int i;
288
289 TAILQ_FOREACH(pool, &container->pool_list, next) {
290 for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
291 counter = &pool->counters[i];
292
293 if (counter->shared &&
294 counter->ref_cnt &&
295 counter->id == id)
296 return counter;
297 }
298 }
299
300 return NULL;
301 }
302
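/* Allocate an FDIR counter. A shared counter with a matching id is reused
 * (reference counted); otherwise the first free counter is taken from the
 * pool list, its GLSTAT_FD_CNT0 registers are cleared, and a pool that has
 * run empty is moved to the tail of the container's pool list.
 */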
303 static struct ice_fdir_counter *
304 ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
305 {
306 struct ice_hw *hw = ICE_PF_TO_HW(pf);
307 struct ice_fdir_info *fdir_info = &pf->fdir;
308 struct ice_fdir_counter_pool_container *container =
309 &fdir_info->counter;
310 struct ice_fdir_counter_pool *pool = NULL;
311 struct ice_fdir_counter *counter_free = NULL;
312
313 if (shared) {
314 counter_free = ice_fdir_counter_shared_search(container, id);
315 if (counter_free) {
316 if (counter_free->ref_cnt + 1 == 0) {
317 rte_errno = E2BIG;
318 return NULL;
319 }
320 counter_free->ref_cnt++;
321 return counter_free;
322 }
323 }
324
325 TAILQ_FOREACH(pool, &container->pool_list, next) {
326 counter_free = TAILQ_FIRST(&pool->counter_list);
327 if (counter_free)
328 break;
329 counter_free = NULL;
330 }
331
332 if (!counter_free) {
333 		PMD_DRV_LOG(ERR, "No free counter found");
334 return NULL;
335 }
336
337 counter_free->shared = shared;
338 counter_free->id = id;
339 counter_free->ref_cnt = 1;
340 counter_free->pool = pool;
341
342 /* reset statistic counter value */
343 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
344 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);
345
346 TAILQ_REMOVE(&pool->counter_list, counter_free, next);
347 if (TAILQ_EMPTY(&pool->counter_list)) {
348 TAILQ_REMOVE(&container->pool_list, pool, next);
349 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
350 }
351
352 return counter_free;
353 }
354
355 static void
356 ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
357 struct ice_fdir_counter *counter)
358 {
359 if (!counter)
360 return;
361
362 if (--counter->ref_cnt == 0) {
363 struct ice_fdir_counter_pool *pool = counter->pool;
364
365 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
366 }
367 }
368
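/* Create the hash table and lookup map used to track FDIR rules in software,
 * keyed by struct ice_fdir_fltr_pattern.
 */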
369 static int
370 ice_fdir_init_filter_list(struct ice_pf *pf)
371 {
372 struct rte_eth_dev *dev = pf->adapter->eth_dev;
373 struct ice_fdir_info *fdir_info = &pf->fdir;
374 char fdir_hash_name[RTE_HASH_NAMESIZE];
375 int ret;
376
377 struct rte_hash_parameters fdir_hash_params = {
378 .name = fdir_hash_name,
379 .entries = ICE_MAX_FDIR_FILTER_NUM,
380 .key_len = sizeof(struct ice_fdir_fltr_pattern),
381 .hash_func = rte_hash_crc,
382 .hash_func_init_val = 0,
383 .socket_id = rte_socket_id(),
384 .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
385 };
386
387 /* Initialize hash */
388 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
389 "fdir_%s", dev->device->name);
390 fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
391 if (!fdir_info->hash_table) {
392 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
393 return -EINVAL;
394 }
395 fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
396 sizeof(*fdir_info->hash_map) *
397 ICE_MAX_FDIR_FILTER_NUM,
398 0);
399 if (!fdir_info->hash_map) {
400 PMD_INIT_LOG(ERR,
401 "Failed to allocate memory for fdir hash map!");
402 ret = -ENOMEM;
403 goto err_fdir_hash_map_alloc;
404 }
405 return 0;
406
407 err_fdir_hash_map_alloc:
408 rte_hash_free(fdir_info->hash_table);
409
410 return ret;
411 }
412
413 static void
414 ice_fdir_release_filter_list(struct ice_pf *pf)
415 {
416 struct ice_fdir_info *fdir_info = &pf->fdir;
417
418 if (fdir_info->hash_map)
419 rte_free(fdir_info->hash_map);
420 if (fdir_info->hash_table)
421 rte_hash_free(fdir_info->hash_table);
422
423 fdir_info->hash_map = NULL;
424 fdir_info->hash_table = NULL;
425 }
426
427 /*
428 * ice_fdir_setup - reserve and initialize the Flow Director resources
429 * @pf: board private structure
430 */
431 static int
432 ice_fdir_setup(struct ice_pf *pf)
433 {
434 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
435 struct ice_hw *hw = ICE_PF_TO_HW(pf);
436 const struct rte_memzone *mz = NULL;
437 char z_name[RTE_MEMZONE_NAMESIZE];
438 struct ice_vsi *vsi;
439 int err = ICE_SUCCESS;
440
441 if ((pf->flags & ICE_FLAG_FDIR) == 0) {
442 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
443 return -ENOTSUP;
444 }
445
446 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
447 " fd_fltr_best_effort = %u.",
448 hw->func_caps.fd_fltr_guar,
449 hw->func_caps.fd_fltr_best_effort);
450
451 if (pf->fdir.fdir_vsi) {
452 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
453 return ICE_SUCCESS;
454 }
455
456 /* make new FDIR VSI */
457 vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
458 if (!vsi) {
459 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
460 return -EINVAL;
461 }
462 pf->fdir.fdir_vsi = vsi;
463
464 err = ice_fdir_init_filter_list(pf);
465 if (err) {
466 PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
467 return -EINVAL;
468 }
469
470 err = ice_fdir_counter_init(pf);
471 if (err) {
472 PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
473 return -EINVAL;
474 }
475
476 	/* FDIR TX queue setup */
477 err = ice_fdir_setup_tx_resources(pf);
478 if (err) {
479 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
480 goto fail_setup_tx;
481 }
482
483 	/* FDIR RX queue setup */
484 err = ice_fdir_setup_rx_resources(pf);
485 if (err) {
486 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
487 goto fail_setup_rx;
488 }
489
490 err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
491 if (err) {
492 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
493 goto fail_mem;
494 }
495
496 err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
497 if (err) {
498 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
499 goto fail_mem;
500 }
501
502 /* Enable FDIR MSIX interrupt */
503 vsi->nb_used_qps = 1;
504 ice_vsi_queues_bind_intr(vsi);
505 ice_vsi_enable_queues_intr(vsi);
506
507 /* reserve memory for the fdir programming packet */
508 snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
509 ICE_FDIR_MZ_NAME,
510 eth_dev->data->port_id);
511 mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
512 if (!mz) {
513 PMD_DRV_LOG(ERR, "Cannot init memzone for "
514 "flow director program packet.");
515 err = -ENOMEM;
516 goto fail_mem;
517 }
518 pf->fdir.prg_pkt = mz->addr;
519 pf->fdir.dma_addr = mz->iova;
520 pf->fdir.mz = mz;
521
522 err = ice_fdir_prof_alloc(hw);
523 if (err) {
524 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
525 "flow director profile.");
526 err = -ENOMEM;
527 goto fail_prof;
528 }
529
530 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
531 vsi->base_queue);
532 return ICE_SUCCESS;
533
534 fail_prof:
535 rte_memzone_free(pf->fdir.mz);
536 pf->fdir.mz = NULL;
537 fail_mem:
538 ice_rx_queue_release(pf->fdir.rxq);
539 pf->fdir.rxq = NULL;
540 fail_setup_rx:
541 ice_tx_queue_release(pf->fdir.txq);
542 pf->fdir.txq = NULL;
543 fail_setup_tx:
544 ice_release_vsi(vsi);
545 pf->fdir.fdir_vsi = NULL;
546 return err;
547 }
548
549 static void
550 ice_fdir_prof_free(struct ice_hw *hw)
551 {
552 enum ice_fltr_ptype ptype;
553
554 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
555 ptype < ICE_FLTR_PTYPE_MAX;
556 ptype++) {
557 rte_free(hw->fdir_prof[ptype]);
558 hw->fdir_prof[ptype] = NULL;
559 }
560
561 rte_free(hw->fdir_prof);
562 hw->fdir_prof = NULL;
563 }
564
565 /* Remove a profile for some filter type */
566 static void
567 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
568 {
569 struct ice_hw *hw = ICE_PF_TO_HW(pf);
570 struct ice_fd_hw_prof *hw_prof;
571 uint64_t prof_id;
572 uint16_t vsi_num;
573 int i;
574
575 if (!hw->fdir_prof || !hw->fdir_prof[ptype])
576 return;
577
578 hw_prof = hw->fdir_prof[ptype];
579
580 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
581 for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
582 if (hw_prof->entry_h[i][is_tunnel]) {
583 vsi_num = ice_get_hw_vsi_num(hw,
584 hw_prof->vsi_h[i]);
585 ice_rem_prof_id_flow(hw, ICE_BLK_FD,
586 vsi_num, ptype);
587 ice_flow_rem_entry(hw, ICE_BLK_FD,
588 hw_prof->entry_h[i][is_tunnel]);
589 hw_prof->entry_h[i][is_tunnel] = 0;
590 }
591 }
592 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
593 rte_free(hw_prof->fdir_seg[is_tunnel]);
594 hw_prof->fdir_seg[is_tunnel] = NULL;
595
596 for (i = 0; i < hw_prof->cnt; i++)
597 hw_prof->vsi_h[i] = 0;
598 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
599 }
600
601 /* Remove all created profiles */
602 static void
603 ice_fdir_prof_rm_all(struct ice_pf *pf)
604 {
605 enum ice_fltr_ptype ptype;
606
607 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
608 ptype < ICE_FLTR_PTYPE_MAX;
609 ptype++) {
610 ice_fdir_prof_rm(pf, ptype, false);
611 ice_fdir_prof_rm(pf, ptype, true);
612 }
613 }
614
615 /*
616 * ice_fdir_teardown - release the Flow Director resources
617 * @pf: board private structure
618 */
619 static void
620 ice_fdir_teardown(struct ice_pf *pf)
621 {
622 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
623 struct ice_hw *hw = ICE_PF_TO_HW(pf);
624 struct ice_vsi *vsi;
625 int err;
626
627 vsi = pf->fdir.fdir_vsi;
628 if (!vsi)
629 return;
630
631 ice_vsi_disable_queues_intr(vsi);
632
633 err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
634 if (err)
635 PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
636
637 err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
638 if (err)
639 PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
640
641 err = ice_fdir_counter_release(pf);
642 if (err)
643 PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");
644
645 ice_fdir_release_filter_list(pf);
646
647 ice_tx_queue_release(pf->fdir.txq);
648 pf->fdir.txq = NULL;
649 ice_rx_queue_release(pf->fdir.rxq);
650 pf->fdir.rxq = NULL;
651 ice_fdir_prof_rm_all(pf);
652 ice_fdir_prof_free(hw);
653 ice_release_vsi(vsi);
654 pf->fdir.fdir_vsi = NULL;
655
656 if (pf->fdir.mz) {
657 err = rte_memzone_free(pf->fdir.mz);
658 pf->fdir.mz = NULL;
659 if (err)
660 			PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
661 }
662 }
663
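/* Check the new input set against an existing profile of the same flow type:
 * return 0 if no profile exists or an empty conflicting profile was removed,
 * -EEXIST if the segments already match, -EINVAL if a rule with a different
 * input set is already in use.
 */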
664 static int
665 ice_fdir_cur_prof_conflict(struct ice_pf *pf,
666 enum ice_fltr_ptype ptype,
667 struct ice_flow_seg_info *seg,
668 bool is_tunnel)
669 {
670 struct ice_hw *hw = ICE_PF_TO_HW(pf);
671 struct ice_flow_seg_info *ori_seg;
672 struct ice_fd_hw_prof *hw_prof;
673
674 hw_prof = hw->fdir_prof[ptype];
675 ori_seg = hw_prof->fdir_seg[is_tunnel];
676
677 /* profile does not exist */
678 if (!ori_seg)
679 return 0;
680
681 /* if no input set conflict, return -EEXIST */
682 if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
683 (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
684 PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
685 ptype);
686 return -EEXIST;
687 }
688
689 	/* a rule with a conflicting input set already exists, so give up */
690 if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
691 PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
692 ptype);
693 return -EINVAL;
694 }
695
696 /* it's safe to delete an empty profile */
697 ice_fdir_prof_rm(pf, ptype, is_tunnel);
698 return 0;
699 }
700
701 static bool
702 ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
703 enum ice_fltr_ptype ptype,
704 bool is_tunnel)
705 {
706 struct ice_hw *hw = ICE_PF_TO_HW(pf);
707 struct ice_fd_hw_prof *hw_prof;
708 struct ice_flow_seg_info *seg;
709
710 hw_prof = hw->fdir_prof[ptype];
711 seg = hw_prof->fdir_seg[is_tunnel];
712
713 /* profile does not exist */
714 if (!seg)
715 return true;
716
717 /* profile exists and rule exists, fail to resolve the conflict */
718 if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
719 return false;
720
721 /* it's safe to delete an empty profile */
722 ice_fdir_prof_rm(pf, ptype, is_tunnel);
723
724 return true;
725 }
726
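/* Resolve conflicts between related flow types (e.g. IPV4 UDP/TCP/SCTP vs
 * IPV4 OTHER): empty conflicting profiles are removed, but if a rule already
 * uses one of them the new profile cannot be created and -EINVAL is returned.
 */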
727 static int
728 ice_fdir_cross_prof_conflict(struct ice_pf *pf,
729 enum ice_fltr_ptype ptype,
730 bool is_tunnel)
731 {
732 enum ice_fltr_ptype cflct_ptype;
733
734 switch (ptype) {
735 /* IPv4 */
736 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
737 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
738 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
739 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
740 if (!ice_fdir_prof_resolve_conflict
741 (pf, cflct_ptype, is_tunnel))
742 goto err;
743 break;
744 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
745 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
746 if (!ice_fdir_prof_resolve_conflict
747 (pf, cflct_ptype, is_tunnel))
748 goto err;
749 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
750 if (!ice_fdir_prof_resolve_conflict
751 (pf, cflct_ptype, is_tunnel))
752 goto err;
753 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
754 if (!ice_fdir_prof_resolve_conflict
755 (pf, cflct_ptype, is_tunnel))
756 goto err;
757 break;
758 /* IPv4 GTPU */
759 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
760 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
761 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
762 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
763 if (!ice_fdir_prof_resolve_conflict
764 (pf, cflct_ptype, is_tunnel))
765 goto err;
766 break;
767 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
768 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
769 if (!ice_fdir_prof_resolve_conflict
770 (pf, cflct_ptype, is_tunnel))
771 goto err;
772 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
773 if (!ice_fdir_prof_resolve_conflict
774 (pf, cflct_ptype, is_tunnel))
775 goto err;
776 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
777 if (!ice_fdir_prof_resolve_conflict
778 (pf, cflct_ptype, is_tunnel))
779 goto err;
780 break;
781 /* IPv6 */
782 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
783 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
784 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
785 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
786 if (!ice_fdir_prof_resolve_conflict
787 (pf, cflct_ptype, is_tunnel))
788 goto err;
789 break;
790 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
791 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
792 if (!ice_fdir_prof_resolve_conflict
793 (pf, cflct_ptype, is_tunnel))
794 goto err;
795 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
796 if (!ice_fdir_prof_resolve_conflict
797 (pf, cflct_ptype, is_tunnel))
798 goto err;
799 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
800 if (!ice_fdir_prof_resolve_conflict
801 (pf, cflct_ptype, is_tunnel))
802 goto err;
803 break;
804 default:
805 break;
806 }
807 return 0;
808 err:
809 PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
810 ptype, cflct_ptype);
811 return -EINVAL;
812 }
813
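/* Program an FDIR flow profile for @ptype and add two flow entries, one for
 * the main VSI and one for the FDIR control VSI; undo the additions on error.
 */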
814 static int
815 ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
816 struct ice_vsi *ctrl_vsi,
817 struct ice_flow_seg_info *seg,
818 enum ice_fltr_ptype ptype,
819 bool is_tunnel)
820 {
821 struct ice_hw *hw = ICE_PF_TO_HW(pf);
822 enum ice_flow_dir dir = ICE_FLOW_RX;
823 struct ice_fd_hw_prof *hw_prof;
824 struct ice_flow_prof *prof;
825 uint64_t entry_1 = 0;
826 uint64_t entry_2 = 0;
827 uint16_t vsi_num;
828 int ret;
829 uint64_t prof_id;
830
831 	/* check whether the input set conflicts with the current profile. */
832 ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
833 if (ret)
834 return ret;
835
836 	/* check whether the profile conflicts with other profiles. */
837 ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
838 if (ret)
839 return ret;
840
841 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
842 ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
843 (is_tunnel) ? 2 : 1, NULL, 0, &prof);
844 if (ret)
845 return ret;
846 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
847 vsi->idx, ICE_FLOW_PRIO_NORMAL,
848 seg, NULL, 0, &entry_1);
849 if (ret) {
850 PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
851 ptype);
852 goto err_add_prof;
853 }
854 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
855 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
856 seg, NULL, 0, &entry_2);
857 if (ret) {
858 PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
859 ptype);
860 goto err_add_entry;
861 }
862
863 hw_prof = hw->fdir_prof[ptype];
864 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
865 hw_prof->cnt = 0;
866 hw_prof->fdir_seg[is_tunnel] = seg;
867 hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
868 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
869 pf->hw_prof_cnt[ptype][is_tunnel]++;
870 hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
871 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
872 pf->hw_prof_cnt[ptype][is_tunnel]++;
873
874 return ret;
875
876 err_add_entry:
877 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
878 ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
879 ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
880 err_add_prof:
881 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
882
883 return ret;
884 }
885
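/* Translate input-set bit flags into the corresponding ice_flow_field
 * indexes, appending one entry to @field for each flag present in @inset.
 */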
886 static void
887 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
888 {
889 uint32_t i, j;
890
891 struct ice_inset_map {
892 uint64_t inset;
893 enum ice_flow_field fld;
894 };
895 static const struct ice_inset_map ice_inset_map[] = {
896 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
897 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
898 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
899 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
900 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
901 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
902 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
903 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
904 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
905 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
906 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
907 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
908 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
909 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
910 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
911 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
912 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
913 {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
914 {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
915 {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
916 {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
917 {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
918 {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
919 {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
920 {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
921 {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
922 {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
923 };
924
925 for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
926 if ((inset & ice_inset_map[i].inset) ==
927 ice_inset_map[i].inset)
928 field[j++] = ice_inset_map[i].fld;
929 }
930 }
931
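/* Build the flow segment(s) for @flow and @input_set and program the HW
 * profile: a single segment for non-tunnel flows, outer plus inner segments
 * for tunnel (VXLAN) flows.
 */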
932 static int
933 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
934 uint64_t input_set, enum ice_fdir_tunnel_type ttype)
935 {
936 struct ice_flow_seg_info *seg;
937 struct ice_flow_seg_info *seg_tun = NULL;
938 enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
939 bool is_tunnel;
940 int i, ret;
941
942 if (!input_set)
943 return -EINVAL;
944
945 seg = (struct ice_flow_seg_info *)
946 ice_malloc(hw, sizeof(*seg));
947 if (!seg) {
948 PMD_DRV_LOG(ERR, "No memory can be allocated");
949 return -ENOMEM;
950 }
951
952 for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
953 field[i] = ICE_FLOW_FIELD_IDX_MAX;
954 ice_fdir_input_set_parse(input_set, field);
955
956 switch (flow) {
957 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
958 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
959 ICE_FLOW_SEG_HDR_IPV4);
960 break;
961 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
962 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
963 ICE_FLOW_SEG_HDR_IPV4);
964 break;
965 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
966 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
967 ICE_FLOW_SEG_HDR_IPV4);
968 break;
969 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
970 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
971 break;
972 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
973 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
974 ICE_FLOW_SEG_HDR_IPV6);
975 break;
976 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
977 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
978 ICE_FLOW_SEG_HDR_IPV6);
979 break;
980 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
981 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
982 ICE_FLOW_SEG_HDR_IPV6);
983 break;
984 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
985 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
986 break;
987 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
988 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
989 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
990 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
991 if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU)
992 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
993 ICE_FLOW_SEG_HDR_IPV4);
994 else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH)
995 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
996 ICE_FLOW_SEG_HDR_GTPU_IP |
997 ICE_FLOW_SEG_HDR_IPV4);
998 else
999 			PMD_DRV_LOG(ERR, "unsupported tunnel type.");
1000 break;
1001 default:
1002 		PMD_DRV_LOG(ERR, "unsupported filter type.");
1003 break;
1004 }
1005
1006 for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
1007 ice_flow_set_fld(seg, field[i],
1008 ICE_FLOW_FLD_OFF_INVAL,
1009 ICE_FLOW_FLD_OFF_INVAL,
1010 ICE_FLOW_FLD_OFF_INVAL, false);
1011 }
1012
1013 is_tunnel = ice_fdir_is_tunnel_profile(ttype);
1014 if (!is_tunnel) {
1015 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
1016 seg, flow, false);
1017 } else {
1018 seg_tun = (struct ice_flow_seg_info *)
1019 ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
1020 if (!seg_tun) {
1021 PMD_DRV_LOG(ERR, "No memory can be allocated");
1022 rte_free(seg);
1023 return -ENOMEM;
1024 }
1025 rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
1026 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
1027 seg_tun, flow, true);
1028 }
1029
1030 if (!ret) {
1031 return ret;
1032 } else if (ret < 0) {
1033 rte_free(seg);
1034 if (is_tunnel)
1035 rte_free(seg_tun);
1036 return (ret == -EEXIST) ? 0 : ret;
1037 } else {
1038 return ret;
1039 }
1040 }
1041
1042 static void
1043 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
1044 bool is_tunnel, bool add)
1045 {
1046 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1047 int cnt;
1048
1049 cnt = (add) ? 1 : -1;
1050 hw->fdir_active_fltr += cnt;
1051 if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
1052 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
1053 else
1054 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
1055 }
1056
1057 static int
1058 ice_fdir_init(struct ice_adapter *ad)
1059 {
1060 struct ice_pf *pf = &ad->pf;
1061 struct ice_flow_parser *parser;
1062 int ret;
1063
1064 if (ad->hw.dcf_enabled)
1065 return 0;
1066
1067 ret = ice_fdir_setup(pf);
1068 if (ret)
1069 return ret;
1070
1071 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1072 parser = &ice_fdir_parser_comms;
1073 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1074 parser = &ice_fdir_parser_os;
1075 else
1076 return -EINVAL;
1077
1078 return ice_register_parser(parser, ad);
1079 }
1080
1081 static void
1082 ice_fdir_uninit(struct ice_adapter *ad)
1083 {
1084 struct ice_pf *pf = &ad->pf;
1085 struct ice_flow_parser *parser;
1086
1087 if (ad->hw.dcf_enabled)
1088 return;
1089
1090 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1091 parser = &ice_fdir_parser_comms;
1092 else
1093 parser = &ice_fdir_parser_os;
1094
1095 ice_unregister_parser(parser, ad);
1096
1097 ice_fdir_teardown(pf);
1098 }
1099
1100 static int
1101 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
1102 {
1103 if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
1104 return 1;
1105 else
1106 return 0;
1107 }
1108
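/* Build the programming descriptor and dummy packet for @filter and submit
 * them to the FDIR programming queue to add or remove the rule in HW.
 */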
1109 static int
1110 ice_fdir_add_del_filter(struct ice_pf *pf,
1111 struct ice_fdir_filter_conf *filter,
1112 bool add)
1113 {
1114 struct ice_fltr_desc desc;
1115 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1116 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1117 bool is_tun;
1118 int ret;
1119
1120 filter->input.dest_vsi = pf->main_vsi->idx;
1121
1122 memset(&desc, 0, sizeof(desc));
1123 ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
1124
1125 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1126
1127 memset(pkt, 0, ICE_FDIR_PKT_LEN);
1128 ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
1129 if (ret) {
1130 PMD_DRV_LOG(ERR, "Generate dummy packet failed");
1131 return -EINVAL;
1132 }
1133
1134 return ice_fdir_programming(pf, &desc);
1135 }
1136
1137 static void
1138 ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
1139 struct ice_fdir_filter_conf *filter)
1140 {
1141 struct ice_fdir_fltr *input = &filter->input;
1142 memset(key, 0, sizeof(*key));
1143
1144 key->flow_type = input->flow_type;
1145 rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
1146 rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
1147 rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
1148 rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
1149
1150 rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
1151 rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));
1152
1153 key->tunnel_type = filter->tunnel_type;
1154 }
1155
1156 /* Check whether the flow director filter already exists */
1157 static struct ice_fdir_filter_conf *
1158 ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
1159 const struct ice_fdir_fltr_pattern *key)
1160 {
1161 int ret;
1162
1163 ret = rte_hash_lookup(fdir_info->hash_table, key);
1164 if (ret < 0)
1165 return NULL;
1166
1167 return fdir_info->hash_map[ret];
1168 }
1169
1170 /* Add a flow director entry into the SW list */
1171 static int
1172 ice_fdir_entry_insert(struct ice_pf *pf,
1173 struct ice_fdir_filter_conf *entry,
1174 struct ice_fdir_fltr_pattern *key)
1175 {
1176 struct ice_fdir_info *fdir_info = &pf->fdir;
1177 int ret;
1178
1179 ret = rte_hash_add_key(fdir_info->hash_table, key);
1180 if (ret < 0) {
1181 PMD_DRV_LOG(ERR,
1182 "Failed to insert fdir entry to hash table %d!",
1183 ret);
1184 return ret;
1185 }
1186 fdir_info->hash_map[ret] = entry;
1187
1188 return 0;
1189 }
1190
1191 /* Delete a flow director entry from the SW list */
1192 static int
1193 ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
1194 {
1195 struct ice_fdir_info *fdir_info = &pf->fdir;
1196 int ret;
1197
1198 ret = rte_hash_del_key(fdir_info->hash_table, key);
1199 if (ret < 0) {
1200 PMD_DRV_LOG(ERR,
1201 "Failed to delete fdir filter to hash table %d!",
1202 ret);
1203 return ret;
1204 }
1205 fdir_info->hash_map[ret] = NULL;
1206
1207 return 0;
1208 }
1209
1210 static int
1211 ice_fdir_create_filter(struct ice_adapter *ad,
1212 struct rte_flow *flow,
1213 void *meta,
1214 struct rte_flow_error *error)
1215 {
1216 struct ice_pf *pf = &ad->pf;
1217 struct ice_fdir_filter_conf *filter = meta;
1218 struct ice_fdir_info *fdir_info = &pf->fdir;
1219 struct ice_fdir_filter_conf *entry, *node;
1220 struct ice_fdir_fltr_pattern key;
1221 bool is_tun;
1222 int ret;
1223
1224 ice_fdir_extract_fltr_key(&key, filter);
1225 node = ice_fdir_entry_lookup(fdir_info, &key);
1226 if (node) {
1227 rte_flow_error_set(error, EEXIST,
1228 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1229 "Rule already exists!");
1230 return -rte_errno;
1231 }
1232
1233 entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
1234 if (!entry) {
1235 rte_flow_error_set(error, ENOMEM,
1236 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1237 "Failed to allocate memory");
1238 return -rte_errno;
1239 }
1240
1241 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1242
1243 ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
1244 filter->input_set, filter->tunnel_type);
1245 if (ret) {
1246 rte_flow_error_set(error, -ret,
1247 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1248 "Profile configure failed.");
1249 goto free_entry;
1250 }
1251
1252 /* alloc counter for FDIR */
1253 if (filter->input.cnt_ena) {
1254 struct rte_flow_action_count *act_count = &filter->act_count;
1255
1256 filter->counter = ice_fdir_counter_alloc(pf,
1257 act_count->shared,
1258 act_count->id);
1259 if (!filter->counter) {
1260 rte_flow_error_set(error, EINVAL,
1261 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1262 "Failed to alloc FDIR counter.");
1263 goto free_entry;
1264 }
1265 filter->input.cnt_index = filter->counter->hw_index;
1266 }
1267
1268 ret = ice_fdir_add_del_filter(pf, filter, true);
1269 if (ret) {
1270 rte_flow_error_set(error, -ret,
1271 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1272 "Add filter rule failed.");
1273 goto free_counter;
1274 }
1275
1276 rte_memcpy(entry, filter, sizeof(*entry));
1277 ret = ice_fdir_entry_insert(pf, entry, &key);
1278 if (ret) {
1279 rte_flow_error_set(error, -ret,
1280 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1281 "Insert entry to table failed.");
1282 goto free_entry;
1283 }
1284
1285 flow->rule = entry;
1286 ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);
1287
1288 return 0;
1289
1290 free_counter:
1291 if (filter->counter) {
1292 ice_fdir_counter_free(pf, filter->counter);
1293 filter->counter = NULL;
1294 }
1295
1296 free_entry:
1297 rte_free(entry);
1298 return -rte_errno;
1299 }
1300
1301 static int
1302 ice_fdir_destroy_filter(struct ice_adapter *ad,
1303 struct rte_flow *flow,
1304 struct rte_flow_error *error)
1305 {
1306 struct ice_pf *pf = &ad->pf;
1307 struct ice_fdir_info *fdir_info = &pf->fdir;
1308 struct ice_fdir_filter_conf *filter, *entry;
1309 struct ice_fdir_fltr_pattern key;
1310 bool is_tun;
1311 int ret;
1312
1313 filter = (struct ice_fdir_filter_conf *)flow->rule;
1314
1315 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1316
1317 if (filter->counter) {
1318 ice_fdir_counter_free(pf, filter->counter);
1319 filter->counter = NULL;
1320 }
1321
1322 ice_fdir_extract_fltr_key(&key, filter);
1323 entry = ice_fdir_entry_lookup(fdir_info, &key);
1324 if (!entry) {
1325 rte_flow_error_set(error, ENOENT,
1326 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1327 "Can't find entry.");
1328 return -rte_errno;
1329 }
1330
1331 ret = ice_fdir_add_del_filter(pf, filter, false);
1332 if (ret) {
1333 rte_flow_error_set(error, -ret,
1334 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1335 "Del filter rule failed.");
1336 return -rte_errno;
1337 }
1338
1339 ret = ice_fdir_entry_del(pf, &key);
1340 if (ret) {
1341 rte_flow_error_set(error, -ret,
1342 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1343 "Remove entry from table failed.");
1344 return -rte_errno;
1345 }
1346
1347 ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
1348 flow->rule = NULL;
1349
1350 rte_free(filter);
1351
1352 return 0;
1353 }
1354
1355 static int
1356 ice_fdir_query_count(struct ice_adapter *ad,
1357 struct rte_flow *flow,
1358 struct rte_flow_query_count *flow_stats,
1359 struct rte_flow_error *error)
1360 {
1361 struct ice_pf *pf = &ad->pf;
1362 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1363 struct ice_fdir_filter_conf *filter = flow->rule;
1364 struct ice_fdir_counter *counter = filter->counter;
1365 uint64_t hits_lo, hits_hi;
1366
1367 if (!counter) {
1368 rte_flow_error_set(error, EINVAL,
1369 RTE_FLOW_ERROR_TYPE_ACTION,
1370 NULL,
1371 "FDIR counters not available");
1372 return -rte_errno;
1373 }
1374
1375 /*
1376 	 * Reading the low 32 bits latches the high 32 bits into a shadow
1377 	 * register. Reading the high 32 bits returns the value in the
1378 * shadow register.
1379 */
1380 hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1381 hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1382
1383 flow_stats->hits_set = 1;
1384 flow_stats->hits = hits_lo | (hits_hi << 32);
1385 flow_stats->bytes_set = 0;
1386 flow_stats->bytes = 0;
1387
1388 if (flow_stats->reset) {
1389 /* reset statistic counter value */
1390 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1391 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
1392 }
1393
1394 return 0;
1395 }
1396
1397 static struct ice_flow_engine ice_fdir_engine = {
1398 .init = ice_fdir_init,
1399 .uninit = ice_fdir_uninit,
1400 .create = ice_fdir_create_filter,
1401 .destroy = ice_fdir_destroy_filter,
1402 .query_count = ice_fdir_query_count,
1403 .type = ICE_FLOW_ENGINE_FDIR,
1404 };
1405
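/* Validate an RSS action used as a queue region: the queues must be
 * contiguous, within the device's RX queue range, and a power-of-two count
 * no larger than ICE_FDIR_MAX_QREGION_SIZE; on success the filter is set to
 * queue-group destination mode.
 */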
1406 static int
1407 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1408 struct rte_flow_error *error,
1409 const struct rte_flow_action *act,
1410 struct ice_fdir_filter_conf *filter)
1411 {
1412 const struct rte_flow_action_rss *rss = act->conf;
1413 uint32_t i;
1414
1415 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1416 rte_flow_error_set(error, EINVAL,
1417 RTE_FLOW_ERROR_TYPE_ACTION, act,
1418 "Invalid action.");
1419 return -rte_errno;
1420 }
1421
1422 if (rss->queue_num <= 1) {
1423 rte_flow_error_set(error, EINVAL,
1424 RTE_FLOW_ERROR_TYPE_ACTION, act,
1425 "Queue region size can't be 0 or 1.");
1426 return -rte_errno;
1427 }
1428
1429 	/* check that the queue indexes for the queue region are contiguous */
1430 for (i = 0; i < rss->queue_num - 1; i++) {
1431 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1432 rte_flow_error_set(error, EINVAL,
1433 RTE_FLOW_ERROR_TYPE_ACTION, act,
1434 "Discontinuous queue region");
1435 return -rte_errno;
1436 }
1437 }
1438
1439 if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1440 rte_flow_error_set(error, EINVAL,
1441 RTE_FLOW_ERROR_TYPE_ACTION, act,
1442 "Invalid queue region indexes.");
1443 return -rte_errno;
1444 }
1445
1446 if (!(rte_is_power_of_2(rss->queue_num) &&
1447 (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1448 rte_flow_error_set(error, EINVAL,
1449 RTE_FLOW_ERROR_TYPE_ACTION, act,
1450 "The region size should be any of the following values:"
1451 "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
1452 "of queues do not exceed the VSI allocation.");
1453 return -rte_errno;
1454 }
1455
1456 filter->input.q_index = rss->queue[0];
1457 filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1458 filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1459
1460 return 0;
1461 }
1462
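/* Parse the action list into @filter: at most one destination action
 * (queue/drop/passthru/rss), at most one mark and one count action; with no
 * destination action the filter defaults to PASSTHRU.
 */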
1463 static int
1464 ice_fdir_parse_action(struct ice_adapter *ad,
1465 const struct rte_flow_action actions[],
1466 struct rte_flow_error *error,
1467 struct ice_fdir_filter_conf *filter)
1468 {
1469 struct ice_pf *pf = &ad->pf;
1470 const struct rte_flow_action_queue *act_q;
1471 const struct rte_flow_action_mark *mark_spec = NULL;
1472 const struct rte_flow_action_count *act_count;
1473 uint32_t dest_num = 0;
1474 uint32_t mark_num = 0;
1475 uint32_t counter_num = 0;
1476 int ret;
1477
1478 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1479 switch (actions->type) {
1480 case RTE_FLOW_ACTION_TYPE_VOID:
1481 break;
1482 case RTE_FLOW_ACTION_TYPE_QUEUE:
1483 dest_num++;
1484
1485 act_q = actions->conf;
1486 filter->input.q_index = act_q->index;
1487 if (filter->input.q_index >=
1488 pf->dev_data->nb_rx_queues) {
1489 rte_flow_error_set(error, EINVAL,
1490 RTE_FLOW_ERROR_TYPE_ACTION,
1491 actions,
1492 "Invalid queue for FDIR.");
1493 return -rte_errno;
1494 }
1495 filter->input.dest_ctl =
1496 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1497 break;
1498 case RTE_FLOW_ACTION_TYPE_DROP:
1499 dest_num++;
1500
1501 filter->input.dest_ctl =
1502 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1503 break;
1504 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1505 dest_num++;
1506
1507 filter->input.dest_ctl =
1508 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1509 break;
1510 case RTE_FLOW_ACTION_TYPE_RSS:
1511 dest_num++;
1512
1513 ret = ice_fdir_parse_action_qregion(pf,
1514 error, actions, filter);
1515 if (ret)
1516 return ret;
1517 break;
1518 case RTE_FLOW_ACTION_TYPE_MARK:
1519 mark_num++;
1520
1521 mark_spec = actions->conf;
1522 filter->input.fltr_id = mark_spec->id;
1523 filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1524 break;
1525 case RTE_FLOW_ACTION_TYPE_COUNT:
1526 counter_num++;
1527
1528 act_count = actions->conf;
1529 filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1530 rte_memcpy(&filter->act_count, act_count,
1531 sizeof(filter->act_count));
1532
1533 break;
1534 default:
1535 rte_flow_error_set(error, EINVAL,
1536 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1537 "Invalid action.");
1538 return -rte_errno;
1539 }
1540 }
1541
1542 if (dest_num >= 2) {
1543 rte_flow_error_set(error, EINVAL,
1544 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1545 "Unsupported action combination");
1546 return -rte_errno;
1547 }
1548
1549 if (mark_num >= 2) {
1550 rte_flow_error_set(error, EINVAL,
1551 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1552 "Too many mark actions");
1553 return -rte_errno;
1554 }
1555
1556 if (counter_num >= 2) {
1557 rte_flow_error_set(error, EINVAL,
1558 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1559 "Too many count actions");
1560 return -rte_errno;
1561 }
1562
1563 if (dest_num + mark_num + counter_num == 0) {
1564 rte_flow_error_set(error, EINVAL,
1565 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1566 "Empty action");
1567 return -rte_errno;
1568 }
1569
1570 	/* set the default action to PASSTHRU mode in the "mark/count only" case. */
1571 if (dest_num == 0)
1572 filter->input.dest_ctl =
1573 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1574
1575 return 0;
1576 }
1577
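/* Walk the pattern items, validate the masks, and collect the flow type,
 * input-set flags and header fields into @filter.
 */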
1578 static int
1579 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1580 const struct rte_flow_item pattern[],
1581 struct rte_flow_error *error,
1582 struct ice_fdir_filter_conf *filter)
1583 {
1584 const struct rte_flow_item *item = pattern;
1585 enum rte_flow_item_type item_type;
1586 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1587 enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1588 const struct rte_flow_item_eth *eth_spec, *eth_mask;
1589 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1590 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1591 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1592 const struct rte_flow_item_udp *udp_spec, *udp_mask;
1593 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1594 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1595 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1596 const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1597 uint64_t input_set = ICE_INSET_NONE;
1598 uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1599 uint8_t ipv6_addr_mask[16] = {
1600 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1601 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1602 };
1603 uint32_t vtc_flow_cpu;
1604
1605
1606 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1607 if (item->last) {
1608 rte_flow_error_set(error, EINVAL,
1609 RTE_FLOW_ERROR_TYPE_ITEM,
1610 item,
1611 "Not support range");
1612 return -rte_errno;
1613 }
1614 item_type = item->type;
1615
1616 switch (item_type) {
1617 case RTE_FLOW_ITEM_TYPE_ETH:
1618 eth_spec = item->spec;
1619 eth_mask = item->mask;
1620
1621 if (eth_spec && eth_mask) {
1622 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
1623 !rte_is_zero_ether_addr(&eth_mask->src)) {
1624 rte_flow_error_set(error, EINVAL,
1625 RTE_FLOW_ERROR_TYPE_ITEM,
1626 item,
1627 "Src mac not support");
1628 return -rte_errno;
1629 }
1630
1631 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
1632 rte_flow_error_set(error, EINVAL,
1633 RTE_FLOW_ERROR_TYPE_ITEM,
1634 item,
1635 "Invalid mac addr mask");
1636 return -rte_errno;
1637 }
1638
1639 input_set |= ICE_INSET_DMAC;
1640 rte_memcpy(&filter->input.ext_data.dst_mac,
1641 &eth_spec->dst,
1642 RTE_ETHER_ADDR_LEN);
1643 }
1644 break;
1645 case RTE_FLOW_ITEM_TYPE_IPV4:
1646 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1647 ipv4_spec = item->spec;
1648 ipv4_mask = item->mask;
1649
1650 if (ipv4_spec && ipv4_mask) {
1651 /* Check IPv4 mask and update input set */
1652 if (ipv4_mask->hdr.version_ihl ||
1653 ipv4_mask->hdr.total_length ||
1654 ipv4_mask->hdr.packet_id ||
1655 ipv4_mask->hdr.fragment_offset ||
1656 ipv4_mask->hdr.hdr_checksum) {
1657 rte_flow_error_set(error, EINVAL,
1658 RTE_FLOW_ERROR_TYPE_ITEM,
1659 item,
1660 "Invalid IPv4 mask.");
1661 return -rte_errno;
1662 }
1663 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1664 input_set |= tunnel_type ?
1665 ICE_INSET_TUN_IPV4_SRC :
1666 ICE_INSET_IPV4_SRC;
1667 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1668 input_set |= tunnel_type ?
1669 ICE_INSET_TUN_IPV4_DST :
1670 ICE_INSET_IPV4_DST;
1671 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1672 input_set |= ICE_INSET_IPV4_TOS;
1673 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1674 input_set |= ICE_INSET_IPV4_TTL;
1675 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1676 input_set |= ICE_INSET_IPV4_PROTO;
1677
1678 filter->input.ip.v4.dst_ip =
1679 ipv4_spec->hdr.src_addr;
1680 filter->input.ip.v4.src_ip =
1681 ipv4_spec->hdr.dst_addr;
1682 filter->input.ip.v4.tos =
1683 ipv4_spec->hdr.type_of_service;
1684 filter->input.ip.v4.ttl =
1685 ipv4_spec->hdr.time_to_live;
1686 filter->input.ip.v4.proto =
1687 ipv4_spec->hdr.next_proto_id;
1688 }
1689
1690 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1691 break;
1692 case RTE_FLOW_ITEM_TYPE_IPV6:
1693 l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1694 ipv6_spec = item->spec;
1695 ipv6_mask = item->mask;
1696
1697 if (ipv6_spec && ipv6_mask) {
1698 /* Check IPv6 mask and update input set */
1699 if (ipv6_mask->hdr.payload_len) {
1700 rte_flow_error_set(error, EINVAL,
1701 RTE_FLOW_ERROR_TYPE_ITEM,
1702 item,
1703 "Invalid IPv6 mask");
1704 return -rte_errno;
1705 }
1706
1707 if (!memcmp(ipv6_mask->hdr.src_addr,
1708 ipv6_addr_mask,
1709 RTE_DIM(ipv6_mask->hdr.src_addr)))
1710 input_set |= ICE_INSET_IPV6_SRC;
1711 if (!memcmp(ipv6_mask->hdr.dst_addr,
1712 ipv6_addr_mask,
1713 RTE_DIM(ipv6_mask->hdr.dst_addr)))
1714 input_set |= ICE_INSET_IPV6_DST;
1715
1716 if ((ipv6_mask->hdr.vtc_flow &
1717 rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1718 == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1719 input_set |= ICE_INSET_IPV6_TC;
1720 if (ipv6_mask->hdr.proto == UINT8_MAX)
1721 input_set |= ICE_INSET_IPV6_NEXT_HDR;
1722 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1723 input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1724
1725 rte_memcpy(filter->input.ip.v6.dst_ip,
1726 ipv6_spec->hdr.src_addr, 16);
1727 rte_memcpy(filter->input.ip.v6.src_ip,
1728 ipv6_spec->hdr.dst_addr, 16);
1729
1730 vtc_flow_cpu =
1731 rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1732 filter->input.ip.v6.tc =
1733 (uint8_t)(vtc_flow_cpu >>
1734 ICE_FDIR_IPV6_TC_OFFSET);
1735 filter->input.ip.v6.proto =
1736 ipv6_spec->hdr.proto;
1737 filter->input.ip.v6.hlim =
1738 ipv6_spec->hdr.hop_limits;
1739 }
1740
1741 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1742 break;
1743 case RTE_FLOW_ITEM_TYPE_TCP:
1744 tcp_spec = item->spec;
1745 tcp_mask = item->mask;
1746
1747 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1748 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1749 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1750 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1751
1752 if (tcp_spec && tcp_mask) {
1753 /* Check TCP mask and update input set */
1754 if (tcp_mask->hdr.sent_seq ||
1755 tcp_mask->hdr.recv_ack ||
1756 tcp_mask->hdr.data_off ||
1757 tcp_mask->hdr.tcp_flags ||
1758 tcp_mask->hdr.rx_win ||
1759 tcp_mask->hdr.cksum ||
1760 tcp_mask->hdr.tcp_urp) {
1761 rte_flow_error_set(error, EINVAL,
1762 RTE_FLOW_ERROR_TYPE_ITEM,
1763 item,
1764 "Invalid TCP mask");
1765 return -rte_errno;
1766 }
1767
1768 if (tcp_mask->hdr.src_port == UINT16_MAX)
1769 input_set |= tunnel_type ?
1770 ICE_INSET_TUN_TCP_SRC_PORT :
1771 ICE_INSET_TCP_SRC_PORT;
1772 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1773 input_set |= tunnel_type ?
1774 ICE_INSET_TUN_TCP_DST_PORT :
1775 ICE_INSET_TCP_DST_PORT;
1776
1777 /* Get filter info */
1778 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1779 filter->input.ip.v4.dst_port =
1780 tcp_spec->hdr.src_port;
1781 filter->input.ip.v4.src_port =
1782 tcp_spec->hdr.dst_port;
1783 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1784 filter->input.ip.v6.dst_port =
1785 tcp_spec->hdr.src_port;
1786 filter->input.ip.v6.src_port =
1787 tcp_spec->hdr.dst_port;
1788 }
1789 }
1790 break;
1791 case RTE_FLOW_ITEM_TYPE_UDP:
1792 udp_spec = item->spec;
1793 udp_mask = item->mask;
1794
1795 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1796 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1797 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1798 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1799
1800 if (udp_spec && udp_mask) {
1801 /* Check UDP mask and update input set*/
1802 if (udp_mask->hdr.dgram_len ||
1803 udp_mask->hdr.dgram_cksum) {
1804 rte_flow_error_set(error, EINVAL,
1805 RTE_FLOW_ERROR_TYPE_ITEM,
1806 item,
1807 "Invalid UDP mask");
1808 return -rte_errno;
1809 }
1810
1811 if (udp_mask->hdr.src_port == UINT16_MAX)
1812 input_set |= tunnel_type ?
1813 ICE_INSET_TUN_UDP_SRC_PORT :
1814 ICE_INSET_UDP_SRC_PORT;
1815 if (udp_mask->hdr.dst_port == UINT16_MAX)
1816 input_set |= tunnel_type ?
1817 ICE_INSET_TUN_UDP_DST_PORT :
1818 ICE_INSET_UDP_DST_PORT;
1819
1820 /* Get filter info */
1821 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1822 filter->input.ip.v4.dst_port =
1823 udp_spec->hdr.src_port;
1824 filter->input.ip.v4.src_port =
1825 udp_spec->hdr.dst_port;
1826 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1827 filter->input.ip.v6.src_port =
1828 udp_spec->hdr.dst_port;
1829 filter->input.ip.v6.dst_port =
1830 udp_spec->hdr.src_port;
1831 }
1832 }
1833 break;
1834 case RTE_FLOW_ITEM_TYPE_SCTP:
1835 sctp_spec = item->spec;
1836 sctp_mask = item->mask;
1837
1838 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1839 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1840 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1841 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1842
1843 if (sctp_spec && sctp_mask) {
1844 /* Check SCTP mask and update input set */
1845 if (sctp_mask->hdr.cksum) {
1846 rte_flow_error_set(error, EINVAL,
1847 RTE_FLOW_ERROR_TYPE_ITEM,
1848 item,
1849 "Invalid UDP mask");
1850 return -rte_errno;
1851 }
1852
1853 if (sctp_mask->hdr.src_port == UINT16_MAX)
1854 input_set |= tunnel_type ?
1855 ICE_INSET_TUN_SCTP_SRC_PORT :
1856 ICE_INSET_SCTP_SRC_PORT;
1857 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1858 input_set |= tunnel_type ?
1859 ICE_INSET_TUN_SCTP_DST_PORT :
1860 ICE_INSET_SCTP_DST_PORT;
1861
1862 /* Get filter info */
1863 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1864 filter->input.ip.v4.dst_port =
1865 sctp_spec->hdr.src_port;
1866 filter->input.ip.v4.src_port =
1867 sctp_spec->hdr.dst_port;
1868 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1869 filter->input.ip.v6.dst_port =
1870 sctp_spec->hdr.src_port;
1871 filter->input.ip.v6.src_port =
1872 sctp_spec->hdr.dst_port;
1873 }
1874 }
1875 break;
1876 case RTE_FLOW_ITEM_TYPE_VOID:
1877 break;
1878 case RTE_FLOW_ITEM_TYPE_VXLAN:
1879 l3 = RTE_FLOW_ITEM_TYPE_END;
1880 vxlan_spec = item->spec;
1881 vxlan_mask = item->mask;
1882
1883 if (vxlan_spec || vxlan_mask) {
1884 rte_flow_error_set(error, EINVAL,
1885 RTE_FLOW_ERROR_TYPE_ITEM,
1886 item,
1887 "Invalid vxlan field");
1888 return -rte_errno;
1889 }
1890
1891 tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1892 break;
1893 case RTE_FLOW_ITEM_TYPE_GTPU:
1894 l3 = RTE_FLOW_ITEM_TYPE_END;
1895 gtp_spec = item->spec;
1896 gtp_mask = item->mask;
1897
1898 if (gtp_spec && gtp_mask) {
1899 if (gtp_mask->v_pt_rsv_flags ||
1900 gtp_mask->msg_type ||
1901 gtp_mask->msg_len) {
1902 rte_flow_error_set(error, EINVAL,
1903 RTE_FLOW_ERROR_TYPE_ITEM,
1904 item,
1905 "Invalid GTP mask");
1906 return -rte_errno;
1907 }
1908
1909 if (gtp_mask->teid == UINT32_MAX)
1910 input_set |= ICE_INSET_GTPU_TEID;
1911
1912 filter->input.gtpu_data.teid = gtp_spec->teid;
1913 }
1914
1915 tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1916 break;
1917 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1918 gtp_psc_spec = item->spec;
1919 gtp_psc_mask = item->mask;
1920
1921 if (gtp_psc_spec && gtp_psc_mask) {
1922 if (gtp_psc_mask->qfi == UINT8_MAX)
1923 input_set |= ICE_INSET_GTPU_QFI;
1924
1925 filter->input.gtpu_data.qfi =
1926 gtp_psc_spec->qfi;
1927 }
1928 tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1929 break;
1930 default:
1931 rte_flow_error_set(error, EINVAL,
1932 RTE_FLOW_ERROR_TYPE_ITEM,
1933 item,
1934 "Invalid pattern item.");
1935 return -rte_errno;
1936 }
1937 }
1938
1939 if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU ||
1940 tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH)
1941 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1942
1943 filter->tunnel_type = tunnel_type;
1944 filter->input.flow_type = flow_type;
1945 filter->input_set = input_set;
1946
1947 return 0;
1948 }
1949
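/* Top-level FDIR parser: match the pattern against the supported pattern
 * table, then parse the pattern and actions into the PF's FDIR filter
 * configuration and return it through @meta.
 */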
1950 static int
1951 ice_fdir_parse(struct ice_adapter *ad,
1952 struct ice_pattern_match_item *array,
1953 uint32_t array_len,
1954 const struct rte_flow_item pattern[],
1955 const struct rte_flow_action actions[],
1956 void **meta,
1957 struct rte_flow_error *error)
1958 {
1959 struct ice_pf *pf = &ad->pf;
1960 struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1961 struct ice_pattern_match_item *item = NULL;
1962 uint64_t input_set;
1963 int ret;
1964
1965 memset(filter, 0, sizeof(*filter));
1966 item = ice_search_pattern_match_item(pattern, array, array_len, error);
1967 if (!item)
1968 return -rte_errno;
1969
1970 ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1971 if (ret)
1972 goto error;
1973 input_set = filter->input_set;
1974 if (!input_set || input_set & ~item->input_set_mask) {
1975 rte_flow_error_set(error, EINVAL,
1976 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1977 pattern,
1978 "Invalid input set");
1979 ret = -rte_errno;
1980 goto error;
1981 }
1982
1983 ret = ice_fdir_parse_action(ad, actions, error, filter);
1984 if (ret)
1985 goto error;
1986
1987 if (meta)
1988 *meta = filter;
1989 error:
1990 rte_free(item);
1991 return ret;
1992 }
1993
1994 static struct ice_flow_parser ice_fdir_parser_os = {
1995 .engine = &ice_fdir_engine,
1996 .array = ice_fdir_pattern_os,
1997 .array_len = RTE_DIM(ice_fdir_pattern_os),
1998 .parse_pattern_action = ice_fdir_parse,
1999 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2000 };
2001
2002 static struct ice_flow_parser ice_fdir_parser_comms = {
2003 .engine = &ice_fdir_engine,
2004 .array = ice_fdir_pattern_comms,
2005 .array_len = RTE_DIM(ice_fdir_pattern_comms),
2006 .parse_pattern_action = ice_fdir_parse,
2007 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2008 };
2009
2010 RTE_INIT(ice_fdir_engine_register)
2011 {
2012 ice_register_flow_engine(&ice_fdir_engine);
2013 }