]> git.proxmox.com Git - ceph.git/blob - ceph/src/seastar/dpdk/drivers/net/ipn3ke/ipn3ke_flow.c
import 15.2.0 Octopus source
[ceph.git] / ceph / src / seastar / dpdk / drivers / net / ipn3ke / ipn3ke_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
3 */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_io.h>
14 #include <rte_debug.h>
15 #include <rte_ether.h>
16 #include <rte_ethdev_driver.h>
17 #include <rte_log.h>
18 #include <rte_malloc.h>
19 #include <rte_eth_ctrl.h>
20 #include <rte_tailq.h>
21
22 #include "ipn3ke_rawdev_api.h"
23 #include "ipn3ke_flow.h"
24 #include "ipn3ke_logs.h"
25 #include "ipn3ke_ethdev.h"
26
27 /** Static initializer for items. */
28 #define FLOW_PATTERNS(...) \
29 ((const enum rte_flow_item_type []) { \
30 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
31 })
32
/* Hash key layouts supported by the classifier. Each value doubles as the
 * index into ipn3ke_supported_patterns[] and is written into the key-type
 * id field of the HW lookup key (see ipn3ke_flow_key_generation()).
 */
enum IPN3KE_HASH_KEY_TYPE {
	IPN3KE_HASH_KEY_VXLAN,
	IPN3KE_HASH_KEY_MAC,
	IPN3KE_HASH_KEY_QINQ,
	IPN3KE_HASH_KEY_MPLS,
	IPN3KE_HASH_KEY_IP_TCP,
	IPN3KE_HASH_KEY_IP_UDP,
	IPN3KE_HASH_KEY_IP_NVGRE,
	IPN3KE_HASH_KEY_VXLAN_IP_UDP,
};
43
/* Scratch state produced while converting one rte_flow rule: the selected
 * key type, the requested actions (mark/drop) and the packed key bytes
 * extracted from the pattern items.
 */
struct ipn3ke_flow_parse {
	uint32_t mark:1; /**< Set if the flow is marked. */
	uint32_t drop:1; /**< ACL drop. */
	uint32_t key_type:IPN3KE_FLOW_KEY_ID_BITS; /**< Index into ipn3ke_supported_patterns[]. */
	uint32_t mark_id:IPN3KE_FLOW_RESULT_UID_BITS; /**< Mark identifier. */
	uint8_t key_len; /**< Length in bit. */
	uint8_t key[BITS_TO_BYTES(IPN3KE_FLOW_KEY_DATA_BITS)];
	/**< key1, key2 */
};
53
/* Pattern parser callback: extracts the hash-key bytes for one supported
 * item sequence into @parser. Returns 0 on success or -rte_errno with
 * @error filled in.
 */
typedef int (*pattern_filter_t)(const struct rte_flow_item patterns[],
		struct rte_flow_error *error, struct ipn3ke_flow_parse *parser);


/* One supported flow pattern: the exact item-type sequence (END-terminated,
 * built with FLOW_PATTERNS()) and the filter callback that parses it.
 */
struct ipn3ke_flow_pattern {
	const enum rte_flow_item_type *const items;

	pattern_filter_t filter;
};
63
64 /*
65 * @ RTL definition:
66 * typedef struct packed {
67 * logic [47:0] vxlan_inner_mac;
68 * logic [23:0] vxlan_vni;
69 * } Hash_Key_Vxlan_t;
70 *
71 * @ flow items:
72 * RTE_FLOW_ITEM_TYPE_VXLAN
73 * RTE_FLOW_ITEM_TYPE_ETH
74 */
75 static int
76 ipn3ke_pattern_vxlan(const struct rte_flow_item patterns[],
77 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
78 {
79 const struct rte_flow_item_vxlan *vxlan = NULL;
80 const struct rte_flow_item_eth *eth = NULL;
81 const struct rte_flow_item *item;
82
83 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
84 if (/*!item->spec || item->mask || */item->last) {
85 rte_flow_error_set(error,
86 EINVAL,
87 RTE_FLOW_ERROR_TYPE_ITEM,
88 item,
89 "Only support item with 'spec'");
90 return -rte_errno;
91 }
92
93 switch (item->type) {
94 case RTE_FLOW_ITEM_TYPE_ETH:
95 eth = item->spec;
96
97 rte_memcpy(&parser->key[0],
98 eth->src.addr_bytes,
99 ETHER_ADDR_LEN);
100 break;
101
102 case RTE_FLOW_ITEM_TYPE_VXLAN:
103 vxlan = item->spec;
104
105 rte_memcpy(&parser->key[6], vxlan->vni, 3);
106 break;
107
108 default:
109 rte_flow_error_set(error,
110 EINVAL,
111 RTE_FLOW_ERROR_TYPE_ITEM,
112 item,
113 "Not support item type");
114 return -rte_errno;
115 }
116 }
117
118 if (vxlan != NULL && eth != NULL) {
119 parser->key_len = 48 + 24;
120 return 0;
121 }
122
123 rte_flow_error_set(error,
124 EINVAL,
125 RTE_FLOW_ERROR_TYPE_ITEM,
126 patterns,
127 "Missed some patterns");
128 return -rte_errno;
129 }
130
131 /*
132 * @ RTL definition:
133 * typedef struct packed {
134 * logic [47:0] eth_smac;
135 * } Hash_Key_Mac_t;
136 *
137 * @ flow items:
138 * RTE_FLOW_ITEM_TYPE_ETH
139 */
140 static int
141 ipn3ke_pattern_mac(const struct rte_flow_item patterns[],
142 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
143 {
144 const struct rte_flow_item_eth *eth = NULL;
145 const struct rte_flow_item *item;
146
147 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
148 if (!item->spec || item->mask || item->last) {
149 rte_flow_error_set(error,
150 EINVAL,
151 RTE_FLOW_ERROR_TYPE_ITEM,
152 item,
153 "Only support item with 'spec'");
154 return -rte_errno;
155 }
156
157 switch (item->type) {
158 case RTE_FLOW_ITEM_TYPE_ETH:
159 eth = item->spec;
160
161 rte_memcpy(parser->key,
162 eth->src.addr_bytes,
163 ETHER_ADDR_LEN);
164 break;
165
166 default:
167 rte_flow_error_set(error,
168 EINVAL,
169 RTE_FLOW_ERROR_TYPE_ITEM,
170 item,
171 "Not support item type");
172 return -rte_errno;
173 }
174 }
175
176 if (eth != NULL) {
177 parser->key_len = 48;
178 return 0;
179 }
180
181 rte_flow_error_set(error,
182 EINVAL,
183 RTE_FLOW_ERROR_TYPE_ITEM,
184 patterns,
185 "Missed some patterns");
186 return -rte_errno;
187 }
188
189 /*
190 * @ RTL definition:
191 * typedef struct packed {
192 * logic [11:0] outer_vlan_id;
193 * logic [11:0] inner_vlan_id;
194 * } Hash_Key_QinQ_t;
195 *
196 * @ flow items:
197 * RTE_FLOW_ITEM_TYPE_VLAN
198 * RTE_FLOW_ITEM_TYPE_VLAN
199 */
static int
ipn3ke_pattern_qinq(const struct rte_flow_item patterns[],
		struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_vlan *outer_vlan = NULL;
	const struct rte_flow_item_vlan *inner_vlan = NULL;
	const struct rte_flow_item *item;
	uint16_t tci;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Each item must carry exactly a 'spec': no mask, no range. */
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Only support item with 'spec'");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VLAN:
			/* First VLAN item is the outer tag, the second the
			 * inner tag. parser->key is zeroed by the caller,
			 * so the |= below merges the two 12-bit VIDs.
			 */
			if (!outer_vlan) {
				outer_vlan = item->spec;

				/* Outer VID: bits 11..4 -> key[0],
				 * bits 3..0 -> high nibble of key[1].
				 */
				tci = rte_be_to_cpu_16(outer_vlan->tci);
				parser->key[0] = (tci & 0xff0) >> 4;
				parser->key[1] |= (tci & 0x00f) << 4;
			} else {
				inner_vlan = item->spec;

				/* Inner VID: bits 11..8 -> low nibble of
				 * key[1], bits 7..0 -> key[2].
				 */
				tci = rte_be_to_cpu_16(inner_vlan->tci);
				parser->key[1] |= (tci & 0xf00) >> 8;
				parser->key[2] = (tci & 0x0ff);
			}
			break;

		default:
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Not support item type");
			return -rte_errno;
		}
	}

	if (outer_vlan != NULL && inner_vlan != NULL) {
		parser->key_len = 12 + 12;
		return 0;
	}

	rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns,
			"Missed some patterns");
	return -rte_errno;
}
258
259 /*
260 * @ RTL definition:
261 * typedef struct packed {
262 * logic [19:0] mpls_label1;
263 * logic [19:0] mpls_label2;
264 * } Hash_Key_Mpls_t;
265 *
266 * @ flow items:
267 * RTE_FLOW_ITEM_TYPE_MPLS
268 * RTE_FLOW_ITEM_TYPE_MPLS
269 */
static int
ipn3ke_pattern_mpls(const struct rte_flow_item patterns[],
		struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_mpls *mpls1 = NULL;
	const struct rte_flow_item_mpls *mpls2 = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Each item must carry exactly a 'spec': no mask, no range. */
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Only support item with 'spec'");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_MPLS:
			/* Two 20-bit labels are packed back to back into
			 * key[0..4]. label_tc_s holds label(20) + TC(3) +
			 * S(1); only the top 20 bits of each are kept.
			 */
			if (!mpls1) {
				mpls1 = item->spec;

				/* First label: 20 MSbits -> key[0..1] and
				 * high nibble of key[2].
				 */
				parser->key[0] = mpls1->label_tc_s[0];
				parser->key[1] = mpls1->label_tc_s[1];
				parser->key[2] = mpls1->label_tc_s[2] & 0xf0;
			} else {
				mpls2 = item->spec;

				/* Second label: 20 MSbits shifted down by
				 * a nibble into key[2..4].
				 */
				parser->key[2] |=
					((mpls2->label_tc_s[0] & 0xf0) >> 4);
				parser->key[3] =
					((mpls2->label_tc_s[0] & 0xf) << 4) |
					((mpls2->label_tc_s[1] & 0xf0) >> 4);
				parser->key[4] =
					((mpls2->label_tc_s[1] & 0xf) << 4) |
					((mpls2->label_tc_s[2] & 0xf0) >> 4);
			}
			break;

		default:
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Not support item type");
			return -rte_errno;
		}
	}

	if (mpls1 != NULL && mpls2 != NULL) {
		parser->key_len = 20 + 20;
		return 0;
	}

	rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns,
			"Missed some patterns");
	return -rte_errno;
}
332
333 /*
334 * @ RTL definition:
335 * typedef struct packed {
336 * logic [31:0] ip_sa;
337 * logic [15:0] tcp_sport;
338 * } Hash_Key_Ip_Tcp_t;
339 *
340 * @ flow items:
341 * RTE_FLOW_ITEM_TYPE_IPV4
342 * RTE_FLOW_ITEM_TYPE_TCP
343 */
344 static int
345 ipn3ke_pattern_ip_tcp(const struct rte_flow_item patterns[],
346 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
347 {
348 const struct rte_flow_item_ipv4 *ipv4 = NULL;
349 const struct rte_flow_item_tcp *tcp = NULL;
350 const struct rte_flow_item *item;
351
352 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
353 if (!item->spec || item->mask || item->last) {
354 rte_flow_error_set(error,
355 EINVAL,
356 RTE_FLOW_ERROR_TYPE_ITEM,
357 item,
358 "Only support item with 'spec'");
359 return -rte_errno;
360 }
361
362 switch (item->type) {
363 case RTE_FLOW_ITEM_TYPE_IPV4:
364 ipv4 = item->spec;
365
366 rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
367 break;
368
369 case RTE_FLOW_ITEM_TYPE_TCP:
370 tcp = item->spec;
371
372 rte_memcpy(&parser->key[4], &tcp->hdr.src_port, 2);
373 break;
374
375 default:
376 rte_flow_error_set(error,
377 EINVAL,
378 RTE_FLOW_ERROR_TYPE_ITEM,
379 item,
380 "Not support item type");
381 return -rte_errno;
382 }
383 }
384
385 if (ipv4 != NULL && tcp != NULL) {
386 parser->key_len = 32 + 16;
387 return 0;
388 }
389
390 rte_flow_error_set(error,
391 EINVAL,
392 RTE_FLOW_ERROR_TYPE_ITEM,
393 patterns,
394 "Missed some patterns");
395 return -rte_errno;
396 }
397
398 /*
399 * @ RTL definition:
400 * typedef struct packed {
401 * logic [31:0] ip_sa;
402 * logic [15:0] udp_sport;
403 * } Hash_Key_Ip_Udp_t;
404 *
405 * @ flow items:
406 * RTE_FLOW_ITEM_TYPE_IPV4
407 * RTE_FLOW_ITEM_TYPE_UDP
408 */
409 static int
410 ipn3ke_pattern_ip_udp(const struct rte_flow_item patterns[],
411 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
412 {
413 const struct rte_flow_item_ipv4 *ipv4 = NULL;
414 const struct rte_flow_item_udp *udp = NULL;
415 const struct rte_flow_item *item;
416
417 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
418 if (!item->spec || item->mask || item->last) {
419 rte_flow_error_set(error,
420 EINVAL,
421 RTE_FLOW_ERROR_TYPE_ITEM,
422 item,
423 "Only support item with 'spec'");
424 return -rte_errno;
425 }
426
427 switch (item->type) {
428 case RTE_FLOW_ITEM_TYPE_IPV4:
429 ipv4 = item->spec;
430
431 rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
432 break;
433
434 case RTE_FLOW_ITEM_TYPE_UDP:
435 udp = item->spec;
436
437 rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2);
438 break;
439
440 default:
441 rte_flow_error_set(error,
442 EINVAL,
443 RTE_FLOW_ERROR_TYPE_ITEM,
444 item,
445 "Not support item type");
446 return -rte_errno;
447 }
448 }
449
450 if (ipv4 != NULL && udp != NULL) {
451 parser->key_len = 32 + 16;
452 return 0;
453 }
454
455 rte_flow_error_set(error,
456 EINVAL,
457 RTE_FLOW_ERROR_TYPE_ITEM,
458 patterns,
459 "Missed some patterns");
460 return -rte_errno;
461 }
462
463 /*
464 * @ RTL definition:
465 * typedef struct packed {
466 * logic [31:0] ip_sa;
467 * logic [15:0] udp_sport;
468 * logic [23:0] vsid;
469 * } Hash_Key_Ip_Nvgre_t;
470 *
471 * @ flow items:
472 * RTE_FLOW_ITEM_TYPE_IPV4
473 * RTE_FLOW_ITEM_TYPE_UDP
474 * RTE_FLOW_ITEM_TYPE_NVGRE
475 */
476 static int
477 ipn3ke_pattern_ip_nvgre(const struct rte_flow_item patterns[],
478 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
479 {
480 const struct rte_flow_item_nvgre *nvgre = NULL;
481 const struct rte_flow_item_ipv4 *ipv4 = NULL;
482 const struct rte_flow_item_udp *udp = NULL;
483 const struct rte_flow_item *item;
484
485 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
486 if (!item->spec || item->mask || item->last) {
487 rte_flow_error_set(error,
488 EINVAL,
489 RTE_FLOW_ERROR_TYPE_ITEM,
490 item,
491 "Only support item with 'spec'");
492 return -rte_errno;
493 }
494
495 switch (item->type) {
496 case RTE_FLOW_ITEM_TYPE_IPV4:
497 ipv4 = item->spec;
498
499 rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
500 break;
501
502 case RTE_FLOW_ITEM_TYPE_UDP:
503 udp = item->spec;
504
505 rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2);
506 break;
507
508 case RTE_FLOW_ITEM_TYPE_NVGRE:
509 nvgre = item->spec;
510
511 rte_memcpy(&parser->key[6], nvgre->tni, 3);
512 break;
513
514 default:
515 rte_flow_error_set(error,
516 EINVAL,
517 RTE_FLOW_ERROR_TYPE_ITEM,
518 item,
519 "Not support item type");
520 return -rte_errno;
521 }
522 }
523
524 if (ipv4 != NULL && udp != NULL && nvgre != NULL) {
525 parser->key_len = 32 + 16 + 24;
526 return 0;
527 }
528
529 rte_flow_error_set(error,
530 EINVAL,
531 RTE_FLOW_ERROR_TYPE_ITEM,
532 patterns,
533 "Missed some patterns");
534 return -rte_errno;
535 }
536
537 /*
538 * @ RTL definition:
539 * typedef struct packed{
540 * logic [23:0] vxlan_vni;
541 * logic [31:0] ip_sa;
542 * logic [15:0] udp_sport;
543 * } Hash_Key_Vxlan_Ip_Udp_t;
544 *
545 * @ flow items:
546 * RTE_FLOW_ITEM_TYPE_VXLAN
547 * RTE_FLOW_ITEM_TYPE_IPV4
548 * RTE_FLOW_ITEM_TYPE_UDP
549 */
550 static int
551 ipn3ke_pattern_vxlan_ip_udp(const struct rte_flow_item patterns[],
552 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
553 {
554 const struct rte_flow_item_vxlan *vxlan = NULL;
555 const struct rte_flow_item_ipv4 *ipv4 = NULL;
556 const struct rte_flow_item_udp *udp = NULL;
557 const struct rte_flow_item *item;
558
559 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
560 if (!item->spec || item->mask || item->last) {
561 rte_flow_error_set(error,
562 EINVAL,
563 RTE_FLOW_ERROR_TYPE_ITEM,
564 item,
565 "Only support item with 'spec'");
566 return -rte_errno;
567 }
568
569 switch (item->type) {
570 case RTE_FLOW_ITEM_TYPE_VXLAN:
571 vxlan = item->spec;
572
573 rte_memcpy(&parser->key[0], vxlan->vni, 3);
574 break;
575
576 case RTE_FLOW_ITEM_TYPE_IPV4:
577 ipv4 = item->spec;
578
579 rte_memcpy(&parser->key[3], &ipv4->hdr.src_addr, 4);
580 break;
581
582 case RTE_FLOW_ITEM_TYPE_UDP:
583 udp = item->spec;
584
585 rte_memcpy(&parser->key[7], &udp->hdr.src_port, 2);
586 break;
587
588 default:
589 rte_flow_error_set(error,
590 EINVAL,
591 RTE_FLOW_ERROR_TYPE_ITEM,
592 item,
593 "Not support item type");
594 return -rte_errno;
595 }
596 }
597
598 if (vxlan != NULL && ipv4 != NULL && udp != NULL) {
599 parser->key_len = 24 + 32 + 16;
600 return 0;
601 }
602
603 rte_flow_error_set(error,
604 EINVAL,
605 RTE_FLOW_ERROR_TYPE_ITEM,
606 patterns,
607 "Missed some patterns");
608 return -rte_errno;
609 }
610
/* Registry of the item sequences the classifier understands, indexed by
 * enum IPN3KE_HASH_KEY_TYPE. Scanned in order by ipn3ke_find_filter_func();
 * the matching index becomes parser->key_type.
 */
static const struct ipn3ke_flow_pattern ipn3ke_supported_patterns[] = {
	[IPN3KE_HASH_KEY_VXLAN] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN,
					RTE_FLOW_ITEM_TYPE_ETH),
		.filter = ipn3ke_pattern_vxlan,
	},

	[IPN3KE_HASH_KEY_MAC] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_ETH),
		.filter = ipn3ke_pattern_mac,
	},

	[IPN3KE_HASH_KEY_QINQ] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VLAN,
					RTE_FLOW_ITEM_TYPE_VLAN),
		.filter = ipn3ke_pattern_qinq,
	},

	[IPN3KE_HASH_KEY_MPLS] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_MPLS,
					RTE_FLOW_ITEM_TYPE_MPLS),
		.filter = ipn3ke_pattern_mpls,
	},

	[IPN3KE_HASH_KEY_IP_TCP] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
					RTE_FLOW_ITEM_TYPE_TCP),
		.filter = ipn3ke_pattern_ip_tcp,
	},

	[IPN3KE_HASH_KEY_IP_UDP] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
					RTE_FLOW_ITEM_TYPE_UDP),
		.filter = ipn3ke_pattern_ip_udp,
	},

	[IPN3KE_HASH_KEY_IP_NVGRE] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
					RTE_FLOW_ITEM_TYPE_UDP,
					RTE_FLOW_ITEM_TYPE_NVGRE),
		.filter = ipn3ke_pattern_ip_nvgre,
	},

	[IPN3KE_HASH_KEY_VXLAN_IP_UDP] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN,
					RTE_FLOW_ITEM_TYPE_IPV4,
					RTE_FLOW_ITEM_TYPE_UDP),
		.filter = ipn3ke_pattern_vxlan_ip_udp,
	},
};
661
662 static int
663 ipn3ke_flow_convert_attributes(const struct rte_flow_attr *attr,
664 struct rte_flow_error *error)
665 {
666 if (!attr) {
667 rte_flow_error_set(error,
668 EINVAL,
669 RTE_FLOW_ERROR_TYPE_ATTR,
670 NULL,
671 "NULL attribute.");
672 return -rte_errno;
673 }
674
675 if (attr->group) {
676 rte_flow_error_set(error,
677 ENOTSUP,
678 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
679 NULL,
680 "groups are not supported");
681 return -rte_errno;
682 }
683
684 if (attr->egress) {
685 rte_flow_error_set(error,
686 ENOTSUP,
687 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
688 NULL,
689 "egress is not supported");
690 return -rte_errno;
691 }
692
693 if (attr->transfer) {
694 rte_flow_error_set(error,
695 ENOTSUP,
696 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
697 NULL,
698 "transfer is not supported");
699 return -rte_errno;
700 }
701
702 if (!attr->ingress) {
703 rte_flow_error_set(error,
704 ENOTSUP,
705 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
706 NULL,
707 "only ingress is supported");
708 return -rte_errno;
709 }
710
711 return 0;
712 }
713
714 static int
715 ipn3ke_flow_convert_actions(const struct rte_flow_action actions[],
716 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
717 {
718 const struct rte_flow_action_mark *mark = NULL;
719
720 if (!actions) {
721 rte_flow_error_set(error,
722 EINVAL,
723 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
724 NULL,
725 "NULL action.");
726 return -rte_errno;
727 }
728
729 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
730 switch (actions->type) {
731 case RTE_FLOW_ACTION_TYPE_VOID:
732 break;
733
734 case RTE_FLOW_ACTION_TYPE_MARK:
735 if (mark) {
736 rte_flow_error_set(error,
737 ENOTSUP,
738 RTE_FLOW_ERROR_TYPE_ACTION,
739 actions,
740 "duplicated mark");
741 return -rte_errno;
742 }
743
744 mark = actions->conf;
745 if (!mark) {
746 rte_flow_error_set(error,
747 EINVAL,
748 RTE_FLOW_ERROR_TYPE_ACTION,
749 actions,
750 "mark must be defined");
751 return -rte_errno;
752 } else if (mark->id > IPN3KE_FLOW_RESULT_UID_MAX) {
753 rte_flow_error_set(error,
754 ENOTSUP,
755 RTE_FLOW_ERROR_TYPE_ACTION,
756 actions,
757 "mark id is out of range");
758 return -rte_errno;
759 }
760
761 parser->mark = 1;
762 parser->mark_id = mark->id;
763 break;
764
765 case RTE_FLOW_ACTION_TYPE_DROP:
766 parser->drop = 1;
767 break;
768
769 default:
770 rte_flow_error_set(error,
771 ENOTSUP,
772 RTE_FLOW_ERROR_TYPE_ACTION,
773 actions,
774 "invalid action");
775 return -rte_errno;
776 }
777 }
778
779 if (!parser->drop && !parser->mark) {
780 rte_flow_error_set(error,
781 EINVAL,
782 RTE_FLOW_ERROR_TYPE_ACTION,
783 actions,
784 "no valid actions");
785 return -rte_errno;
786 }
787
788 return 0;
789 }
790
791 static bool
792 ipn3ke_match_pattern(const enum rte_flow_item_type *patterns,
793 const struct rte_flow_item *input)
794 {
795 const struct rte_flow_item *item = input;
796
797 while ((*patterns == item->type) &&
798 (*patterns != RTE_FLOW_ITEM_TYPE_END)) {
799 patterns++;
800 item++;
801 }
802
803 return (*patterns == RTE_FLOW_ITEM_TYPE_END &&
804 item->type == RTE_FLOW_ITEM_TYPE_END);
805 }
806
807 static pattern_filter_t
808 ipn3ke_find_filter_func(const struct rte_flow_item *input,
809 uint32_t *idx)
810 {
811 pattern_filter_t filter = NULL;
812 uint32_t i;
813
814 for (i = 0; i < RTE_DIM(ipn3ke_supported_patterns); i++) {
815 if (ipn3ke_match_pattern(ipn3ke_supported_patterns[i].items,
816 input)) {
817 filter = ipn3ke_supported_patterns[i].filter;
818 *idx = i;
819 break;
820 }
821 }
822
823 return filter;
824 }
825
826 static int
827 ipn3ke_flow_convert_items(const struct rte_flow_item items[],
828 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
829 {
830 pattern_filter_t filter = NULL;
831 uint32_t idx;
832
833 if (!items) {
834 rte_flow_error_set(error,
835 EINVAL,
836 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
837 NULL,
838 "NULL pattern.");
839 return -rte_errno;
840 }
841
842 filter = ipn3ke_find_filter_func(items, &idx);
843
844 if (!filter) {
845 rte_flow_error_set(error,
846 EINVAL,
847 RTE_FLOW_ERROR_TYPE_ITEM,
848 items,
849 "Unsupported pattern");
850 return -rte_errno;
851 }
852
853 parser->key_type = idx;
854
855 return filter(items, error, parser);
856 }
857
858 /* Put the least @nbits of @data into @offset of @dst bits stream, and
859 * the @offset starts from MSB to LSB in each byte.
860 *
861 * MSB LSB
862 * +------+------+------+------+
863 * | | | | |
864 * +------+------+------+------+
865 * ^ ^
866 * |<- data: nbits ->|
867 * |
868 * offset
869 */
static void
copy_data_bits(uint8_t *dst, uint64_t data,
		uint32_t offset, uint8_t nbits)
{
	uint8_t set, *p = &dst[offset / BITS_PER_BYTE];
	uint8_t bits_to_set = BITS_PER_BYTE - (offset % BITS_PER_BYTE);
	uint8_t mask_to_set = 0xff >> (offset % BITS_PER_BYTE);
	uint32_t size = offset + nbits;

	/* A uint64_t payload can carry at most 64 bits. */
	if (nbits > (sizeof(data) * BITS_PER_BYTE)) {
		IPN3KE_AFU_PMD_ERR("nbits is out of range");
		return;
	}

	/* Fill the (possibly partial) first byte, then whole bytes, while
	 * at least bits_to_set bits remain. Note: nbits and bits_to_set are
	 * uint8_t, so they promote to int here and the subtraction may
	 * legitimately go negative to terminate the loop.
	 */
	while (nbits - bits_to_set >= 0) {
		set = data >> (nbits - bits_to_set);

		*p &= ~mask_to_set;
		*p |= (set & mask_to_set);

		nbits -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = 0xff;
		p++;
	}

	/* Remaining bits that end in the middle of the final byte. */
	if (nbits) {
		uint8_t shift = BITS_PER_BYTE - (size % BITS_PER_BYTE);

		set = data << shift;
		mask_to_set = 0xff << shift;

		*p &= ~mask_to_set;
		*p |= (set & mask_to_set);
	}
}
906
/* Pack the parsed key into the flow's HW key image: first the key-type id
 * into its dedicated field, then the key bytes, right-aligned inside the
 * IPN3KE_FLOW_KEY_DATA_BITS-wide data field.
 */
static void
ipn3ke_flow_key_generation(struct ipn3ke_flow_parse *parser,
		struct rte_flow *flow)
{
	uint32_t i, shift_bytes, len_in_bytes, offset;
	uint64_t key;
	uint8_t *dst;

	dst = flow->rule.key;

	copy_data_bits(dst,
			parser->key_type,
			IPN3KE_FLOW_KEY_ID_OFFSET,
			IPN3KE_FLOW_KEY_ID_BITS);

	/* The MSb of key is filled to 0 when it is less than
	 * IPN3KE_FLOW_KEY_DATA_BITS bit. And the parsed key data is
	 * save as MSB byte first in the array, it needs to move
	 * the bits before formatting them.
	 */
	key = 0;
	shift_bytes = 0;
	len_in_bytes = BITS_TO_BYTES(parser->key_len);
	offset = (IPN3KE_FLOW_KEY_DATA_OFFSET +
		IPN3KE_FLOW_KEY_DATA_BITS -
		parser->key_len);

	for (i = 0; i < len_in_bytes; i++) {
		/* Accumulate key bytes MSB-first into a 64-bit chunk. */
		key = (key << 8) | parser->key[i];

		if (++shift_bytes == sizeof(key)) {
			shift_bytes = 0;

			/* Flush a full 64-bit chunk into the bit stream. */
			copy_data_bits(dst, key, offset,
					sizeof(key) * BITS_PER_BYTE);
			offset += sizeof(key) * BITS_PER_BYTE;
			key = 0;
		}
	}

	if (shift_bytes != 0) {
		uint32_t rem_bits;

		/* Flush the remaining partial chunk, dropping the padding
		 * bits that do not belong to the key.
		 */
		rem_bits = parser->key_len % (sizeof(key) * BITS_PER_BYTE);
		key >>= (shift_bytes * 8 - rem_bits);
		copy_data_bits(dst, key, offset, rem_bits);
	}
}
955
/* Fill the flow's HW result image for a mark rule: the ACL flag bit plus
 * the mark identifier taken from the MARK action. Drop rules program no
 * result at all.
 */
static void
ipn3ke_flow_result_generation(struct ipn3ke_flow_parse *parser,
		struct rte_flow *flow)
{
	uint8_t *dst;

	if (parser->drop)
		return;

	dst = flow->rule.result;

	/* Set the ACL flag bit to 1. */
	copy_data_bits(dst,
			1,
			IPN3KE_FLOW_RESULT_ACL_OFFSET,
			IPN3KE_FLOW_RESULT_ACL_BITS);

	/* Store the mark identifier from the MARK action. */
	copy_data_bits(dst,
			parser->mark_id,
			IPN3KE_FLOW_RESULT_UID_OFFSET,
			IPN3KE_FLOW_RESULT_UID_BITS);
}
977
978 #define MHL_COMMAND_TIME_COUNT 0xFFFF
979 #define MHL_COMMAND_TIME_INTERVAL_US 10
980
/* Program one flow into (or remove it from, when @is_add is 0) the HW MHL
 * lookup table: dump the key/result for debugging, load the four key words
 * and the result word into the MHL registers, wait for the engine to go
 * idle, then issue the INSERT or DELETE management command.
 * Returns 0 on success, -1 when the busy-wait times out.
 */
static int
ipn3ke_flow_hw_update(struct ipn3ke_hw *hw,
		struct rte_flow *flow, uint32_t is_add)
{
	uint32_t *pdata = NULL;
	uint32_t data;
	uint32_t time_out = MHL_COMMAND_TIME_COUNT;
	uint32_t i;

	IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump start\n");

	pdata = (uint32_t *)flow->rule.key;
	IPN3KE_AFU_PMD_DEBUG(" - key :");

	for (i = 0; i < RTE_DIM(flow->rule.key); i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.key[i]);

	/* Dump the key again as the four 32-bit words (highest word first,
	 * byte-swapped) exactly as they are written to the registers below.
	 */
	for (i = 0; i < 4; i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", ipn3ke_swap32(pdata[3 - i]));
	IPN3KE_AFU_PMD_DEBUG("\n");

	pdata = (uint32_t *)flow->rule.result;
	IPN3KE_AFU_PMD_DEBUG(" - result:");

	for (i = 0; i < RTE_DIM(flow->rule.result); i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.result[i]);

	for (i = 0; i < 1; i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", pdata[i]);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump end\n");

	/* Load the 128-bit key: word 3 of the image goes into KEY_0, down
	 * to word 0 into KEY_3, each byte-swapped for the HW.
	 */
	pdata = (uint32_t *)flow->rule.key;

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_0,
			0,
			ipn3ke_swap32(pdata[3]),
			IPN3KE_CLF_MHL_KEY_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_1,
			0,
			ipn3ke_swap32(pdata[2]),
			IPN3KE_CLF_MHL_KEY_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_2,
			0,
			ipn3ke_swap32(pdata[1]),
			IPN3KE_CLF_MHL_KEY_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_3,
			0,
			ipn3ke_swap32(pdata[0]),
			IPN3KE_CLF_MHL_KEY_MASK);

	/* Load the 32-bit result word. */
	pdata = (uint32_t *)flow->rule.result;
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_RES,
			0,
			ipn3ke_swap32(pdata[0]),
			IPN3KE_CLF_MHL_RES_MASK);

	/* insert/delete the key and result */
	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_MHL_MGMT_CTRL,
			0,
			0x80000000);
	time_out = MHL_COMMAND_TIME_COUNT;
	/* Poll the busy bit until the engine is idle or the budget expires. */
	while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
		(time_out > 0)) {
		data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_MHL_MGMT_CTRL,
				0,
				0x80000000);
		time_out--;
		rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
	}
	if (!time_out)
		return -1;
	if (is_add)
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_MGMT_CTRL,
			0,
			IPN3KE_CLF_MHL_MGMT_CTRL_INSERT,
			0x3);
	else
		IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_MGMT_CTRL,
			0,
			IPN3KE_CLF_MHL_MGMT_CTRL_DELETE,
			0x3);

	return 0;
}
1078
1079 static int
1080 ipn3ke_flow_hw_flush(struct ipn3ke_hw *hw)
1081 {
1082 uint32_t data;
1083 uint32_t time_out = MHL_COMMAND_TIME_COUNT;
1084
1085 /* flush the MHL lookup table */
1086 data = 0;
1087 data = IPN3KE_MASK_READ_REG(hw,
1088 IPN3KE_CLF_MHL_MGMT_CTRL,
1089 0,
1090 0x80000000);
1091 time_out = MHL_COMMAND_TIME_COUNT;
1092 while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
1093 (time_out > 0)) {
1094 data = IPN3KE_MASK_READ_REG(hw,
1095 IPN3KE_CLF_MHL_MGMT_CTRL,
1096 0,
1097 0x80000000);
1098 time_out--;
1099 rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
1100 }
1101 if (!time_out)
1102 return -1;
1103 IPN3KE_MASK_WRITE_REG(hw,
1104 IPN3KE_CLF_MHL_MGMT_CTRL,
1105 0,
1106 IPN3KE_CLF_MHL_MGMT_CTRL_FLUSH,
1107 0x3);
1108
1109 return 0;
1110 }
1111
/* Build the HW key and result images from the parsed rule and program them
 * into the MHL table.
 * NOTE(review): the return value of ipn3ke_flow_hw_update() is discarded
 * here, so a HW busy-timeout during insert is not reported to the caller.
 */
static void
ipn3ke_flow_convert_finalise(struct ipn3ke_hw *hw,
		struct ipn3ke_flow_parse *parser, struct rte_flow *flow)
{
	ipn3ke_flow_key_generation(parser, flow);
	ipn3ke_flow_result_generation(parser, flow);
	ipn3ke_flow_hw_update(hw, flow, 1);
}
1120
1121 static int
1122 ipn3ke_flow_convert(const struct rte_flow_attr *attr,
1123 const struct rte_flow_item items[],
1124 const struct rte_flow_action actions[], struct rte_flow_error *error,
1125 struct ipn3ke_flow_parse *parser)
1126 {
1127 int ret;
1128
1129 ret = ipn3ke_flow_convert_attributes(attr, error);
1130 if (ret)
1131 return ret;
1132
1133 ret = ipn3ke_flow_convert_actions(actions, error, parser);
1134 if (ret)
1135 return ret;
1136
1137 ret = ipn3ke_flow_convert_items(items, error, parser);
1138 if (ret)
1139 return ret;
1140
1141 return 0;
1142 }
1143
1144 static int
1145 ipn3ke_flow_validate(__rte_unused struct rte_eth_dev *dev,
1146 const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
1147 const struct rte_flow_action actions[], struct rte_flow_error *error)
1148 {
1149 struct ipn3ke_flow_parse parser = {0};
1150 return ipn3ke_flow_convert(attr, pattern, actions, error, &parser);
1151 }
1152
1153 static struct rte_flow *
1154 ipn3ke_flow_create(struct rte_eth_dev *dev,
1155 const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
1156 const struct rte_flow_action actions[], struct rte_flow_error *error)
1157 {
1158 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1159 struct ipn3ke_flow_parse parser = {0};
1160 struct rte_flow *flow;
1161 int ret;
1162
1163 if (hw->flow_num_entries == hw->flow_max_entries) {
1164 rte_flow_error_set(error,
1165 ENOBUFS,
1166 RTE_FLOW_ERROR_TYPE_HANDLE,
1167 NULL,
1168 "The flow table is full.");
1169 return NULL;
1170 }
1171
1172 ret = ipn3ke_flow_convert(attr, pattern, actions, error, &parser);
1173 if (ret < 0) {
1174 rte_flow_error_set(error,
1175 -ret,
1176 RTE_FLOW_ERROR_TYPE_HANDLE,
1177 NULL,
1178 "Failed to create flow.");
1179 return NULL;
1180 }
1181
1182 flow = rte_zmalloc("ipn3ke_flow", sizeof(struct rte_flow), 0);
1183 if (!flow) {
1184 rte_flow_error_set(error,
1185 ENOMEM,
1186 RTE_FLOW_ERROR_TYPE_HANDLE,
1187 NULL,
1188 "Failed to allocate memory");
1189 return flow;
1190 }
1191
1192 ipn3ke_flow_convert_finalise(hw, &parser, flow);
1193
1194 TAILQ_INSERT_TAIL(&hw->flow_list, flow, next);
1195
1196 return flow;
1197 }
1198
1199 static int
1200 ipn3ke_flow_destroy(struct rte_eth_dev *dev,
1201 struct rte_flow *flow, struct rte_flow_error *error)
1202 {
1203 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1204 int ret = 0;
1205
1206 ret = ipn3ke_flow_hw_update(hw, flow, 0);
1207 if (!ret) {
1208 TAILQ_REMOVE(&hw->flow_list, flow, next);
1209 rte_free(flow);
1210 } else {
1211 rte_flow_error_set(error,
1212 -ret,
1213 RTE_FLOW_ERROR_TYPE_HANDLE,
1214 NULL,
1215 "Failed to destroy flow.");
1216 }
1217
1218 return ret;
1219 }
1220
1221 static int
1222 ipn3ke_flow_flush(struct rte_eth_dev *dev,
1223 __rte_unused struct rte_flow_error *error)
1224 {
1225 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1226 struct rte_flow *flow, *temp;
1227
1228 TAILQ_FOREACH_SAFE(flow, &hw->flow_list, next, temp) {
1229 TAILQ_REMOVE(&hw->flow_list, flow, next);
1230 rte_free(flow);
1231 }
1232
1233 return ipn3ke_flow_hw_flush(hw);
1234 }
1235
/* One-time classifier bring-up: program the RX classifier and MHL control
 * registers (each write is read back and logged for debugging), flush the
 * lookup table, and initialise the SW flow list and entry accounting.
 * @dev is actually a struct ipn3ke_hw pointer. Always returns 0.
 */
int ipn3ke_flow_init(void *dev)
{
	struct ipn3ke_hw *hw = (struct ipn3ke_hw *)dev;
	uint32_t data;

	/* disable rx classifier bypass */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_RX_TEST,
			0, 0, 0x1);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_RX_TEST,
				0,
				0x1);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_TEST: %x\n", data);

	/* configure base mac address */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_BASE_DST_MAC_ADDR_HI,
			0,
			0x2457,
			0xFFFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_BASE_DST_MAC_ADDR_HI,
				0,
				0xFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_HI: %x\n", data);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW,
			0,
			0x9bdf1000,
			0xFFFFFFFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW,
				0,
				0xFFFFFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW: %x\n", data);


	/* configure hash lookup rules enable */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_LKUP_ENABLE,
			0,
			0xFD,
			0xFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_LKUP_ENABLE,
				0,
				0xFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data);


	/* configure rx parse config, settings associatied with VxLAN */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_RX_PARSE_CFG,
			0,
			0x212b5,
			0x3FFFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_RX_PARSE_CFG,
				0,
				0x3FFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_PARSE_CFG: %x\n", data);


	/* configure QinQ S-Tag */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_QINQ_STAG,
			0,
			0x88a8,
			0xFFFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_QINQ_STAG,
				0,
				0xFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_QINQ_STAG: %x\n", data);


	/* configure gen ctrl */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_GEN_CTRL,
			0,
			0x3,
			0x3);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_MHL_GEN_CTRL,
				0,
				0x1F);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_GEN_CTRL: %x\n", data);


	/* clear monitoring register */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_MON_0,
			0,
			0xFFFFFFFF,
			0xFFFFFFFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_MHL_MON_0,
				0,
				0xFFFFFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_MON_0: %x\n", data);


	/* Start from an empty HW lookup table. */
	ipn3ke_flow_hw_flush(hw);

	/* Initialise SW bookkeeping: empty list, capacity read from HW. */
	TAILQ_INIT(&hw->flow_list);
	hw->flow_max_entries = IPN3KE_MASK_READ_REG(hw,
						IPN3KE_CLF_EM_NUM,
						0,
						0xFFFFFFFF);
	hw->flow_num_entries = 0;

	return 0;
}
1367
/* rte_flow callbacks exported by this driver. */
const struct rte_flow_ops ipn3ke_flow_ops = {
	.validate = ipn3ke_flow_validate,
	.create = ipn3ke_flow_create,
	.destroy = ipn3ke_flow_destroy,
	.flush = ipn3ke_flow_flush,
};
1374