/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

/*
 * Security Associations
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_random.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "ipsec.h"
#include "esp.h"
#include "parser.h"

#define IPDEFTTL 64

struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};


const struct supported_cipher_algo cipher_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0,
		.block_size = 4,
		.key_len = 0
	},
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	},
	{
		.keyword = "aes-256-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 32
	},
	{
		.keyword = "aes-128-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20
	},
	{
		.keyword = "3des-cbc",
		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
		.iv_len = 8,
		.block_size = 8,
		.key_len = 24
	}
};

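/*
 * Note on the table above: for "aes-128-ctr" the configured key_len of 20
 * is not the AES key size; as parse_sa_tokens() below shows, the last
 * 4 bytes of the supplied key string are split off into rule->salt,
 * leaving a 16-byte cipher key.
 */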
const struct supported_auth_algo auth_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0,
		.key_len = 0,
		.key_not_req = 1
	},
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	},
	{
		.keyword = "sha256-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 12,
		.key_len = 32
	}
};

const struct supported_aead_algo aead_algos[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	}
};

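/*
 * Note: as with aes-128-ctr above, the 20-byte key_len for "aes-128-gcm"
 * covers a 16-byte AES key plus a trailing 4-byte salt that
 * parse_sa_tokens() copies into rule->salt; digest_len is the 16-byte GCM
 * tag and aad_len the 8 bytes of ESP SPI and sequence number carried as
 * additional authenticated data.
 */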
static struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
static uint32_t nb_sa_out;

static struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
static uint32_t nb_sa_in;

static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		const struct supported_cipher_algo *algo =
			&cipher_algos[i];

		if (strcmp(cipher_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		const struct supported_auth_algo *algo =
			&auth_algos[i];

		if (strcmp(auth_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		const struct supported_aead_algo *algo =
			&aead_algos[i];

		if (strcmp(aead_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

/** parse_key_string
 *  parse x:x:x:x.... hex number key string into uint8_t *key
 *  return:
 *  > 0: number of bytes parsed
 *  0:   failed
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		key[nb_bytes++] = strtol(sub_str, NULL, 16);
	}

	return nb_bytes;
}

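/*
 * Example: the key string "de:ad:be:ef" fills key[] with
 * { 0xde, 0xad, 0xbe, 0xef } and parse_key_string() returns 4; a malformed
 * token such as "dead" (more than two hex digits between colons) makes it
 * return 0.
 */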
void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
	struct parse_status *status)
{
	struct ipsec_sa *rule = NULL;
	uint32_t ti; /*token index*/
	uint32_t *ri /*rule index*/;
	uint32_t cipher_algo_p = 0;
	uint32_t auth_algo_p = 0;
	uint32_t aead_algo_p = 0;
	uint32_t src_p = 0;
	uint32_t dst_p = 0;
	uint32_t mode_p = 0;
	uint32_t type_p = 0;
	uint32_t portid_p = 0;

	if (strcmp(tokens[0], "in") == 0) {
		ri = &nb_sa_in;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_in[*ri];
	} else {
		ri = &nb_sa_out;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_out[*ri];
	}

	/* spi number */
	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
	if (status->status < 0)
		return;
	if (atoi(tokens[1]) == INVALID_SPI)
		return;
	rule->spi = atoi(tokens[1]);

	for (ti = 2; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mode") == 0) {
			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "ipv4-tunnel") == 0)
				rule->flags = IP4_TUNNEL;
			else if (strcmp(tokens[ti], "ipv6-tunnel") == 0)
				rule->flags = IP6_TUNNEL;
			else if (strcmp(tokens[ti], "transport") == 0)
				rule->flags = TRANSPORT;
			else {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			mode_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "cipher_algo") == 0) {
			const struct supported_cipher_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_cipher_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			rule->cipher_algo = algo->algo;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;
			rule->cipher_key_len = algo->key_len;

			/* for NULL algorithm, no cipher key required */
			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				cipher_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"cipher_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC ||
				algo->algo == RTE_CRYPTO_CIPHER_3DES_CBC)
				rule->salt = (uint32_t)rte_rand();

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
				key_len -= 4;
				rule->cipher_key_len = key_len;
				memcpy(&rule->salt,
					&rule->cipher_key[key_len], 4);
			}

			cipher_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "auth_algo") == 0) {
			const struct supported_auth_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_auth_algo(tokens[ti]);
			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			rule->auth_algo = algo->algo;
			rule->auth_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;

			/* NULL algorithm and combined algos do not
			 * require auth key
			 */
			if (algo->key_not_req) {
				auth_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"auth_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->auth_key);
			APP_CHECK(key_len == rule->auth_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			auth_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "aead_algo") == 0) {
			const struct supported_aead_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_aead_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			rule->aead_algo = algo->algo;
			rule->cipher_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;
			rule->aad_len = algo->aad_len;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"aead_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			key_len -= 4;
			rule->cipher_key_len = key_len;
			memcpy(&rule->salt,
				&rule->cipher_key[key_len], 4);

			aead_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "src") == 0) {
			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (rule->flags == IP4_TUNNEL) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->src.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (rule->flags == IP6_TUNNEL) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->src.ip.ip6.ip6_b,
					ip.s6_addr, 16);
			} else if (rule->flags == TRANSPORT) {
				APP_CHECK(0, status, "unrecognized input "
					"\"%s\"", tokens[ti]);
				return;
			}

			src_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "dst") == 0) {
			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (rule->flags == IP4_TUNNEL) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->dst.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (rule->flags == IP6_TUNNEL) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
			} else if (rule->flags == TRANSPORT) {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			dst_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "type") == 0) {
			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
				rule->type =
					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
			else if (strcmp(tokens[ti],
					"inline-protocol-offload") == 0)
				rule->type =
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
			else if (strcmp(tokens[ti],
					"lookaside-protocol-offload") == 0)
				rule->type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
			else if (strcmp(tokens[ti], "no-offload") == 0)
				rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else {
				APP_CHECK(0, status, "Invalid input \"%s\"",
					tokens[ti]);
				return;
			}

			type_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "port_id") == 0) {
			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->portid = atoi(tokens[ti]);
			if (status->status < 0)
				return;
			portid_p = 1;
			continue;
		}

		/* unrecognizable input */
		APP_CHECK(0, status, "unrecognized input \"%s\"",
			tokens[ti]);
		return;
	}

	if (aead_algo_p) {
		APP_CHECK(cipher_algo_p == 0, status,
			"AEAD used, no need for cipher options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 0, status,
			"AEAD used, no need for auth options");
		if (status->status < 0)
			return;
	} else {
		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
		if (status->status < 0)
			return;
	}

	APP_CHECK(mode_p == 1, status, "missing mode option");
	if (status->status < 0)
		return;

	if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
		printf("Missing portid option, falling back to non-offload\n");

	if (!type_p || !portid_p) {
		rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
		rule->portid = -1;
	}

	*ri = *ri + 1;
}

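/*
 * Illustrative SA rule in the format handled above (hypothetical values,
 * shown wrapped for readability; the parser receives one flat token list,
 * and it is assumed the surrounding config-file parser has already consumed
 * the leading "sa" keyword so that tokens[0] is "in"/"out"):
 *
 *   sa in 105 cipher_algo aes-128-cbc
 *         cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef
 *         auth_algo sha1-hmac
 *         auth_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef
 *         mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5
 *
 * Key lengths must match the algorithm tables (16 bytes for aes-128-cbc,
 * 20 bytes for sha1-hmac); token order after the SPI is not fixed.
 */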
static void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;

	printf("\tspi_%s(%3u):", inbound?"in":"out", sa->spi);

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo &&
				cipher_algos[i].key_len == sa->cipher_key_len) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	printf("mode:");

	switch (sa->flags) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
		}
		printf(" ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
		}
		break;
	case TRANSPORT:
		printf("Transport ");
		break;
	}
	printf(" type:");
	switch (sa->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		printf("no-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		printf("inline-crypto-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		printf("inline-protocol-offload ");
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		printf("lookaside-protocol-offload ");
		break;
	}
	printf("\n");
}

struct sa_ctx {
	void *satbl; /* pointer to array of rte_ipsec_sa objects */
	struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
	union {
		struct {
			struct rte_crypto_sym_xform a;
			struct rte_crypto_sym_xform b;
		};
	} xf[IPSEC_SA_MAX_ENTRIES];
};

static struct sa_ctx *
sa_create(const char *name, int32_t socket_id)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	uint32_t mz_size;
	const struct rte_memzone *mz;

	snprintf(s, sizeof(s), "%s_%u", name, socket_id);

	/* Create SA array table */
	printf("Creating SA context with %u maximum entries on socket %d\n",
			IPSEC_SA_MAX_ENTRIES, socket_id);

	mz_size = sizeof(struct sa_ctx);
	mz = rte_memzone_reserve(s, mz_size, socket_id,
			RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA DB memory\n");
		rte_errno = -ENOMEM;
		return NULL;
	}

	sa_ctx = (struct sa_ctx *)mz->addr;

	return sa_ctx;
}

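/*
 * Note: the memzone is named "<name>_<socket_id>" (e.g. "sa_in_0"), so one
 * inbound and one outbound SA context can coexist per NUMA socket; on
 * allocation failure sa_create() sets rte_errno and returns NULL, which the
 * caller reports.
 */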
static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(portid, &dev_info);

	if (inbound) {
		if ((dev_info.rx_offload_capa &
				DEV_RX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware RX IPSec offload is not supported\n");
			return -EINVAL;
		}

	} else { /* outbound */
		if ((dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TX IPSec offload is not supported\n");
			return -EINVAL;
		}
	}
	return 0;
}


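/*
 * Note: sa_add_rules() below places each SA at index SPI2IDX(spi) in the
 * flat sa[] table, so two configured SPIs that map to the same index are
 * rejected ("Index %u already in use") rather than chained.
 */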
static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length, aad_length;

	/* for ESN upper 32 bits of SQN also need to be part of AAD */
	aad_length = (app_sa_prm.enable_esn != 0) ? sizeof(uint32_t) : 0;

	for (i = 0; i < nb_entries; i++) {
		idx = SPI2IDX(entries[i].spi);
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
					idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];
		sa->seq = 0;

		if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound))
				return -EINVAL;
		}

		sa->direction = (inbound == 1) ?
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS;

		switch (sa->flags) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
		}

		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
			iv_length = 16;

			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
				sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
				RTE_CRYPTO_AEAD_OP_DECRYPT :
				RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
				sa->aad_len + aad_length;
			sa_ctx->xf[idx].a.aead.digest_length =
				sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_3DES_CBC:
			case RTE_CRYPTO_CIPHER_AES_CBC:
				iv_length = sa->iv_len;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
				iv_length = 16;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
						"unsupported cipher algorithm %u\n",
						sa->cipher_algo);
				return -EINVAL;
			}

			if (inbound) {
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
			} else { /* outbound */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
			}

			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
			sa_ctx->xf[idx].b.next = NULL;
			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		}
	}

	return 0;
}

static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1);
}

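/*
 * To recap the chaining above: for non-AEAD SAs the session is always built
 * as xf[idx].a -> xf[idx].b, i.e. AUTH(verify) followed by CIPHER(decrypt)
 * on the inbound path and CIPHER(encrypt) followed by AUTH(generate) on the
 * outbound path; AEAD SAs use the single xf[idx].a transform.
 */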
/*
 * helper function, fills parameters that are identical for all SAs
 */
static void
fill_ipsec_app_sa_prm(struct rte_ipsec_sa_prm *prm,
	const struct app_sa_prm *app_prm)
{
	memset(prm, 0, sizeof(*prm));

	prm->flags = app_prm->flags;
	prm->ipsec_xform.options.esn = app_prm->enable_esn;
	prm->replay_win_sz = app_prm->window_size;
}

/*
 * Helper function, tries to determine next_proto for SPI
 * by searching through SP rules.
 */
static int
get_spi_proto(uint32_t spi, enum rte_security_ipsec_sa_direction dir)
{
	int32_t rc4, rc6;

	rc4 = sp4_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
	rc6 = sp6_spi_present(spi, dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);

	if (rc4 >= 0) {
		if (rc6 >= 0) {
			RTE_LOG(ERR, IPSEC,
953 "%s: SPI %u used simultaeously by "
954 "IPv4(%d) and IPv6 (%d) SP rules\n",
955 __func__, spi, rc4, rc6);
956 return -EINVAL;
957 } else
958 return IPPROTO_IPIP;
959 } else if (rc6 < 0) {
960 RTE_LOG(ERR, IPSEC,
961 "%s: SPI %u is not used by any SP rule\n",
962 __func__, spi);
963 return -EINVAL;
964 } else
965 return IPPROTO_IPV6;
966}
967
968static int
969fill_ipsec_sa_prm(struct rte_ipsec_sa_prm *prm, const struct ipsec_sa *ss,
970 const struct ipv4_hdr *v4, struct ipv6_hdr *v6)
971{
972 int32_t rc;
973
974 /*
	 * Try to get SPI next proto by searching that SPI in SPD.
	 * Probably not the optimal way, but nothing better seems
	 * available right now.
	 */
	rc = get_spi_proto(ss->spi, ss->direction);
	if (rc < 0)
		return rc;

	fill_ipsec_app_sa_prm(prm, &app_sa_prm);
	prm->userdata = (uintptr_t)ss;

	/* setup ipsec xform */
	prm->ipsec_xform.spi = ss->spi;
	prm->ipsec_xform.salt = ss->salt;
	prm->ipsec_xform.direction = ss->direction;
	prm->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP;
	prm->ipsec_xform.mode = (ss->flags == TRANSPORT) ?
		RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT :
		RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

	if (ss->flags == IP4_TUNNEL) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		prm->tun.hdr_len = sizeof(*v4);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v4;
	} else if (ss->flags == IP6_TUNNEL) {
		prm->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV6;
		prm->tun.hdr_len = sizeof(*v6);
		prm->tun.next_proto = rc;
		prm->tun.hdr = v6;
	} else {
		/* transport mode */
		prm->trs.proto = rc;
	}

	/* setup crypto section */
	prm->crypto_xform = ss->xforms;
	return 0;
}

static void
fill_ipsec_session(struct rte_ipsec_session *ss, struct rte_ipsec_sa *sa,
	const struct ipsec_sa *lsa)
{
	ss->sa = sa;
	ss->type = lsa->type;

	/* setup crypto section */
	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
		ss->crypto.ses = lsa->crypto_session;
	/* setup session action type */
	} else {
		ss->security.ses = lsa->sec_session;
		ss->security.ctx = lsa->security_ctx;
		ss->security.ol_flags = lsa->ol_flags;
	}
}

/*
 * Initialise related rte_ipsec_sa object.
 */
static int
ipsec_sa_init(struct ipsec_sa *lsa, struct rte_ipsec_sa *sa, uint32_t sa_size)
{
	int rc;
	struct rte_ipsec_sa_prm prm;
	struct ipv4_hdr v4 = {
		.version_ihl = IPVERSION << 4 |
			sizeof(v4) / IPV4_IHL_MULTIPLIER,
		.time_to_live = IPDEFTTL,
		.next_proto_id = IPPROTO_ESP,
		.src_addr = lsa->src.ip.ip4,
		.dst_addr = lsa->dst.ip.ip4,
	};
	struct ipv6_hdr v6 = {
		.vtc_flow = htonl(IP6_VERSION << 28),
		.proto = IPPROTO_ESP,
	};

	if (lsa->flags == IP6_TUNNEL) {
		memcpy(v6.src_addr, lsa->src.ip.ip6.ip6_b, sizeof(v6.src_addr));
		memcpy(v6.dst_addr, lsa->dst.ip.ip6.ip6_b, sizeof(v6.dst_addr));
	}

	rc = fill_ipsec_sa_prm(&prm, lsa, &v4, &v6);
	if (rc == 0)
		rc = rte_ipsec_sa_init(sa, &prm, sa_size);
	if (rc < 0)
		return rc;

	fill_ipsec_session(&lsa->ips, sa, lsa);
	return 0;
}

/*
 * Allocate space and init rte_ipsec_sa structures,
 * one per session.
 */
static int
ipsec_satbl_init(struct sa_ctx *ctx, const struct ipsec_sa *ent,
	uint32_t nb_ent, int32_t socket)
{
	int32_t rc, sz;
	uint32_t i, idx;
	size_t tsz;
	struct rte_ipsec_sa *sa;
	struct ipsec_sa *lsa;
	struct rte_ipsec_sa_prm prm;

	/* determine SA size */
	idx = SPI2IDX(ent[0].spi);
	fill_ipsec_sa_prm(&prm, ctx->sa + idx, NULL, NULL);
	sz = rte_ipsec_sa_size(&prm);
	if (sz < 0) {
		RTE_LOG(ERR, IPSEC, "%s(%p, %u, %d): "
			"failed to determine SA size, error code: %d\n",
			__func__, ctx, nb_ent, socket, sz);
		return sz;
	}

	tsz = sz * nb_ent;

	ctx->satbl = rte_zmalloc_socket(NULL, tsz, RTE_CACHE_LINE_SIZE, socket);
	if (ctx->satbl == NULL) {
		RTE_LOG(ERR, IPSEC,
			"%s(%p, %u, %d): failed to allocate %zu bytes\n",
			__func__, ctx, nb_ent, socket, tsz);
		return -ENOMEM;
	}

	rc = 0;
	for (i = 0; i != nb_ent && rc == 0; i++) {

		idx = SPI2IDX(ent[i].spi);

		sa = (struct rte_ipsec_sa *)((uintptr_t)ctx->satbl + sz * i);
		lsa = ctx->sa + idx;

		rc = ipsec_sa_init(lsa, sa, sz);
	}

	return rc;
}

/*
 * Walk through all SA rules to find an SA with given SPI
 */
int
sa_spi_present(uint32_t spi, int inbound)
{
	uint32_t i, num;
	const struct ipsec_sa *sar;

	if (inbound != 0) {
		sar = sa_in;
		num = nb_sa_in;
	} else {
		sar = sa_out;
		num = nb_sa_out;
	}

	for (i = 0; i != num; i++) {
		if (sar[i].spi == spi)
			return i;
	}

	return -ENOENT;
}

void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
	int32_t rc;
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %u already "
				"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_in, sa_in, nb_sa_in,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init inbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s in socket %d\n", rte_errno,
				name, socket_id);

		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);

		if (app_sa_prm.enable != 0) {
			rc = ipsec_satbl_init(ctx->sa_out, sa_out, nb_sa_out,
				socket_id);
			if (rc != 0)
				rte_exit(EXIT_FAILURE,
					"failed to init outbound SAs\n");
		}
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
			"specified\n");
}

int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;

	priv = get_priv(m);
	sa = priv->sa;
	if (sa != NULL)
		return (sa_ctx->sa[sa_idx].spi == sa->spi);

	RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
	return 0;
}

static inline void
single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
		struct ipsec_sa **sa_ret)
{
	struct esp_hdr *esp;
	struct ip *ip;
	uint32_t *src4_addr;
	uint8_t *src6_addr;
	struct ipsec_sa *sa;

	*sa_ret = NULL;

	ip = rte_pktmbuf_mtod(pkt, struct ip *);
	if (ip->ip_v == IPVERSION)
		esp = (struct esp_hdr *)(ip + 1);
	else
		esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1);

	if (esp->spi == INVALID_SPI)
		return;

	sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
	if (rte_be_to_cpu_32(esp->spi) != sa->spi)
		return;

	switch (sa->flags) {
	case IP4_TUNNEL:
		src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
		if ((ip->ip_v == IPVERSION) &&
				(sa->src.ip.ip4 == *src4_addr) &&
				(sa->dst.ip.ip4 == *(src4_addr + 1)))
			*sa_ret = sa;
		break;
	case IP6_TUNNEL:
		src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
		if ((ip->ip_v == IP6_VERSION) &&
				!memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
				!memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
			*sa_ret = sa;
		break;
	case TRANSPORT:
		*sa_ret = sa;
	}
}

void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
		struct ipsec_sa *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
}

void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
		struct ipsec_sa *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		sa[i] = &sa_ctx->sa[sa_idx[i]];
}

/*
 * Select HW offloads to be used.
 */
int
sa_check_offloads(uint16_t port_id, uint64_t *rx_offloads,
		uint64_t *tx_offloads)
{
	struct ipsec_sa *rule;
	uint32_t idx_sa;

	*rx_offloads = 0;
	*tx_offloads = 0;

	/* Check for inbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) {
		rule = &sa_in[idx_sa];
		if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*rx_offloads |= DEV_RX_OFFLOAD_SECURITY;
	}

	/* Check for outbound rules that use offloads and use this port */
	for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) {
		rule = &sa_out[idx_sa];
		if ((rule->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
				rule->type ==
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
				&& rule->portid == port_id)
			*tx_offloads |= DEV_TX_OFFLOAD_SECURITY;
	}
	return 0;
}
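/*
 * Usage note (an assumption about the calling code, which lives outside this
 * file): sa_check_offloads() is intended to be called per port during
 * start-up, before the port is configured, so that DEV_RX_OFFLOAD_SECURITY /
 * DEV_TX_OFFLOAD_SECURITY can be ORed into the rte_eth_conf offload masks
 * whenever an inline SA rule references that port.
 */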