1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
6 * Security Associations
9 #include <netinet/in.h>
10 #include <netinet/ip.h>
11 #include <netinet/ip6.h>
13 #include <rte_memzone.h>
14 #include <rte_crypto.h>
15 #include <rte_security.h>
16 #include <rte_cryptodev.h>
17 #include <rte_byteorder.h>
18 #include <rte_errno.h>
20 #include <rte_random.h>
21 #include <rte_ethdev.h>
/* Descriptor mapping a SA-config cipher keyword to its DPDK transform.
 * NOTE(review): definition is truncated in this chunk — keyword/key_len/
 * iv_len/block_size members are not visible here; confirm against the
 * full file. */
struct supported_cipher_algo
{
enum rte_crypto_cipher_algorithm algo
;
/* Descriptor mapping a SA-config auth keyword to its DPDK transform.
 * NOTE(review): truncated here — remaining members not visible. */
struct supported_auth_algo
{
enum rte_crypto_auth_algorithm algo
;
/* Descriptor mapping a SA-config AEAD keyword to its DPDK transform.
 * NOTE(review): truncated here — remaining members not visible. */
struct supported_aead_algo
{
enum rte_crypto_aead_algorithm algo
;
/* Table of supported cipher algorithms, keyed by config-file keyword.
 * NOTE(review): entries are truncated in this view; per-entry key_len /
 * iv_len fields are not visible. */
const struct supported_cipher_algo cipher_algos
[] = {
/* "null" cipher: no encryption. */
.algo
= RTE_CRYPTO_CIPHER_NULL
,
.keyword
= "aes-128-cbc",
.algo
= RTE_CRYPTO_CIPHER_AES_CBC
,
.keyword
= "aes-256-cbc",
.algo
= RTE_CRYPTO_CIPHER_AES_CBC
,
.keyword
= "aes-128-ctr",
.algo
= RTE_CRYPTO_CIPHER_AES_CTR
,
.block_size
= 16, /* XXX AESNI MB limition, should be 4 */
/* Table of supported authentication algorithms, keyed by keyword.
 * NOTE(review): entries truncated — digest_len/key_len not visible. */
const struct supported_auth_algo auth_algos
[] = {
.algo
= RTE_CRYPTO_AUTH_NULL
,
.keyword
= "sha1-hmac",
.algo
= RTE_CRYPTO_AUTH_SHA1_HMAC
,
.keyword
= "sha256-hmac",
.algo
= RTE_CRYPTO_AUTH_SHA256_HMAC
,
/* Table of supported AEAD algorithms (cipher+auth combined), keyed by
 * keyword.  NOTE(review): entry truncated in this view. */
const struct supported_aead_algo aead_algos
[] = {
.keyword
= "aes-128-gcm",
.algo
= RTE_CRYPTO_AEAD_AES_GCM
,
/* Global parse targets: outbound and inbound SA rule tables filled by
 * parse_sa_tokens().  Corresponding nb_sa_out/nb_sa_in counters are not
 * visible in this chunk. */
struct ipsec_sa sa_out
[IPSEC_SA_MAX_ENTRIES
];
struct ipsec_sa sa_in
[IPSEC_SA_MAX_ENTRIES
];
127 static const struct supported_cipher_algo
*
128 find_match_cipher_algo(const char *cipher_keyword
)
132 for (i
= 0; i
< RTE_DIM(cipher_algos
); i
++) {
133 const struct supported_cipher_algo
*algo
=
136 if (strcmp(cipher_keyword
, algo
->keyword
) == 0)
143 static const struct supported_auth_algo
*
144 find_match_auth_algo(const char *auth_keyword
)
148 for (i
= 0; i
< RTE_DIM(auth_algos
); i
++) {
149 const struct supported_auth_algo
*algo
=
152 if (strcmp(auth_keyword
, algo
->keyword
) == 0)
159 static const struct supported_aead_algo
*
160 find_match_aead_algo(const char *aead_keyword
)
164 for (i
= 0; i
< RTE_DIM(aead_algos
); i
++) {
165 const struct supported_aead_algo
*algo
=
168 if (strcmp(aead_keyword
, algo
->keyword
) == 0)
/*
 * Parse a colon-separated hex byte string ("xx:xx:...") into key[].
 *
 * Each token between ':' separators must be at most two hex digits.
 * Returns the number of bytes written to key, or 0 on malformed input
 * (a token longer than two characters).
 * NOTE(review): no output-buffer bound is passed — the caller must
 * guarantee key[] is large enough for the parsed string.
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *cur = key_str;
	uint32_t nb_bytes = 0;

	for (;;) {
		char byte_txt[3] = {0};
		const char *sep = strchr(cur, ':');

		if (sep == NULL) {
			/* Final token: anything longer than 2 chars is invalid. */
			if (strlen(cur) > 2)
				return 0;
			memcpy(byte_txt, cur, strlen(cur));
			key[nb_bytes++] = (uint8_t)strtol(byte_txt, NULL, 16);
			return nb_bytes;
		}

		if (sep - cur > 2)
			return 0;
		memcpy(byte_txt, cur, sep - cur);
		key[nb_bytes++] = (uint8_t)strtol(byte_txt, NULL, 16);
		cur = sep + 1;
	}
}
/* Parse one tokenized SA config line ("sa in|out <spi> mode ... cipher_algo
 * ... auth_algo ... aead_algo ... src ... dst ... type ... port_id ...")
 * into the sa_in[]/sa_out[] rule tables via *status error reporting.
 * NOTE(review): this chunk is heavily truncated — many original lines are
 * missing; comments below mark the visible sections only. */
parse_sa_tokens(char **tokens
, uint32_t n_tokens
,
struct parse_status
*status
)
struct ipsec_sa
*rule
= NULL
;
uint32_t ti
; /*token index*/
uint32_t *ri
/*rule index*/;
/* presence flags: each option may appear at most once per rule */
uint32_t cipher_algo_p
= 0;
uint32_t auth_algo_p
= 0;
uint32_t aead_algo_p
= 0;
uint32_t portid_p
= 0;
/* Select inbound vs outbound rule table; *ri indexes into it. */
if (strcmp(tokens
[0], "in") == 0) {
APP_CHECK(*ri
<= IPSEC_SA_MAX_ENTRIES
- 1, status
,
"too many sa rules, abort insertion\n");
if (status
->status
< 0)
APP_CHECK(*ri
<= IPSEC_SA_MAX_ENTRIES
- 1, status
,
"too many sa rules, abort insertion\n");
if (status
->status
< 0)
/* tokens[1] is the SPI; must be numeric and not INVALID_SPI. */
APP_CHECK_TOKEN_IS_NUM(tokens
, 1, status
);
if (status
->status
< 0)
if (atoi(tokens
[1]) == INVALID_SPI
)
rule
->spi
= atoi(tokens
[1]);
/* Walk the remaining tokens, dispatching on each option keyword. */
for (ti
= 2; ti
< n_tokens
; ti
++) {
/* "mode": ipv4-tunnel / ipv6-tunnel / transport */
if (strcmp(tokens
[ti
], "mode") == 0) {
APP_CHECK_PRESENCE(mode_p
, tokens
[ti
], status
);
if (status
->status
< 0)
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
if (strcmp(tokens
[ti
], "ipv4-tunnel") == 0)
rule
->flags
= IP4_TUNNEL
;
else if (strcmp(tokens
[ti
], "ipv6-tunnel") == 0)
rule
->flags
= IP6_TUNNEL
;
else if (strcmp(tokens
[ti
], "transport") == 0)
rule
->flags
= TRANSPORT
;
APP_CHECK(0, status
, "unrecognized "
"input \"%s\"", tokens
[ti
]);
/* "cipher_algo <name> cipher_key <hex>" section */
if (strcmp(tokens
[ti
], "cipher_algo") == 0) {
const struct supported_cipher_algo
*algo
;
APP_CHECK_PRESENCE(cipher_algo_p
, tokens
[ti
],
if (status
->status
< 0)
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
algo
= find_match_cipher_algo(tokens
[ti
]);
APP_CHECK(algo
!= NULL
, status
, "unrecognized "
"input \"%s\"", tokens
[ti
]);
rule
->cipher_algo
= algo
->algo
;
rule
->block_size
= algo
->block_size
;
rule
->iv_len
= algo
->iv_len
;
rule
->cipher_key_len
= algo
->key_len
;
/* for NULL algorithm, no cipher key required */
if (rule
->cipher_algo
== RTE_CRYPTO_CIPHER_NULL
) {
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
APP_CHECK(strcmp(tokens
[ti
], "cipher_key") == 0,
status
, "unrecognized input \"%s\", "
"expect \"cipher_key\"", tokens
[ti
]);
if (status
->status
< 0)
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
key_len
= parse_key_string(tokens
[ti
],
APP_CHECK(key_len
== rule
->cipher_key_len
, status
,
"unrecognized input \"%s\"", tokens
[ti
]);
if (status
->status
< 0)
/* CBC uses a random salt; CTR carries the salt in the key tail. */
if (algo
->algo
== RTE_CRYPTO_CIPHER_AES_CBC
)
rule
->salt
= (uint32_t)rte_rand();
if (algo
->algo
== RTE_CRYPTO_CIPHER_AES_CTR
) {
rule
->cipher_key_len
= key_len
;
&rule
->cipher_key
[key_len
], 4);
/* "auth_algo <name> [auth_key <hex>]" section */
if (strcmp(tokens
[ti
], "auth_algo") == 0) {
const struct supported_auth_algo
*algo
;
APP_CHECK_PRESENCE(auth_algo_p
, tokens
[ti
],
if (status
->status
< 0)
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
algo
= find_match_auth_algo(tokens
[ti
]);
APP_CHECK(algo
!= NULL
, status
, "unrecognized "
"input \"%s\"", tokens
[ti
]);
rule
->auth_algo
= algo
->algo
;
rule
->auth_key_len
= algo
->key_len
;
rule
->digest_len
= algo
->digest_len
;
/* NULL algorithm and combined algos do not
if (algo
->key_not_req
) {
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
APP_CHECK(strcmp(tokens
[ti
], "auth_key") == 0,
status
, "unrecognized input \"%s\", "
"expect \"auth_key\"", tokens
[ti
]);
if (status
->status
< 0)
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
key_len
= parse_key_string(tokens
[ti
],
APP_CHECK(key_len
== rule
->auth_key_len
, status
,
"unrecognized input \"%s\"", tokens
[ti
]);
if (status
->status
< 0)
/* "aead_algo <name> aead_key <hex>" section — AEAD reuses the
 * cipher_key storage for key material. */
if (strcmp(tokens
[ti
], "aead_algo") == 0) {
const struct supported_aead_algo
*algo
;
APP_CHECK_PRESENCE(aead_algo_p
, tokens
[ti
],
if (status
->status
< 0)
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
algo
= find_match_aead_algo(tokens
[ti
]);
APP_CHECK(algo
!= NULL
, status
, "unrecognized "
"input \"%s\"", tokens
[ti
]);
rule
->aead_algo
= algo
->algo
;
rule
->cipher_key_len
= algo
->key_len
;
rule
->digest_len
= algo
->digest_len
;
rule
->aad_len
= algo
->aad_len
;
rule
->block_size
= algo
->block_size
;
rule
->iv_len
= algo
->iv_len
;
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
APP_CHECK(strcmp(tokens
[ti
], "aead_key") == 0,
status
, "unrecognized input \"%s\", "
"expect \"aead_key\"", tokens
[ti
]);
if (status
->status
< 0)
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
key_len
= parse_key_string(tokens
[ti
],
APP_CHECK(key_len
== rule
->cipher_key_len
, status
,
"unrecognized input \"%s\"", tokens
[ti
]);
if (status
->status
< 0)
/* last 4 bytes of the parsed key feed the salt */
rule
->cipher_key_len
= key_len
;
&rule
->cipher_key
[key_len
], 4);
/* "src" tunnel endpoint — only valid in tunnel modes. */
if (strcmp(tokens
[ti
], "src") == 0) {
APP_CHECK_PRESENCE(src_p
, tokens
[ti
], status
);
if (status
->status
< 0)
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
if (rule
->flags
== IP4_TUNNEL
) {
APP_CHECK(parse_ipv4_addr(tokens
[ti
],
&ip
, NULL
) == 0, status
,
"unrecognized input \"%s\", "
"expect valid ipv4 addr",
if (status
->status
< 0)
rule
->src
.ip
.ip4
= rte_bswap32(
(uint32_t)ip
.s_addr
);
} else if (rule
->flags
== IP6_TUNNEL
) {
APP_CHECK(parse_ipv6_addr(tokens
[ti
], &ip
,
"unrecognized input \"%s\", "
"expect valid ipv6 addr",
if (status
->status
< 0)
memcpy(rule
->src
.ip
.ip6
.ip6_b
,
} else if (rule
->flags
== TRANSPORT
) {
APP_CHECK(0, status
, "unrecognized input "
"\"%s\"", tokens
[ti
]);
/* "dst" tunnel endpoint — only valid in tunnel modes. */
if (strcmp(tokens
[ti
], "dst") == 0) {
APP_CHECK_PRESENCE(dst_p
, tokens
[ti
], status
);
if (status
->status
< 0)
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
if (rule
->flags
== IP4_TUNNEL
) {
APP_CHECK(parse_ipv4_addr(tokens
[ti
],
&ip
, NULL
) == 0, status
,
"unrecognized input \"%s\", "
"expect valid ipv4 addr",
if (status
->status
< 0)
rule
->dst
.ip
.ip4
= rte_bswap32(
(uint32_t)ip
.s_addr
);
} else if (rule
->flags
== IP6_TUNNEL
) {
APP_CHECK(parse_ipv6_addr(tokens
[ti
], &ip
,
"unrecognized input \"%s\", "
"expect valid ipv6 addr",
if (status
->status
< 0)
memcpy(rule
->dst
.ip
.ip6
.ip6_b
, ip
.s6_addr
, 16);
} else if (rule
->flags
== TRANSPORT
) {
APP_CHECK(0, status
, "unrecognized "
"input \"%s\"", tokens
[ti
]);
/* "type": rte_security offload action type for this SA. */
if (strcmp(tokens
[ti
], "type") == 0) {
APP_CHECK_PRESENCE(type_p
, tokens
[ti
], status
);
if (status
->status
< 0)
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
if (strcmp(tokens
[ti
], "inline-crypto-offload") == 0)
RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO
;
else if (strcmp(tokens
[ti
],
"inline-protocol-offload") == 0)
RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
;
else if (strcmp(tokens
[ti
],
"lookaside-protocol-offload") == 0)
RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
;
else if (strcmp(tokens
[ti
], "no-offload") == 0)
rule
->type
= RTE_SECURITY_ACTION_TYPE_NONE
;
APP_CHECK(0, status
, "Invalid input \"%s\"",
/* "port_id": ethernet port used for offloaded SAs. */
if (strcmp(tokens
[ti
], "port_id") == 0) {
APP_CHECK_PRESENCE(portid_p
, tokens
[ti
], status
);
if (status
->status
< 0)
INCREMENT_TOKEN_INDEX(ti
, n_tokens
, status
);
if (status
->status
< 0)
rule
->portid
= atoi(tokens
[ti
]);
if (status
->status
< 0)
/* unrecognizeable input */
APP_CHECK(0, status
, "unrecognized input \"%s\"",
/* Final cross-option validation: AEAD excludes separate
 * cipher/auth options; otherwise both are mandatory. */
APP_CHECK(cipher_algo_p
== 0, status
,
"AEAD used, no need for cipher options");
if (status
->status
< 0)
APP_CHECK(auth_algo_p
== 0, status
,
"AEAD used, no need for auth options");
if (status
->status
< 0)
APP_CHECK(cipher_algo_p
== 1, status
, "missing cipher or AEAD options");
if (status
->status
< 0)
APP_CHECK(auth_algo_p
== 1, status
, "missing auth or AEAD options");
if (status
->status
< 0)
APP_CHECK(mode_p
== 1, status
, "missing mode option");
if (status
->status
< 0)
/* Offload types without a port_id fall back to no-offload. */
if ((rule
->type
!= RTE_SECURITY_ACTION_TYPE_NONE
) && (portid_p
== 0))
printf("Missing portid option, falling back to non-offload\n");
if (!type_p
|| !portid_p
) {
rule
->type
= RTE_SECURITY_ACTION_TYPE_NONE
;
/* Pretty-print one SA rule: SPI, matched algorithm keywords (looked up
 * back through the descriptor tables), and tunnel endpoints.
 * NOTE(review): chunk is truncated; transport-mode output not visible. */
print_one_sa_rule(const struct ipsec_sa
*sa
, int inbound
)
printf("\tspi_%s(%3u):", inbound
?"in":"out", sa
->spi
);
/* Reverse-lookup the cipher keyword (algo + key_len must both match,
 * since AES-CBC appears twice with different key lengths). */
for (i
= 0; i
< RTE_DIM(cipher_algos
); i
++) {
if (cipher_algos
[i
].algo
== sa
->cipher_algo
&&
cipher_algos
[i
].key_len
== sa
->cipher_key_len
) {
printf("%s ", cipher_algos
[i
].keyword
);
for (i
= 0; i
< RTE_DIM(auth_algos
); i
++) {
if (auth_algos
[i
].algo
== sa
->auth_algo
) {
printf("%s ", auth_algos
[i
].keyword
);
for (i
= 0; i
< RTE_DIM(aead_algos
); i
++) {
if (aead_algos
[i
].algo
== sa
->aead_algo
) {
printf("%s ", aead_algos
[i
].keyword
);
/* IPv4 tunnel endpoints (bytes reversed for display). */
printf("IP4Tunnel ");
uint32_t_to_char(sa
->src
.ip
.ip4
, &a
, &b
, &c
, &d
);
printf("%hhu.%hhu.%hhu.%hhu ", d
, c
, b
, a
);
uint32_t_to_char(sa
->dst
.ip
.ip4
, &a
, &b
, &c
, &d
);
printf("%hhu.%hhu.%hhu.%hhu", d
, c
, b
, a
);
/* IPv6 tunnel endpoints, printed as colon-grouped byte pairs. */
printf("IP6Tunnel ");
for (i
= 0; i
< 16; i
++) {
if (i
% 2 && i
!= 15)
printf("%.2x:", sa
->src
.ip
.ip6
.ip6_b
[i
]);
printf("%.2x", sa
->src
.ip
.ip6
.ip6_b
[i
]);
for (i
= 0; i
< 16; i
++) {
if (i
% 2 && i
!= 15)
printf("%.2x:", sa
->dst
.ip
.ip6
.ip6_b
[i
]);
printf("%.2x", sa
->dst
.ip
.ip6
.ip6_b
[i
]);
/* Interior of struct sa_ctx (opening lines not visible in this chunk):
 * the SA table plus one pre-built crypto transform pair (a -> b chain)
 * per SA entry. */
struct ipsec_sa sa
[IPSEC_SA_MAX_ENTRIES
];
struct rte_crypto_sym_xform a
;
struct rte_crypto_sym_xform b
;
} xf
[IPSEC_SA_MAX_ENTRIES
];
/* Allocate one SA context ("<name>_<socket_id>") in a hugepage memzone on
 * the given NUMA socket.  Returns the zeroed context, or (per the visible
 * failure print) NULL on allocation failure — truncated here. */
static struct sa_ctx
*
sa_create(const char *name
, int32_t socket_id
)
struct sa_ctx
*sa_ctx
;
const struct rte_memzone
*mz
;
/* memzone name is unique per (name, socket). */
snprintf(s
, sizeof(s
), "%s_%u", name
, socket_id
);
/* Create SA array table */
printf("Creating SA context with %u maximum entries\n",
IPSEC_SA_MAX_ENTRIES
);
mz_size
= sizeof(struct sa_ctx
);
mz
= rte_memzone_reserve(s
, mz_size
, socket_id
,
RTE_MEMZONE_1GB
| RTE_MEMZONE_SIZE_HINT_ONLY
);
printf("Failed to allocate SA DB memory\n");
sa_ctx
= (struct sa_ctx
*)mz
->addr
;
/* Verify that the given port advertises the RX (inbound) or TX (outbound)
 * SECURITY offload capability needed for an inline SA.
 * NOTE(review): return statements are not visible in this chunk —
 * presumably non-zero on missing capability; confirm in full file. */
check_eth_dev_caps(uint16_t portid
, uint32_t inbound
)
struct rte_eth_dev_info dev_info
;
rte_eth_dev_info_get(portid
, &dev_info
);
if ((dev_info
.rx_offload_capa
&
DEV_RX_OFFLOAD_SECURITY
) == 0) {
RTE_LOG(WARNING
, PORT
,
"hardware RX IPSec offload is not supported\n");
} else { /* outbound */
if ((dev_info
.tx_offload_capa
&
DEV_TX_OFFLOAD_SECURITY
) == 0) {
RTE_LOG(WARNING
, PORT
,
"hardware TX IPSec offload is not supported\n");
/* Install parsed SA entries into sa_ctx: index each SA by SPI2IDX(spi),
 * validate offload capabilities for inline types, and build the crypto
 * transform chain (AEAD single xform, or cipher+auth pair ordered by
 * direction).  NOTE(review): chunk is truncated — several error paths and
 * assignments (key lengths, AAD/digest sizes) are not visible. */
sa_add_rules(struct sa_ctx
*sa_ctx
, const struct ipsec_sa entries
[],
uint32_t nb_entries
, uint32_t inbound
)
for (i
= 0; i
< nb_entries
; i
++) {
/* SPI hashes to a fixed slot; collision aborts the insert. */
idx
= SPI2IDX(entries
[i
].spi
);
sa
= &sa_ctx
->sa
[idx
];
printf("Index %u already in use by SPI %u\n",
/* Inline offload SAs need the port capability and a direction. */
if (sa
->type
== RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
||
sa
->type
== RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO
) {
if (check_eth_dev_caps(sa
->portid
, inbound
))
sa
->direction
= (inbound
== 1) ?
RTE_SECURITY_IPSEC_SA_DIR_INGRESS
:
RTE_SECURITY_IPSEC_SA_DIR_EGRESS
;
/* Tunnel endpoints are stored big-endian for wire format. */
sa
->src
.ip
.ip4
= rte_cpu_to_be_32(sa
->src
.ip
.ip4
);
sa
->dst
.ip
.ip4
= rte_cpu_to_be_32(sa
->dst
.ip
.ip4
);
/* AEAD (AES-GCM): single transform in slot 'a'. */
if (sa
->aead_algo
== RTE_CRYPTO_AEAD_AES_GCM
) {
sa_ctx
->xf
[idx
].a
.type
= RTE_CRYPTO_SYM_XFORM_AEAD
;
sa_ctx
->xf
[idx
].a
.aead
.algo
= sa
->aead_algo
;
sa_ctx
->xf
[idx
].a
.aead
.key
.data
= sa
->cipher_key
;
sa_ctx
->xf
[idx
].a
.aead
.key
.length
=
sa_ctx
->xf
[idx
].a
.aead
.op
= (inbound
== 1) ?
RTE_CRYPTO_AEAD_OP_DECRYPT
:
RTE_CRYPTO_AEAD_OP_ENCRYPT
;
sa_ctx
->xf
[idx
].a
.next
= NULL
;
sa_ctx
->xf
[idx
].a
.aead
.iv
.offset
= IV_OFFSET
;
sa_ctx
->xf
[idx
].a
.aead
.iv
.length
= iv_length
;
sa_ctx
->xf
[idx
].a
.aead
.aad_length
=
sa_ctx
->xf
[idx
].a
.aead
.digest_length
=
sa
->xforms
= &sa_ctx
->xf
[idx
].a
;
print_one_sa_rule(sa
, inbound
);
/* Non-AEAD: pick the IV length per cipher. */
switch (sa
->cipher_algo
) {
case RTE_CRYPTO_CIPHER_NULL
:
case RTE_CRYPTO_CIPHER_AES_CBC
:
iv_length
= sa
->iv_len
;
case RTE_CRYPTO_CIPHER_AES_CTR
:
RTE_LOG(ERR
, IPSEC_ESP
,
"unsupported cipher algorithm %u\n",
/* Inbound: decrypt (b) then verify (a) — auth xform first. */
sa_ctx
->xf
[idx
].b
.type
= RTE_CRYPTO_SYM_XFORM_CIPHER
;
sa_ctx
->xf
[idx
].b
.cipher
.algo
= sa
->cipher_algo
;
sa_ctx
->xf
[idx
].b
.cipher
.key
.data
= sa
->cipher_key
;
sa_ctx
->xf
[idx
].b
.cipher
.key
.length
=
sa_ctx
->xf
[idx
].b
.cipher
.op
=
RTE_CRYPTO_CIPHER_OP_DECRYPT
;
sa_ctx
->xf
[idx
].b
.next
= NULL
;
sa_ctx
->xf
[idx
].b
.cipher
.iv
.offset
= IV_OFFSET
;
sa_ctx
->xf
[idx
].b
.cipher
.iv
.length
= iv_length
;
sa_ctx
->xf
[idx
].a
.type
= RTE_CRYPTO_SYM_XFORM_AUTH
;
sa_ctx
->xf
[idx
].a
.auth
.algo
= sa
->auth_algo
;
sa_ctx
->xf
[idx
].a
.auth
.key
.data
= sa
->auth_key
;
sa_ctx
->xf
[idx
].a
.auth
.key
.length
=
sa_ctx
->xf
[idx
].a
.auth
.digest_length
=
sa_ctx
->xf
[idx
].a
.auth
.op
=
RTE_CRYPTO_AUTH_OP_VERIFY
;
} else { /* outbound */
/* Outbound: encrypt (a) then generate digest (b). */
sa_ctx
->xf
[idx
].a
.type
= RTE_CRYPTO_SYM_XFORM_CIPHER
;
sa_ctx
->xf
[idx
].a
.cipher
.algo
= sa
->cipher_algo
;
sa_ctx
->xf
[idx
].a
.cipher
.key
.data
= sa
->cipher_key
;
sa_ctx
->xf
[idx
].a
.cipher
.key
.length
=
sa_ctx
->xf
[idx
].a
.cipher
.op
=
RTE_CRYPTO_CIPHER_OP_ENCRYPT
;
sa_ctx
->xf
[idx
].a
.next
= NULL
;
sa_ctx
->xf
[idx
].a
.cipher
.iv
.offset
= IV_OFFSET
;
sa_ctx
->xf
[idx
].a
.cipher
.iv
.length
= iv_length
;
sa_ctx
->xf
[idx
].b
.type
= RTE_CRYPTO_SYM_XFORM_AUTH
;
sa_ctx
->xf
[idx
].b
.auth
.algo
= sa
->auth_algo
;
sa_ctx
->xf
[idx
].b
.auth
.key
.data
= sa
->auth_key
;
sa_ctx
->xf
[idx
].b
.auth
.key
.length
=
sa_ctx
->xf
[idx
].b
.auth
.digest_length
=
sa_ctx
->xf
[idx
].b
.auth
.op
=
RTE_CRYPTO_AUTH_OP_GENERATE
;
/* Chain the two transforms: a -> b -> NULL. */
sa_ctx
->xf
[idx
].a
.next
= &sa_ctx
->xf
[idx
].b
;
sa_ctx
->xf
[idx
].b
.next
= NULL
;
sa
->xforms
= &sa_ctx
->xf
[idx
].a
;
print_one_sa_rule(sa
, inbound
);
/* Convenience wrapper: install outbound entries (inbound flag = 0). */
sa_out_add_rules(struct sa_ctx
*sa_ctx
, const struct ipsec_sa entries
[],
return sa_add_rules(sa_ctx
, entries
, nb_entries
, 0);
/* Convenience wrapper: install inbound entries (inbound flag = 1). */
sa_in_add_rules(struct sa_ctx
*sa_ctx
, const struct ipsec_sa entries
[],
return sa_add_rules(sa_ctx
, entries
, nb_entries
, 1);
/* Create and populate both SA databases for one socket context; any
 * failure is fatal (rte_exit).  Warns when a direction has no rules.
 * NOTE(review): 'name' assignments and rule-count checks are not visible
 * in this truncated chunk. */
sa_init(struct socket_ctx
*ctx
, int32_t socket_id
)
rte_exit(EXIT_FAILURE
, "NULL context.\n");
/* Refuse double initialization of either direction. */
if (ctx
->sa_in
!= NULL
)
rte_exit(EXIT_FAILURE
, "Inbound SA DB for socket %u already "
"initialized\n", socket_id
);
if (ctx
->sa_out
!= NULL
)
rte_exit(EXIT_FAILURE
, "Outbound SA DB for socket %u already "
"initialized\n", socket_id
);
/* Inbound DB. */
ctx
->sa_in
= sa_create(name
, socket_id
);
if (ctx
->sa_in
== NULL
)
rte_exit(EXIT_FAILURE
, "Error [%d] creating SA "
"context %s in socket %d\n", rte_errno
,
sa_in_add_rules(ctx
->sa_in
, sa_in
, nb_sa_in
);
RTE_LOG(WARNING
, IPSEC
, "No SA Inbound rule specified\n");
/* Outbound DB. */
ctx
->sa_out
= sa_create(name
, socket_id
);
if (ctx
->sa_out
== NULL
)
rte_exit(EXIT_FAILURE
, "Error [%d] creating SA "
"context %s in socket %d\n", rte_errno
,
sa_out_add_rules(ctx
->sa_out
, sa_out
, nb_sa_out
);
RTE_LOG(WARNING
, IPSEC
, "No SA Outbound rule "
/* Check that the SA recorded in the mbuf's private metadata matches the
 * SA at sa_idx (compared by SPI).  NOTE(review): the line obtaining
 * 'priv' from the mbuf is not visible in this chunk. */
inbound_sa_check(struct sa_ctx
*sa_ctx
, struct rte_mbuf
*m
, uint32_t sa_idx
)
struct ipsec_mbuf_metadata
*priv
;
return (sa_ctx
->sa
[sa_idx
].spi
== priv
->sa
->spi
);
/* Look up the SA for one inbound packet: read the ESP header after the
 * IPv4/IPv6 header, hash the SPI into sadb, and (per the visible checks)
 * also match tunnel src/dst addresses before accepting the SA.
 * NOTE(review): how *sa_ret is set is not visible in this chunk. */
single_inbound_lookup(struct ipsec_sa
*sadb
, struct rte_mbuf
*pkt
,
struct ipsec_sa
**sa_ret
)
/* ESP header immediately follows the (extension-free) IP header. */
ip
= rte_pktmbuf_mtod(pkt
, struct ip
*);
if (ip
->ip_v
== IPVERSION
)
esp
= (struct esp_hdr
*)(ip
+ 1);
esp
= (struct esp_hdr
*)(((struct ip6_hdr
*)ip
) + 1);
if (esp
->spi
== INVALID_SPI
)
/* SPI is big-endian on the wire; slot must hold the same SPI. */
sa
= &sadb
[SPI2IDX(rte_be_to_cpu_32(esp
->spi
))];
if (rte_be_to_cpu_32(esp
->spi
) != sa
->spi
)
/* src4_addr points at ip_src; src4_addr + 1 is ip_dst. */
src4_addr
= RTE_PTR_ADD(ip
, offsetof(struct ip
, ip_src
));
if ((ip
->ip_v
== IPVERSION
) &&
(sa
->src
.ip
.ip4
== *src4_addr
) &&
(sa
->dst
.ip
.ip4
== *(src4_addr
+ 1)))
/* IPv6: src then dst are 16 bytes each, contiguous. */
src6_addr
= RTE_PTR_ADD(ip
, offsetof(struct ip6_hdr
, ip6_src
));
if ((ip
->ip_v
== IP6_VERSION
) &&
!memcmp(&sa
->src
.ip
.ip6
.ip6
, src6_addr
, 16) &&
!memcmp(&sa
->dst
.ip
.ip6
.ip6
, src6_addr
+ 16, 16))
/* Resolve the SA for each inbound packet into sa[] (parallel to pkts[]). */
inbound_sa_lookup(struct sa_ctx
*sa_ctx
, struct rte_mbuf
*pkts
[],
struct ipsec_sa
*sa
[], uint16_t nb_pkts
)
for (i
= 0; i
< nb_pkts
; i
++)
single_inbound_lookup(sa_ctx
->sa
, pkts
[i
], &sa
[i
]);
/* Outbound lookup is a direct index: sa[i] = table entry at sa_idx[i]. */
outbound_sa_lookup(struct sa_ctx
*sa_ctx
, uint32_t sa_idx
[],
struct ipsec_sa
*sa
[], uint16_t nb_pkts
)
for (i
= 0; i
< nb_pkts
; i
++)
sa
[i
] = &sa_ctx
->sa
[sa_idx
[i
]];