/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

/*
 * Security Associations
 */
#include <stdio.h>	/* printf, snprintf */
#include <stdlib.h>	/* atoi, strtol */
#include <string.h>	/* strcmp, strchr, strncpy, memcpy */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <rte_memzone.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_random.h>
#include <rte_ethdev.h>

#include "ipsec.h"
#include "esp.h"
#include "parser.h"

#define IPDEFTTL 64

struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;	/* set when the algorithm takes no key */
};

struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};


const struct supported_cipher_algo cipher_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_CIPHER_NULL,
		.iv_len = 0,
		.block_size = 4,
		.key_len = 0
	},
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	},
	{
		.keyword = "aes-256-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 32
	},
	{
		.keyword = "aes-128-ctr",
		.algo = RTE_CRYPTO_CIPHER_AES_CTR,
		.iv_len = 8,
		.block_size = 16, /* XXX AESNI MB limitation, should be 4 */
		.key_len = 20 /* 16-byte key plus 4-byte nonce (salt) */
	}
};

const struct supported_auth_algo auth_algos[] = {
	{
		.keyword = "null",
		.algo = RTE_CRYPTO_AUTH_NULL,
		.digest_len = 0,
		.key_len = 0,
		.key_not_req = 1
	},
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	},
	{
		.keyword = "sha256-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.digest_len = 16, /* RFC 4868 specifies a 128-bit ICV */
		.key_len = 32
	}
};

const struct supported_aead_algo aead_algos[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		.block_size = 4,
		.key_len = 20, /* 16-byte key plus 4-byte salt */
		.digest_len = 16,
		.aad_len = 8,
	}
};

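/*
 * Note on the tables above (editorial, illustrative only): each "keyword"
 * is what the SA rule parser below matches in the configuration file. For
 * aes-128-ctr and aes-128-gcm, key_len counts 4 extra bytes because the
 * tail of the configured key string is split off into the SA salt by
 * parse_sa_tokens(): a 20-byte hex string yields a 16-byte AES key plus
 * a 4-byte salt.
 */
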
struct ipsec_sa sa_out[IPSEC_SA_MAX_ENTRIES];
uint32_t nb_sa_out;

struct ipsec_sa sa_in[IPSEC_SA_MAX_ENTRIES];
uint32_t nb_sa_in;

static const struct supported_cipher_algo *
find_match_cipher_algo(const char *cipher_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		const struct supported_cipher_algo *algo =
			&cipher_algos[i];

		if (strcmp(cipher_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_auth_algo *
find_match_auth_algo(const char *auth_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		const struct supported_auth_algo *algo =
			&auth_algos[i];

		if (strcmp(auth_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

static const struct supported_aead_algo *
find_match_aead_algo(const char *aead_keyword)
{
	size_t i;

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		const struct supported_aead_algo *algo =
			&aead_algos[i];

		if (strcmp(aead_keyword, algo->keyword) == 0)
			return algo;
	}

	return NULL;
}

/** parse_key_string
 *  parse x:x:x:x.... hex number key string into uint8_t *key
 *  return:
 *  > 0: number of bytes parsed
 *  0:   failed
 */
static uint32_t
parse_key_string(const char *key_str, uint8_t *key)
{
	const char *pt_start = key_str, *pt_end = key_str;
	uint32_t nb_bytes = 0;

	while (pt_end != NULL) {
		char sub_str[3] = {0};

		pt_end = strchr(pt_start, ':');

		if (pt_end == NULL) {
			if (strlen(pt_start) > 2)
				return 0;
			strncpy(sub_str, pt_start, 2);
		} else {
			if (pt_end - pt_start > 2)
				return 0;

			strncpy(sub_str, pt_start, pt_end - pt_start);
			pt_start = pt_end + 1;
		}

		/* no bounds check here: the caller must supply a buffer
		 * large enough for the expected key length
		 */
		key[nb_bytes++] = strtol(sub_str, NULL, 16);
	}

	return nb_bytes;
}

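/*
 * Illustrative usage (editorial sketch, not part of the original code;
 * MAX_KEY_SIZE is the key buffer bound assumed from ipsec.h):
 *
 *	uint8_t buf[MAX_KEY_SIZE];
 *	uint32_t n = parse_key_string("de:ad:be:ef", buf);
 *	// n == 4; buf holds { 0xde, 0xad, 0xbe, 0xef }
 *
 * A return of 0 signals a malformed string (a segment longer than two
 * hex digits); callers below compare the byte count against the
 * algorithm's expected key length.
 */
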
void
parse_sa_tokens(char **tokens, uint32_t n_tokens,
	struct parse_status *status)
{
	struct ipsec_sa *rule = NULL;
	uint32_t ti;	/* token index */
	uint32_t *ri;	/* rule index */
	uint32_t cipher_algo_p = 0;
	uint32_t auth_algo_p = 0;
	uint32_t aead_algo_p = 0;
	uint32_t src_p = 0;
	uint32_t dst_p = 0;
	uint32_t mode_p = 0;
	uint32_t type_p = 0;
	uint32_t portid_p = 0;

	if (strcmp(tokens[0], "in") == 0) {
		ri = &nb_sa_in;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_in[*ri];
	} else {
		ri = &nb_sa_out;

		APP_CHECK(*ri <= IPSEC_SA_MAX_ENTRIES - 1, status,
			"too many sa rules, abort insertion\n");
		if (status->status < 0)
			return;

		rule = &sa_out[*ri];
	}

	/* spi number */
	APP_CHECK_TOKEN_IS_NUM(tokens, 1, status);
	if (status->status < 0)
		return;
	if (atoi(tokens[1]) == INVALID_SPI)
		return;
	rule->spi = atoi(tokens[1]);

	for (ti = 2; ti < n_tokens; ti++) {
		if (strcmp(tokens[ti], "mode") == 0) {
			APP_CHECK_PRESENCE(mode_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "ipv4-tunnel") == 0)
				rule->flags = IP4_TUNNEL;
			else if (strcmp(tokens[ti], "ipv6-tunnel") == 0)
				rule->flags = IP6_TUNNEL;
			else if (strcmp(tokens[ti], "transport") == 0)
				rule->flags = TRANSPORT;
			else {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			mode_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "cipher_algo") == 0) {
			const struct supported_cipher_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(cipher_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_cipher_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			/* do not dereference algo when the lookup failed */
			if (status->status < 0)
				return;

			rule->cipher_algo = algo->algo;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;
			rule->cipher_key_len = algo->key_len;

			/* for NULL algorithm, no cipher key required */
			if (rule->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
				cipher_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "cipher_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"cipher_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CBC)
				rule->salt = (uint32_t)rte_rand();

			if (algo->algo == RTE_CRYPTO_CIPHER_AES_CTR) {
				key_len -= 4;
				rule->cipher_key_len = key_len;
				memcpy(&rule->salt,
					&rule->cipher_key[key_len], 4);
			}

			cipher_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "auth_algo") == 0) {
			const struct supported_auth_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(auth_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_auth_algo(tokens[ti]);
			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			/* do not dereference algo when the lookup failed */
			if (status->status < 0)
				return;

			rule->auth_algo = algo->algo;
			rule->auth_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;

			/* NULL algorithm and combined algos do not
			 * require auth key
			 */
			if (algo->key_not_req) {
				auth_algo_p = 1;
				continue;
			}

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "auth_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"auth_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->auth_key);
			APP_CHECK(key_len == rule->auth_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			auth_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "aead_algo") == 0) {
			const struct supported_aead_algo *algo;
			uint32_t key_len;

			APP_CHECK_PRESENCE(aead_algo_p, tokens[ti],
				status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			algo = find_match_aead_algo(tokens[ti]);

			APP_CHECK(algo != NULL, status, "unrecognized "
				"input \"%s\"", tokens[ti]);

			/* do not dereference algo when the lookup failed */
			if (status->status < 0)
				return;

			rule->aead_algo = algo->algo;
			rule->cipher_key_len = algo->key_len;
			rule->digest_len = algo->digest_len;
			rule->aad_len = algo->aad_len;
			rule->block_size = algo->block_size;
			rule->iv_len = algo->iv_len;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			APP_CHECK(strcmp(tokens[ti], "aead_key") == 0,
				status, "unrecognized input \"%s\", "
				"expect \"aead_key\"", tokens[ti]);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			key_len = parse_key_string(tokens[ti],
				rule->cipher_key);
			APP_CHECK(key_len == rule->cipher_key_len, status,
				"unrecognized input \"%s\"", tokens[ti]);
			if (status->status < 0)
				return;

			/* the last 4 bytes of the key string are the salt */
			key_len -= 4;
			rule->cipher_key_len = key_len;
			memcpy(&rule->salt,
				&rule->cipher_key[key_len], 4);

			aead_algo_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "src") == 0) {
			APP_CHECK_PRESENCE(src_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (rule->flags == IP4_TUNNEL) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->src.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (rule->flags == IP6_TUNNEL) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->src.ip.ip6.ip6_b,
					ip.s6_addr, 16);
			} else if (rule->flags == TRANSPORT) {
				APP_CHECK(0, status, "unrecognized input "
					"\"%s\"", tokens[ti]);
				return;
			}

			src_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "dst") == 0) {
			APP_CHECK_PRESENCE(dst_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (rule->flags == IP4_TUNNEL) {
				struct in_addr ip;

				APP_CHECK(parse_ipv4_addr(tokens[ti],
					&ip, NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv4 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				rule->dst.ip.ip4 = rte_bswap32(
					(uint32_t)ip.s_addr);
			} else if (rule->flags == IP6_TUNNEL) {
				struct in6_addr ip;

				APP_CHECK(parse_ipv6_addr(tokens[ti], &ip,
					NULL) == 0, status,
					"unrecognized input \"%s\", "
					"expect valid ipv6 addr",
					tokens[ti]);
				if (status->status < 0)
					return;
				memcpy(rule->dst.ip.ip6.ip6_b, ip.s6_addr, 16);
			} else if (rule->flags == TRANSPORT) {
				APP_CHECK(0, status, "unrecognized "
					"input \"%s\"", tokens[ti]);
				return;
			}

			dst_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "type") == 0) {
			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
			if (status->status < 0)
				return;

			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;

			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
				rule->type =
					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
			else if (strcmp(tokens[ti],
					"inline-protocol-offload") == 0)
				rule->type =
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
			else if (strcmp(tokens[ti],
					"lookaside-protocol-offload") == 0)
				rule->type =
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
			else if (strcmp(tokens[ti], "no-offload") == 0)
				rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
			else {
				APP_CHECK(0, status, "Invalid input \"%s\"",
					tokens[ti]);
				return;
			}

			type_p = 1;
			continue;
		}

		if (strcmp(tokens[ti], "port_id") == 0) {
			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
			if (status->status < 0)
				return;
			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
			if (status->status < 0)
				return;
			rule->portid = atoi(tokens[ti]);
			if (status->status < 0)
				return;
			portid_p = 1;
			continue;
		}

		/* unrecognizable input */
		APP_CHECK(0, status, "unrecognized input \"%s\"",
			tokens[ti]);
		return;
	}

	if (aead_algo_p) {
		APP_CHECK(cipher_algo_p == 0, status,
			"AEAD used, no need for cipher options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 0, status,
			"AEAD used, no need for auth options");
		if (status->status < 0)
			return;
	} else {
		APP_CHECK(cipher_algo_p == 1, status,
			"missing cipher or AEAD options");
		if (status->status < 0)
			return;

		APP_CHECK(auth_algo_p == 1, status,
			"missing auth or AEAD options");
		if (status->status < 0)
			return;
	}

	APP_CHECK(mode_p == 1, status, "missing mode option");
	if (status->status < 0)
		return;

	if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
		printf("Missing portid option, falling back to non-offload\n");

	if (!type_p || !portid_p) {
		rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
		rule->portid = -1;
	}

	*ri = *ri + 1;
}

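/*
 * Illustrative SA rules as they could appear in the configuration file
 * (editorial sketch; the keys are placeholders and the leading "sa"
 * keyword is consumed by the higher-level config parser before the
 * remaining tokens reach parse_sa_tokens()):
 *
 *	sa out 5 cipher_algo aes-128-cbc cipher_key <16 hex bytes> \
 *		auth_algo sha1-hmac auth_key <20 hex bytes> \
 *		mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
 *
 *	sa in 5 aead_algo aes-128-gcm aead_key <20 hex bytes> \
 *		mode ipv4-tunnel src 172.16.2.5 dst 172.16.1.5
 */
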
static inline void
print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
{
	uint32_t i;
	uint8_t a, b, c, d;

	printf("\tspi_%s(%3u):", inbound ? "in" : "out", sa->spi);

	for (i = 0; i < RTE_DIM(cipher_algos); i++) {
		if (cipher_algos[i].algo == sa->cipher_algo &&
				cipher_algos[i].key_len == sa->cipher_key_len) {
			printf("%s ", cipher_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(auth_algos); i++) {
		if (auth_algos[i].algo == sa->auth_algo) {
			printf("%s ", auth_algos[i].keyword);
			break;
		}
	}

	for (i = 0; i < RTE_DIM(aead_algos); i++) {
		if (aead_algos[i].algo == sa->aead_algo) {
			printf("%s ", aead_algos[i].keyword);
			break;
		}
	}

	printf("mode:");

	switch (sa->flags) {
	case IP4_TUNNEL:
		printf("IP4Tunnel ");
		uint32_t_to_char(sa->src.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu ", d, c, b, a);
		uint32_t_to_char(sa->dst.ip.ip4, &a, &b, &c, &d);
		printf("%hhu.%hhu.%hhu.%hhu", d, c, b, a);
		break;
	case IP6_TUNNEL:
		printf("IP6Tunnel ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->src.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->src.ip.ip6.ip6_b[i]);
		}
		printf(" ");
		for (i = 0; i < 16; i++) {
			if (i % 2 && i != 15)
				printf("%.2x:", sa->dst.ip.ip6.ip6_b[i]);
			else
				printf("%.2x", sa->dst.ip.ip6.ip6_b[i]);
		}
		break;
	case TRANSPORT:
		printf("Transport");
		break;
	}
	printf("\n");
}

struct sa_ctx {
	struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
	union {
		struct {
			struct rte_crypto_sym_xform a;
			struct rte_crypto_sym_xform b;
		};
	} xf[IPSEC_SA_MAX_ENTRIES];
};

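/*
 * Editorial note on xf[] (sketch of what sa_add_rules() below sets up):
 * each SA owns a pair of chained symmetric transforms. Inbound non-AEAD
 * SAs chain xf[idx].a (auth, VERIFY) -> xf[idx].b (cipher, DECRYPT);
 * outbound SAs reverse the roles: xf[idx].a (cipher, ENCRYPT) ->
 * xf[idx].b (auth, GENERATE). AEAD SAs use xf[idx].a alone.
 */
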
static struct sa_ctx *
sa_create(const char *name, int32_t socket_id)
{
	char s[PATH_MAX];
	struct sa_ctx *sa_ctx;
	uint32_t mz_size;
	const struct rte_memzone *mz;

	snprintf(s, sizeof(s), "%s_%d", name, socket_id);

	/* Create SA array table */
	printf("Creating SA context with %u maximum entries\n",
		IPSEC_SA_MAX_ENTRIES);

	mz_size = sizeof(struct sa_ctx);
	mz = rte_memzone_reserve(s, mz_size, socket_id,
		RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY);
	if (mz == NULL) {
		printf("Failed to allocate SA DB memory\n");
		rte_errno = ENOMEM;	/* rte_errno takes positive values */
		return NULL;
	}

	sa_ctx = (struct sa_ctx *)mz->addr;

	return sa_ctx;
}

static int
check_eth_dev_caps(uint16_t portid, uint32_t inbound)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(portid, &dev_info);

	if (inbound) {
		if ((dev_info.rx_offload_capa &
				DEV_RX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware RX IPSec offload is not supported\n");
			return -EINVAL;
		}

	} else { /* outbound */
		if ((dev_info.tx_offload_capa &
				DEV_TX_OFFLOAD_SECURITY) == 0) {
			RTE_LOG(WARNING, PORT,
				"hardware TX IPSec offload is not supported\n");
			return -EINVAL;
		}
	}
	return 0;
}


static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries, uint32_t inbound)
{
	struct ipsec_sa *sa;
	uint32_t i, idx;
	uint16_t iv_length;

	for (i = 0; i < nb_entries; i++) {
		idx = SPI2IDX(entries[i].spi);
		sa = &sa_ctx->sa[idx];
		if (sa->spi != 0) {
			printf("Index %u already in use by SPI %u\n",
				idx, sa->spi);
			return -EINVAL;
		}
		*sa = entries[i];
		sa->seq = 0;

		if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
			sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
			if (check_eth_dev_caps(sa->portid, inbound))
				return -EINVAL;
		}

		sa->direction = (inbound == 1) ?
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
			RTE_SECURITY_IPSEC_SA_DIR_EGRESS;

		switch (sa->flags) {
		case IP4_TUNNEL:
			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
		}

		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
			iv_length = 16;

			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
			sa_ctx->xf[idx].a.aead.key.length =
				sa->cipher_key_len;
			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
				RTE_CRYPTO_AEAD_OP_DECRYPT :
				RTE_CRYPTO_AEAD_OP_ENCRYPT;
			sa_ctx->xf[idx].a.next = NULL;
			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
			sa_ctx->xf[idx].a.aead.aad_length =
				sa->aad_len;
			sa_ctx->xf[idx].a.aead.digest_length =
				sa->digest_len;

			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		} else {
			switch (sa->cipher_algo) {
			case RTE_CRYPTO_CIPHER_NULL:
			case RTE_CRYPTO_CIPHER_AES_CBC:
				iv_length = sa->iv_len;
				break;
			case RTE_CRYPTO_CIPHER_AES_CTR:
				iv_length = 16;
				break;
			default:
				RTE_LOG(ERR, IPSEC_ESP,
					"unsupported cipher algorithm %u\n",
					sa->cipher_algo);
				return -EINVAL;
			}

			if (inbound) {
				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].b.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].b.cipher.op =
					RTE_CRYPTO_CIPHER_OP_DECRYPT;
				sa_ctx->xf[idx].b.next = NULL;
				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].a.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].a.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].a.auth.op =
					RTE_CRYPTO_AUTH_OP_VERIFY;
			} else { /* outbound */
				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
				sa_ctx->xf[idx].a.cipher.key.length =
					sa->cipher_key_len;
				sa_ctx->xf[idx].a.cipher.op =
					RTE_CRYPTO_CIPHER_OP_ENCRYPT;
				sa_ctx->xf[idx].a.next = NULL;
				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;

				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
				sa_ctx->xf[idx].b.auth.key.length =
					sa->auth_key_len;
				sa_ctx->xf[idx].b.auth.digest_length =
					sa->digest_len;
				sa_ctx->xf[idx].b.auth.op =
					RTE_CRYPTO_AUTH_OP_GENERATE;
			}

			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
			sa_ctx->xf[idx].b.next = NULL;
			sa->xforms = &sa_ctx->xf[idx].a;

			print_one_sa_rule(sa, inbound);
		}
	}

	return 0;
}

static inline int
sa_out_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 0);
}

static inline int
sa_in_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
		uint32_t nb_entries)
{
	return sa_add_rules(sa_ctx, entries, nb_entries, 1);
}

void
sa_init(struct socket_ctx *ctx, int32_t socket_id)
{
	const char *name;

	if (ctx == NULL)
		rte_exit(EXIT_FAILURE, "NULL context.\n");

	if (ctx->sa_in != NULL)
		rte_exit(EXIT_FAILURE, "Inbound SA DB for socket %d already "
			"initialized\n", socket_id);

	if (ctx->sa_out != NULL)
		rte_exit(EXIT_FAILURE, "Outbound SA DB for socket %d already "
			"initialized\n", socket_id);

	if (nb_sa_in > 0) {
		name = "sa_in";
		ctx->sa_in = sa_create(name, socket_id);
		if (ctx->sa_in == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s on socket %d\n", rte_errno,
				name, socket_id);

		sa_in_add_rules(ctx->sa_in, sa_in, nb_sa_in);
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Inbound rule specified\n");

	if (nb_sa_out > 0) {
		name = "sa_out";
		ctx->sa_out = sa_create(name, socket_id);
		if (ctx->sa_out == NULL)
			rte_exit(EXIT_FAILURE, "Error [%d] creating SA "
				"context %s on socket %d\n", rte_errno,
				name, socket_id);

		sa_out_add_rules(ctx->sa_out, sa_out, nb_sa_out);
	} else
		RTE_LOG(WARNING, IPSEC, "No SA Outbound rule "
			"specified\n");
}

int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
	struct ipsec_mbuf_metadata *priv;

	priv = get_priv(m);

	return (sa_ctx->sa[sa_idx].spi == priv->sa->spi);
}

static inline void
single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
		struct ipsec_sa **sa_ret)
{
	struct esp_hdr *esp;
	struct ip *ip;
	uint32_t *src4_addr;
	uint8_t *src6_addr;
	struct ipsec_sa *sa;

	*sa_ret = NULL;

	ip = rte_pktmbuf_mtod(pkt, struct ip *);
	if (ip->ip_v == IPVERSION)
		esp = (struct esp_hdr *)(ip + 1);
	else
		esp = (struct esp_hdr *)(((struct ip6_hdr *)ip) + 1);

	if (esp->spi == INVALID_SPI)
		return;

	sa = &sadb[SPI2IDX(rte_be_to_cpu_32(esp->spi))];
	if (rte_be_to_cpu_32(esp->spi) != sa->spi)
		return;

	switch (sa->flags) {
	case IP4_TUNNEL:
		src4_addr = RTE_PTR_ADD(ip, offsetof(struct ip, ip_src));
		if ((ip->ip_v == IPVERSION) &&
				(sa->src.ip.ip4 == *src4_addr) &&
				(sa->dst.ip.ip4 == *(src4_addr + 1)))
			*sa_ret = sa;
		break;
	case IP6_TUNNEL:
		src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
		if ((ip->ip_v == IP6_VERSION) &&
				!memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
				!memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
			*sa_ret = sa;
		break;
	case TRANSPORT:
		*sa_ret = sa;
	}
}

void
inbound_sa_lookup(struct sa_ctx *sa_ctx, struct rte_mbuf *pkts[],
		struct ipsec_sa *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		single_inbound_lookup(sa_ctx->sa, pkts[i], &sa[i]);
}

void
outbound_sa_lookup(struct sa_ctx *sa_ctx, uint32_t sa_idx[],
		struct ipsec_sa *sa[], uint16_t nb_pkts)
{
	uint32_t i;

	for (i = 0; i < nb_pkts; i++)
		sa[i] = &sa_ctx->sa[sa_idx[i]];
}
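
/*
 * Editorial usage sketch (assumes the surrounding ipsec-secgw setup and
 * the MAX_PKT_BURST bound from ipsec.h): once configuration parsing has
 * filled sa_in[]/sa_out[], initialization calls
 *
 *	sa_init(&socket_ctx[socket_id], socket_id);
 *
 * and the datapath resolves SAs per burst, e.g.
 *
 *	struct ipsec_sa *sas[MAX_PKT_BURST];
 *	inbound_sa_lookup(ctx->sa_in, pkts, sas, nb_pkts);
 *
 * where inbound packets are matched by the SPI in their ESP header and
 * outbound packets by the index produced by the SP (policy) lookup.
 */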