]>
Commit | Line | Data |
---|---|---|
f67539c2 TL |
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright(c) 2020 Intel Corporation | |
3 | */ | |
4 | ||
5 | #include <stdio.h> | |
6 | #include <rte_ip.h> | |
7 | #include <rte_malloc.h> | |
8 | #include <rte_ring.h> | |
9 | #include <rte_mbuf.h> | |
10 | #include <rte_cycles.h> | |
11 | #include <rte_ipsec.h> | |
12 | #include <rte_random.h> | |
13 | ||
14 | #include "test.h" | |
15 | #include "test_cryptodev.h" | |
16 | ||
#define RING_SIZE 4096	/* capacity of each inter-stage ring */
#define BURST_SIZE 64	/* max packets handled per prepare/process call */
#define NUM_MBUF 4095	/* mbufs pre-loaded into the inbound ring (RING_SIZE - 1) */
#define DEFAULT_SPI 7	/* SPI shared by the inbound and outbound SAs */

/* Parameters describing one entry of the perf-test matrix. */
struct ipsec_test_cfg {
	uint32_t replay_win_sz;	/* anti-replay window size; 0 disables anti-replay */
	uint32_t esn;		/* non-zero enables extended sequence numbers */
	uint64_t flags;		/* passed through to rte_ipsec_sa_prm.flags */
	enum rte_crypto_sym_xform_type type;	/* AEAD or CIPHER (+AUTH) */
};

/* Pools shared by all test cases; created in testsuite_setup(). */
struct rte_mempool *mbuf_pool, *cop_pool;

/* Accumulated call/packet counts and cycle totals for one SA. */
struct stats_counter {
	uint64_t nb_prepare_call;
	uint64_t nb_prepare_pkt;
	uint64_t nb_process_call;
	uint64_t nb_process_pkt;
	uint64_t prepare_ticks_elapsed;
	uint64_t process_ticks_elapsed;
};
39 | ||
/* Full state for one security association under test. */
struct ipsec_sa {
	struct rte_ipsec_session ss[2];	/* only ss[0] is used in this test */
	struct rte_ipsec_sa_prm sa_prm;
	struct rte_security_ipsec_xform ipsec_xform;
	struct rte_crypto_sym_xform cipher_xform;
	struct rte_crypto_sym_xform auth_xform;
	struct rte_crypto_sym_xform aead_xform;
	struct rte_crypto_sym_xform *crypto_xforms;	/* head of the xform chain */
	struct rte_crypto_op *cop[BURST_SIZE];	/* scratch ops for one burst */
	enum rte_crypto_sym_xform_type type;
	struct stats_counter cnt;
	uint32_t replay_win_sz;
	uint32_t sa_flags;
};
54 | ||
/* Test matrix: {replay_win_sz, esn, flags, xform type}. */
static const struct ipsec_test_cfg test_cfg[] = {
	{0, 0, 0, RTE_CRYPTO_SYM_XFORM_AEAD},
	{0, 0, 0, RTE_CRYPTO_SYM_XFORM_CIPHER},
	{128, 1, 0, RTE_CRYPTO_SYM_XFORM_AEAD},
	{128, 1, 0, RTE_CRYPTO_SYM_XFORM_CIPHER},

};

/* Outer IPv4 header template used for ESP tunnel encapsulation. */
static struct rte_ipv4_hdr ipv4_outer = {
	.version_ihl = IPVERSION << 4 |
		sizeof(ipv4_outer) / RTE_IPV4_IHL_MULTIPLIER,
	.time_to_live = IPDEFTTL,
	.next_proto_id = IPPROTO_ESP,
	.src_addr = RTE_IPV4(192, 168, 1, 100),
	.dst_addr = RTE_IPV4(192, 168, 2, 100),
};

/* Rings carrying mbufs between prepare and process stages of each direction. */
static struct rte_ring *ring_inb_prepare;
static struct rte_ring *ring_inb_process;
static struct rte_ring *ring_outb_prepare;
static struct rte_ring *ring_outb_process;
76 | ||
/* Description of one supported cipher algorithm (sizes in bytes). */
struct supported_cipher_algo {
	const char *keyword;
	enum rte_crypto_cipher_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t key_len;
};

/* Description of one supported auth algorithm (sizes in bytes). */
struct supported_auth_algo {
	const char *keyword;
	enum rte_crypto_auth_algorithm algo;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t key_not_req;	/* non-zero: algorithm takes no key */
};

/* Description of one supported AEAD algorithm (sizes in bytes). */
struct supported_aead_algo {
	const char *keyword;
	enum rte_crypto_aead_algorithm algo;
	uint16_t iv_len;
	uint16_t block_size;
	uint16_t digest_len;
	uint16_t key_len;
	uint8_t aad_len;
};
102 | ||
/* Single cipher used by the CIPHER+AUTH test cases. */
const struct supported_cipher_algo cipher_algo[] = {
	{
		.keyword = "aes-128-cbc",
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.iv_len = 16,
		.block_size = 16,
		.key_len = 16
	}
};

/* Single auth algorithm used by the CIPHER+AUTH test cases. */
const struct supported_auth_algo auth_algo[] = {
	{
		.keyword = "sha1-hmac",
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_len = 12,
		.key_len = 20
	}
};

/* Single AEAD algorithm used by the AEAD test cases. */
const struct supported_aead_algo aead_algo[] = {
	{
		.keyword = "aes-128-gcm",
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.iv_len = 8,
		/* NOTE(review): key_len 20 presumably means 16-byte key plus
		 * 4-byte salt, as in ipsec-secgw key material layout — confirm.
		 */
		.block_size = 4,
		.key_len = 20,
		.digest_len = 16,
		.aad_len = 8,
	}
};
133 | ||
134 | static struct rte_mbuf *generate_mbuf_data(struct rte_mempool *mpool) | |
135 | { | |
136 | struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mpool); | |
137 | ||
138 | if (mbuf) { | |
139 | mbuf->data_len = 64; | |
140 | mbuf->pkt_len = 64; | |
141 | } | |
142 | ||
143 | return mbuf; | |
144 | } | |
145 | ||
146 | static int | |
147 | fill_ipsec_param(struct ipsec_sa *sa) | |
148 | { | |
149 | struct rte_ipsec_sa_prm *prm = &sa->sa_prm; | |
150 | ||
151 | memset(prm, 0, sizeof(*prm)); | |
152 | ||
153 | prm->flags = sa->sa_flags; | |
154 | ||
155 | /* setup ipsec xform */ | |
156 | prm->ipsec_xform = sa->ipsec_xform; | |
157 | prm->ipsec_xform.salt = (uint32_t)rte_rand(); | |
158 | prm->ipsec_xform.replay_win_sz = sa->replay_win_sz; | |
159 | ||
160 | /* setup tunnel related fields */ | |
161 | prm->tun.hdr_len = sizeof(ipv4_outer); | |
162 | prm->tun.next_proto = IPPROTO_IPIP; | |
163 | prm->tun.hdr = &ipv4_outer; | |
164 | ||
165 | if (sa->type == RTE_CRYPTO_SYM_XFORM_AEAD) { | |
166 | sa->aead_xform.type = sa->type; | |
167 | sa->aead_xform.aead.algo = aead_algo->algo; | |
168 | sa->aead_xform.next = NULL; | |
169 | sa->aead_xform.aead.digest_length = aead_algo->digest_len; | |
170 | sa->aead_xform.aead.iv.offset = IV_OFFSET; | |
171 | sa->aead_xform.aead.iv.length = 12; | |
172 | ||
173 | if (sa->ipsec_xform.direction == | |
174 | RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { | |
175 | sa->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT; | |
176 | } else { | |
177 | sa->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT; | |
178 | } | |
179 | ||
180 | sa->crypto_xforms = &sa->aead_xform; | |
181 | } else { | |
182 | sa->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; | |
183 | sa->cipher_xform.cipher.algo = cipher_algo->algo; | |
184 | sa->cipher_xform.cipher.iv.offset = IV_OFFSET; | |
185 | sa->cipher_xform.cipher.iv.length = 12; | |
186 | sa->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH; | |
187 | sa->auth_xform.auth.algo = auth_algo->algo; | |
188 | sa->auth_xform.auth.digest_length = auth_algo->digest_len; | |
189 | ||
190 | ||
191 | if (sa->ipsec_xform.direction == | |
192 | RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { | |
193 | sa->cipher_xform.cipher.op = | |
194 | RTE_CRYPTO_CIPHER_OP_DECRYPT; | |
195 | sa->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; | |
196 | sa->cipher_xform.next = NULL; | |
197 | sa->auth_xform.next = &sa->cipher_xform; | |
198 | sa->crypto_xforms = &sa->auth_xform; | |
199 | } else { | |
200 | sa->cipher_xform.cipher.op = | |
201 | RTE_CRYPTO_CIPHER_OP_ENCRYPT; | |
202 | sa->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; | |
203 | sa->auth_xform.next = NULL; | |
204 | sa->cipher_xform.next = &sa->auth_xform; | |
205 | sa->crypto_xforms = &sa->cipher_xform; | |
206 | } | |
207 | } | |
208 | ||
209 | prm->crypto_xform = sa->crypto_xforms; | |
210 | ||
211 | return TEST_SUCCESS; | |
212 | } | |
213 | ||
214 | static int | |
215 | create_sa(enum rte_security_session_action_type action_type, | |
216 | struct ipsec_sa *sa) | |
217 | { | |
218 | static struct rte_cryptodev_sym_session dummy_ses; | |
219 | size_t sz; | |
220 | int rc; | |
221 | ||
222 | memset(&sa->ss[0], 0, sizeof(sa->ss[0])); | |
223 | ||
224 | rc = fill_ipsec_param(sa); | |
225 | if (rc != 0) { | |
226 | printf("failed to fill ipsec param\n"); | |
227 | return TEST_FAILED; | |
228 | } | |
229 | ||
230 | sz = rte_ipsec_sa_size(&sa->sa_prm); | |
231 | TEST_ASSERT(sz > 0, "rte_ipsec_sa_size() failed\n"); | |
232 | ||
233 | sa->ss[0].sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE); | |
234 | TEST_ASSERT_NOT_NULL(sa->ss[0].sa, | |
235 | "failed to allocate memory for rte_ipsec_sa\n"); | |
236 | ||
237 | sa->ss[0].type = action_type; | |
238 | sa->ss[0].crypto.ses = &dummy_ses; | |
239 | ||
240 | rc = rte_ipsec_sa_init(sa->ss[0].sa, &sa->sa_prm, sz); | |
241 | rc = (rc > 0 && (uint32_t)rc <= sz) ? 0 : -EINVAL; | |
242 | ||
243 | if (rc == 0) | |
244 | rc = rte_ipsec_session_prepare(&sa->ss[0]); | |
245 | else | |
246 | return TEST_FAILED; | |
247 | ||
248 | return TEST_SUCCESS; | |
249 | } | |
250 | ||
251 | static int | |
252 | packet_prepare(struct rte_mbuf **buf, struct ipsec_sa *sa, | |
253 | uint16_t num_pkts) | |
254 | { | |
255 | uint64_t time_stamp; | |
256 | uint16_t k = 0, i; | |
257 | ||
258 | for (i = 0; i < num_pkts; i++) { | |
259 | ||
260 | sa->cop[i] = rte_crypto_op_alloc(cop_pool, | |
261 | RTE_CRYPTO_OP_TYPE_SYMMETRIC); | |
262 | ||
263 | if (sa->cop[i] == NULL) { | |
264 | ||
265 | RTE_LOG(ERR, USER1, | |
266 | "Failed to allocate symmetric crypto op\n"); | |
267 | ||
268 | return k; | |
269 | } | |
270 | } | |
271 | ||
272 | time_stamp = rte_rdtsc_precise(); | |
273 | ||
274 | k = rte_ipsec_pkt_crypto_prepare(&sa->ss[0], buf, | |
275 | sa->cop, num_pkts); | |
276 | ||
277 | time_stamp = rte_rdtsc_precise() - time_stamp; | |
278 | ||
279 | if (k != num_pkts) { | |
280 | RTE_LOG(ERR, USER1, "rte_ipsec_pkt_crypto_prepare fail\n"); | |
281 | return k; | |
282 | } | |
283 | ||
284 | sa->cnt.prepare_ticks_elapsed += time_stamp; | |
285 | sa->cnt.nb_prepare_call++; | |
286 | sa->cnt.nb_prepare_pkt += k; | |
287 | ||
288 | for (i = 0; i < num_pkts; i++) | |
289 | rte_crypto_op_free(sa->cop[i]); | |
290 | ||
291 | return k; | |
292 | } | |
293 | ||
294 | static int | |
295 | packet_process(struct rte_mbuf **buf, struct ipsec_sa *sa, | |
296 | uint16_t num_pkts) | |
297 | { | |
298 | uint64_t time_stamp; | |
299 | uint16_t k = 0; | |
300 | ||
301 | time_stamp = rte_rdtsc_precise(); | |
302 | ||
303 | k = rte_ipsec_pkt_process(&sa->ss[0], buf, num_pkts); | |
304 | ||
305 | time_stamp = rte_rdtsc_precise() - time_stamp; | |
306 | ||
307 | if (k != num_pkts) { | |
308 | RTE_LOG(ERR, USER1, "rte_ipsec_pkt_process fail\n"); | |
309 | return k; | |
310 | } | |
311 | ||
312 | sa->cnt.process_ticks_elapsed += time_stamp; | |
313 | sa->cnt.nb_process_call++; | |
314 | sa->cnt.nb_process_pkt += k; | |
315 | ||
316 | return k; | |
317 | } | |
318 | ||
/*
 * Drain @deq_ring in bursts, run packet_prepare() on each burst and push the
 * packets to @enq_ring; then drain @enq_ring, run packet_process() and push
 * the results to @ring. Returns TEST_SUCCESS or TEST_FAILED.
 *
 * NOTE(review): a partial enqueue (0 < n < num_pkts) would silently drop the
 * un-enqueued mbufs; presumably this cannot happen because the rings hold
 * RING_SIZE > NUM_MBUF entries — confirm if ring sizes change.
 */
static int
create_traffic(struct ipsec_sa *sa, struct rte_ring *deq_ring,
	struct rte_ring *enq_ring, struct rte_ring *ring)
{
	struct rte_mbuf *mbuf[BURST_SIZE];
	uint16_t num_pkts, n;

	/* stage 1: prepare everything currently sitting in deq_ring */
	while (rte_ring_empty(deq_ring) == 0) {

		num_pkts = rte_ring_sc_dequeue_burst(deq_ring, (void **)mbuf,
			RTE_DIM(mbuf), NULL);

		if (num_pkts == 0)
			return TEST_FAILED;

		n = packet_prepare(mbuf, sa, num_pkts);
		if (n != num_pkts)
			return TEST_FAILED;

		num_pkts = rte_ring_sp_enqueue_burst(enq_ring, (void **)mbuf,
			num_pkts, NULL);
		if (num_pkts == 0)
			return TEST_FAILED;
	}

	/* stage 2: what was just prepared now gets processed into @ring */
	deq_ring = enq_ring;
	enq_ring = ring;

	while (rte_ring_empty(deq_ring) == 0) {

		num_pkts = rte_ring_sc_dequeue_burst(deq_ring, (void **)mbuf,
			RTE_DIM(mbuf), NULL);
		if (num_pkts == 0)
			return TEST_FAILED;

		n = packet_process(mbuf, sa, num_pkts);
		if (n != num_pkts)
			return TEST_FAILED;

		num_pkts = rte_ring_sp_enqueue_burst(enq_ring, (void **)mbuf,
			num_pkts, NULL);
		if (num_pkts == 0)
			return TEST_FAILED;
	}

	return TEST_SUCCESS;
}
366 | ||
367 | static void | |
368 | fill_ipsec_sa_out(const struct ipsec_test_cfg *test_cfg, | |
369 | struct ipsec_sa *sa) | |
370 | { | |
371 | sa->ipsec_xform.spi = DEFAULT_SPI; | |
372 | sa->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS; | |
373 | sa->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP; | |
374 | sa->ipsec_xform.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL; | |
375 | sa->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4; | |
376 | sa->ipsec_xform.options.esn = test_cfg->esn; | |
377 | sa->type = test_cfg->type; | |
378 | sa->replay_win_sz = test_cfg->replay_win_sz; | |
379 | sa->sa_flags = test_cfg->flags; | |
380 | sa->cnt.nb_prepare_call = 0; | |
381 | sa->cnt.nb_prepare_pkt = 0; | |
382 | sa->cnt.nb_process_call = 0; | |
383 | sa->cnt.nb_process_pkt = 0; | |
384 | sa->cnt.process_ticks_elapsed = 0; | |
385 | sa->cnt.prepare_ticks_elapsed = 0; | |
386 | ||
387 | } | |
388 | ||
389 | static void | |
390 | fill_ipsec_sa_in(const struct ipsec_test_cfg *test_cfg, | |
391 | struct ipsec_sa *sa) | |
392 | { | |
393 | sa->ipsec_xform.spi = DEFAULT_SPI; | |
394 | sa->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS; | |
395 | sa->ipsec_xform.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP; | |
396 | sa->ipsec_xform.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL; | |
397 | sa->ipsec_xform.tunnel.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4; | |
398 | sa->ipsec_xform.options.esn = test_cfg->esn; | |
399 | sa->type = test_cfg->type; | |
400 | sa->replay_win_sz = test_cfg->replay_win_sz; | |
401 | sa->sa_flags = test_cfg->flags; | |
402 | sa->cnt.nb_prepare_call = 0; | |
403 | sa->cnt.nb_prepare_pkt = 0; | |
404 | sa->cnt.nb_process_call = 0; | |
405 | sa->cnt.nb_process_pkt = 0; | |
406 | sa->cnt.process_ticks_elapsed = 0; | |
407 | sa->cnt.prepare_ticks_elapsed = 0; | |
408 | } | |
409 | ||
410 | static int | |
411 | init_sa_session(const struct ipsec_test_cfg *test_cfg, | |
412 | struct ipsec_sa *sa_out, struct ipsec_sa *sa_in) | |
413 | { | |
414 | ||
415 | int rc; | |
416 | ||
417 | fill_ipsec_sa_in(test_cfg, sa_in); | |
418 | fill_ipsec_sa_out(test_cfg, sa_out); | |
419 | ||
420 | rc = create_sa(RTE_SECURITY_ACTION_TYPE_NONE, sa_out); | |
421 | if (rc != 0) { | |
422 | RTE_LOG(ERR, USER1, "out bound create_sa failed, cfg\n"); | |
423 | return TEST_FAILED; | |
424 | } | |
425 | ||
426 | rc = create_sa(RTE_SECURITY_ACTION_TYPE_NONE, sa_in); | |
427 | if (rc != 0) { | |
428 | RTE_LOG(ERR, USER1, "out bound create_sa failed, cfg\n"); | |
429 | return TEST_FAILED; | |
430 | } | |
431 | ||
432 | return TEST_SUCCESS; | |
433 | } | |
434 | ||
435 | static int | |
436 | testsuite_setup(void) | |
437 | { | |
438 | struct rte_mbuf *mbuf; | |
439 | int i; | |
440 | ||
441 | mbuf_pool = rte_pktmbuf_pool_create("IPSEC_PERF_MBUFPOOL", | |
442 | NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE, | |
443 | rte_socket_id()); | |
444 | if (mbuf_pool == NULL) { | |
445 | RTE_LOG(ERR, USER1, "Can't create MBUFPOOL\n"); | |
446 | return TEST_FAILED; | |
447 | } | |
448 | ||
449 | cop_pool = rte_crypto_op_pool_create( | |
450 | "MBUF_CRYPTO_SYM_OP_POOL", | |
451 | RTE_CRYPTO_OP_TYPE_SYMMETRIC, | |
452 | NUM_MBUFS, MBUF_CACHE_SIZE, | |
453 | DEFAULT_NUM_XFORMS * | |
454 | sizeof(struct rte_crypto_sym_xform) + | |
455 | MAXIMUM_IV_LENGTH, | |
456 | rte_socket_id()); | |
457 | if (cop_pool == NULL) { | |
458 | RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n"); | |
459 | return TEST_FAILED; | |
460 | } | |
461 | ||
462 | ring_inb_prepare = rte_ring_create("ipsec_test_ring_inb_prepare", | |
463 | RING_SIZE, SOCKET_ID_ANY, 0); | |
464 | if (ring_inb_prepare == NULL) | |
465 | return TEST_FAILED; | |
466 | ||
467 | ring_inb_process = rte_ring_create("ipsec_test_ring_inb_process", | |
468 | RING_SIZE, SOCKET_ID_ANY, 0); | |
469 | if (ring_inb_process == NULL) | |
470 | return TEST_FAILED; | |
471 | ||
472 | ring_outb_prepare = rte_ring_create("ipsec_test_ring_outb_prepare", | |
473 | RING_SIZE, SOCKET_ID_ANY, 0); | |
474 | if (ring_outb_prepare == NULL) | |
475 | return TEST_FAILED; | |
476 | ||
477 | ring_outb_process = rte_ring_create("ipsec_test_ring_outb_process", | |
478 | RING_SIZE, SOCKET_ID_ANY, 0); | |
479 | if (ring_outb_process == NULL) | |
480 | return TEST_FAILED; | |
481 | ||
482 | for (i = 0; i < NUM_MBUF; i++) { | |
483 | mbuf = generate_mbuf_data(mbuf_pool); | |
484 | ||
485 | if (mbuf && rte_ring_sp_enqueue_bulk(ring_inb_prepare, | |
486 | (void **)&mbuf, 1, NULL)) | |
487 | continue; | |
488 | else | |
489 | return TEST_FAILED; | |
490 | } | |
491 | ||
492 | return TEST_SUCCESS; | |
493 | } | |
494 | ||
495 | static int | |
496 | measure_performance(struct ipsec_sa *sa_out, struct ipsec_sa *sa_in) | |
497 | { | |
498 | uint64_t time_diff = 0; | |
499 | uint64_t begin = 0; | |
500 | uint64_t hz = rte_get_timer_hz(); | |
501 | ||
502 | begin = rte_get_timer_cycles(); | |
503 | ||
504 | do { | |
505 | if (create_traffic(sa_out, ring_inb_prepare, ring_inb_process, | |
506 | ring_outb_prepare) < 0) | |
507 | return TEST_FAILED; | |
508 | ||
509 | if (create_traffic(sa_in, ring_outb_prepare, ring_outb_process, | |
510 | ring_inb_prepare) < 0) | |
511 | return TEST_FAILED; | |
512 | ||
513 | time_diff = rte_get_timer_cycles() - begin; | |
514 | ||
515 | } while (time_diff < (hz * 10)); | |
516 | ||
517 | return TEST_SUCCESS; | |
518 | } | |
519 | ||
520 | static void | |
521 | print_metrics(const struct ipsec_test_cfg *test_cfg, | |
522 | struct ipsec_sa *sa_out, struct ipsec_sa *sa_in) | |
523 | { | |
524 | printf("\nMetrics of libipsec prepare/process api:\n"); | |
525 | ||
526 | printf("replay window size = %u\n", test_cfg->replay_win_sz); | |
527 | if (test_cfg->esn) | |
528 | printf("replay esn is enabled\n"); | |
529 | else | |
530 | printf("replay esn is disabled\n"); | |
531 | if (test_cfg->type == RTE_CRYPTO_SYM_XFORM_AEAD) | |
532 | printf("AEAD algo is AES_GCM\n"); | |
533 | else | |
534 | printf("CIPHER/AUTH algo is AES_CBC/SHA1\n"); | |
535 | ||
536 | ||
537 | printf("avg cycles for a pkt prepare in outbound is = %.2Lf\n", | |
538 | (long double)sa_out->cnt.prepare_ticks_elapsed | |
539 | / sa_out->cnt.nb_prepare_pkt); | |
540 | printf("avg cycles for a pkt process in outbound is = %.2Lf\n", | |
541 | (long double)sa_out->cnt.process_ticks_elapsed | |
542 | / sa_out->cnt.nb_process_pkt); | |
543 | printf("avg cycles for a pkt prepare in inbound is = %.2Lf\n", | |
544 | (long double)sa_in->cnt.prepare_ticks_elapsed | |
545 | / sa_in->cnt.nb_prepare_pkt); | |
546 | printf("avg cycles for a pkt process in inbound is = %.2Lf\n", | |
547 | (long double)sa_in->cnt.process_ticks_elapsed | |
548 | / sa_in->cnt.nb_process_pkt); | |
549 | ||
550 | } | |
551 | ||
552 | static void | |
553 | testsuite_teardown(void) | |
554 | { | |
555 | if (mbuf_pool != NULL) { | |
556 | RTE_LOG(DEBUG, USER1, "MBUFPOOL count %u\n", | |
557 | rte_mempool_avail_count(mbuf_pool)); | |
558 | rte_mempool_free(mbuf_pool); | |
559 | mbuf_pool = NULL; | |
560 | } | |
561 | ||
562 | if (cop_pool != NULL) { | |
563 | RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n", | |
564 | rte_mempool_avail_count(cop_pool)); | |
565 | rte_mempool_free(cop_pool); | |
566 | cop_pool = NULL; | |
567 | } | |
568 | ||
569 | rte_ring_free(ring_inb_prepare); | |
570 | rte_ring_free(ring_inb_process); | |
571 | rte_ring_free(ring_outb_prepare); | |
572 | rte_ring_free(ring_outb_process); | |
573 | ||
574 | ring_inb_prepare = NULL; | |
575 | ring_inb_process = NULL; | |
576 | ring_outb_prepare = NULL; | |
577 | ring_outb_process = NULL; | |
578 | } | |
579 | ||
580 | static int | |
581 | test_libipsec_perf(void) | |
582 | { | |
583 | struct ipsec_sa sa_out; | |
584 | struct ipsec_sa sa_in; | |
585 | uint32_t i; | |
586 | int ret; | |
587 | ||
588 | if (testsuite_setup() < 0) { | |
589 | testsuite_teardown(); | |
590 | return TEST_FAILED; | |
591 | } | |
592 | ||
593 | for (i = 0; i < RTE_DIM(test_cfg) ; i++) { | |
594 | ||
595 | ret = init_sa_session(&test_cfg[i], &sa_out, &sa_in); | |
596 | if (ret != 0) { | |
597 | testsuite_teardown(); | |
598 | return TEST_FAILED; | |
599 | } | |
600 | ||
601 | if (measure_performance(&sa_out, &sa_in) < 0) { | |
602 | testsuite_teardown(); | |
603 | return TEST_FAILED; | |
604 | } | |
605 | ||
606 | print_metrics(&test_cfg[i], &sa_out, &sa_in); | |
607 | } | |
608 | ||
609 | testsuite_teardown(); | |
610 | ||
611 | return TEST_SUCCESS; | |
612 | } | |
613 | ||
614 | REGISTER_TEST_COMMAND(ipsec_perf_autotest, test_libipsec_perf); |