]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/test/test/test_compressdev.c
update download target update for octopus release
[ceph.git] / ceph / src / spdk / dpdk / test / test / test_compressdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
3 */
4 #include <string.h>
5 #include <zlib.h>
6 #include <math.h>
7
8 #include <rte_cycles.h>
9 #include <rte_malloc.h>
10 #include <rte_mempool.h>
11 #include <rte_mbuf.h>
12 #include <rte_compressdev.h>
13
14 #include "test_compressdev_test_buffer.h"
15 #include "test.h"
16
17 #define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
18
19 #define DEFAULT_WINDOW_SIZE 15
20 #define DEFAULT_MEM_LEVEL 8
21 #define MAX_DEQD_RETRIES 10
22 #define DEQUEUE_WAIT_TIME 10000
23
24 /*
25 * 30% extra size for compressed data compared to original data,
26 * in case data size cannot be reduced and it is actually bigger
27 * due to the compress block headers
28 */
29 #define COMPRESS_BUF_SIZE_RATIO 1.3
30 #define NUM_LARGE_MBUFS 16
31 #define SMALL_SEG_SIZE 256
32 #define MAX_SEGS 16
33 #define NUM_OPS 16
34 #define NUM_MAX_XFORMS 16
35 #define NUM_MAX_INFLIGHT_OPS 128
36 #define CACHE_SIZE 0
37
/* Printable names for each rte_comp_huffman value, used in debug logs */
const char *
huffman_type_strings[] = {
	[RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
	[RTE_COMP_HUFFMAN_FIXED] = "Fixed",
	[RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
};
44
/*
 * Selects which stages of the comp/decomp round trip are performed by
 * zlib (reference implementation) instead of the compressdev PMD.
 */
enum zlib_direction {
	ZLIB_NONE,		/* both stages use compressdev */
	ZLIB_COMPRESS,		/* zlib compresses, compressdev decompresses */
	ZLIB_DECOMPRESS,	/* compressdev compresses, zlib decompresses */
	ZLIB_ALL		/* both stages use zlib */
};
51
/*
 * Per-operation private data (stored right after the rte_comp_op):
 * remembers the operation's submission index so results can be matched
 * to their source buffer after out-of-order dequeue.
 */
struct priv_op_data {
	uint16_t orig_idx;	/* index of the op at enqueue time */
};
55
/* Resources shared by all test cases, created in testsuite_setup() */
struct comp_testsuite_params {
	struct rte_mempool *large_mbuf_pool;	/* one-segment data buffers */
	struct rte_mempool *small_mbuf_pool;	/* SMALL_SEG_SIZE segments for SGL tests */
	struct rte_mempool *op_pool;		/* rte_comp_op pool (+ priv_op_data) */
	struct rte_comp_xform *def_comp_xform;	/* default compress xform */
	struct rte_comp_xform *def_decomp_xform;	/* default decompress xform */
};
63
64 static struct comp_testsuite_params testsuite_params = { 0 };
65
66 static void
67 testsuite_teardown(void)
68 {
69 struct comp_testsuite_params *ts_params = &testsuite_params;
70
71 rte_mempool_free(ts_params->large_mbuf_pool);
72 rte_mempool_free(ts_params->small_mbuf_pool);
73 rte_mempool_free(ts_params->op_pool);
74 rte_free(ts_params->def_comp_xform);
75 rte_free(ts_params->def_decomp_xform);
76 }
77
78 static int
79 testsuite_setup(void)
80 {
81 struct comp_testsuite_params *ts_params = &testsuite_params;
82 uint32_t max_buf_size = 0;
83 unsigned int i;
84
85 if (rte_compressdev_count() == 0) {
86 RTE_LOG(ERR, USER1, "Need at least one compress device\n");
87 return TEST_FAILED;
88 }
89
90 RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
91 rte_compressdev_name_get(0));
92
93 for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
94 max_buf_size = RTE_MAX(max_buf_size,
95 strlen(compress_test_bufs[i]) + 1);
96
97 /*
98 * Buffers to be used in compression and decompression.
99 * Since decompressed data might be larger than
100 * compressed data (due to block header),
101 * buffers should be big enough for both cases.
102 */
103 max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
104 ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
105 NUM_LARGE_MBUFS,
106 CACHE_SIZE, 0,
107 max_buf_size + RTE_PKTMBUF_HEADROOM,
108 rte_socket_id());
109 if (ts_params->large_mbuf_pool == NULL) {
110 RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
111 return TEST_FAILED;
112 }
113
114 /* Create mempool with smaller buffers for SGL testing */
115 ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
116 NUM_LARGE_MBUFS * MAX_SEGS,
117 CACHE_SIZE, 0,
118 SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
119 rte_socket_id());
120 if (ts_params->small_mbuf_pool == NULL) {
121 RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
122 goto exit;
123 }
124
125 ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
126 0, sizeof(struct priv_op_data),
127 rte_socket_id());
128 if (ts_params->op_pool == NULL) {
129 RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
130 goto exit;
131 }
132
133 ts_params->def_comp_xform =
134 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
135 if (ts_params->def_comp_xform == NULL) {
136 RTE_LOG(ERR, USER1,
137 "Default compress xform could not be created\n");
138 goto exit;
139 }
140 ts_params->def_decomp_xform =
141 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
142 if (ts_params->def_decomp_xform == NULL) {
143 RTE_LOG(ERR, USER1,
144 "Default decompress xform could not be created\n");
145 goto exit;
146 }
147
148 /* Initializes default values for compress/decompress xforms */
149 ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
150 ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE,
151 ts_params->def_comp_xform->compress.deflate.huffman =
152 RTE_COMP_HUFFMAN_DEFAULT;
153 ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
154 ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
155 ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
156
157 ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
158 ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE,
159 ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
160 ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
161
162 return TEST_SUCCESS;
163
164 exit:
165 testsuite_teardown();
166
167 return TEST_FAILED;
168 }
169
170 static int
171 generic_ut_setup(void)
172 {
173 /* Configure compressdev (one device, one queue pair) */
174 struct rte_compressdev_config config = {
175 .socket_id = rte_socket_id(),
176 .nb_queue_pairs = 1,
177 .max_nb_priv_xforms = NUM_MAX_XFORMS,
178 .max_nb_streams = 0
179 };
180
181 if (rte_compressdev_configure(0, &config) < 0) {
182 RTE_LOG(ERR, USER1, "Device configuration failed\n");
183 return -1;
184 }
185
186 if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
187 rte_socket_id()) < 0) {
188 RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
189 return -1;
190 }
191
192 if (rte_compressdev_start(0) < 0) {
193 RTE_LOG(ERR, USER1, "Device could not be started\n");
194 return -1;
195 }
196
197 return 0;
198 }
199
200 static void
201 generic_ut_teardown(void)
202 {
203 rte_compressdev_stop(0);
204 if (rte_compressdev_close(0) < 0)
205 RTE_LOG(ERR, USER1, "Device could not be closed\n");
206 }
207
208 static int
209 test_compressdev_invalid_configuration(void)
210 {
211 struct rte_compressdev_config invalid_config;
212 struct rte_compressdev_config valid_config = {
213 .socket_id = rte_socket_id(),
214 .nb_queue_pairs = 1,
215 .max_nb_priv_xforms = NUM_MAX_XFORMS,
216 .max_nb_streams = 0
217 };
218 struct rte_compressdev_info dev_info;
219
220 /* Invalid configuration with 0 queue pairs */
221 memcpy(&invalid_config, &valid_config,
222 sizeof(struct rte_compressdev_config));
223 invalid_config.nb_queue_pairs = 0;
224
225 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
226 "Device configuration was successful "
227 "with no queue pairs (invalid)\n");
228
229 /*
230 * Invalid configuration with too many queue pairs
231 * (if there is an actual maximum number of queue pairs)
232 */
233 rte_compressdev_info_get(0, &dev_info);
234 if (dev_info.max_nb_queue_pairs != 0) {
235 memcpy(&invalid_config, &valid_config,
236 sizeof(struct rte_compressdev_config));
237 invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
238
239 TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
240 "Device configuration was successful "
241 "with too many queue pairs (invalid)\n");
242 }
243
244 /* Invalid queue pair setup, with no number of queue pairs set */
245 TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
246 NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
247 "Queue pair setup was successful "
248 "with no queue pairs set (invalid)\n");
249
250 return TEST_SUCCESS;
251 }
252
253 static int
254 compare_buffers(const char *buffer1, uint32_t buffer1_len,
255 const char *buffer2, uint32_t buffer2_len)
256 {
257 if (buffer1_len != buffer2_len) {
258 RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
259 return -1;
260 }
261
262 if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
263 RTE_LOG(ERR, USER1, "Buffers are different\n");
264 return -1;
265 }
266
267 return 0;
268 }
269
270 /*
271 * Maps compressdev and Zlib flush flags
272 */
273 static int
274 map_zlib_flush_flag(enum rte_comp_flush_flag flag)
275 {
276 switch (flag) {
277 case RTE_COMP_FLUSH_NONE:
278 return Z_NO_FLUSH;
279 case RTE_COMP_FLUSH_SYNC:
280 return Z_SYNC_FLUSH;
281 case RTE_COMP_FLUSH_FULL:
282 return Z_FULL_FLUSH;
283 case RTE_COMP_FLUSH_FINAL:
284 return Z_FINISH;
285 /*
286 * There should be only the values above,
287 * so this should never happen
288 */
289 default:
290 return -1;
291 }
292 }
293
/*
 * Compresses op->m_src into op->m_dst with zlib (raw DEFLATE) and fills
 * in op->consumed / op->produced / op->status, mirroring what a
 * compressdev PMD would report. Used as the reference implementation.
 *
 * Multi-segment (SGL) mbufs are first linearized into temporary
 * contiguous buffers, since zlib only operates on contiguous memory.
 *
 * Returns 0 on success, non-zero otherwise.
 */
static int
compress_zlib(struct rte_comp_op *op,
		const struct rte_comp_xform *xform, int mem_level)
{
	z_stream stream;
	int zlib_flush;
	int strategy, window_bits, comp_level;
	int ret = TEST_FAILED;
	uint8_t *single_src_buf = NULL;
	uint8_t *single_dst_buf = NULL;

	/* initialize zlib stream */
	stream.zalloc = Z_NULL;
	stream.zfree = Z_NULL;
	stream.opaque = Z_NULL;

	if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
		strategy = Z_FIXED;
	else
		strategy = Z_DEFAULT_STRATEGY;

	/*
	 * Window bits is the base two logarithm of the window size (in bytes).
	 * When doing raw DEFLATE, this number will be negative.
	 */
	window_bits = -(xform->compress.window_size);

	comp_level = xform->compress.level;

	if (comp_level != RTE_COMP_LEVEL_NONE)
		ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
			window_bits, mem_level, strategy);
	else
		/*
		 * NOTE(review): deflateInit() uses zlib's default window
		 * bits, producing a zlib-wrapped stream rather than the raw
		 * DEFLATE of the branch above - confirm this is intended
		 * for RTE_COMP_LEVEL_NONE.
		 */
		ret = deflateInit(&stream, Z_NO_COMPRESSION);

	if (ret != Z_OK) {
		printf("Zlib deflate could not be initialized\n");
		goto exit;
	}

	/* Assuming stateless operation */
	/* SGL: gather the scattered source into one contiguous buffer */
	if (op->m_src->nb_segs > 1) {
		single_src_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_src), 0);
		if (single_src_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}
		single_dst_buf = rte_malloc(NULL,
				rte_pktmbuf_pkt_len(op->m_dst), 0);
		if (single_dst_buf == NULL) {
			RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
			goto exit;
		}
		if (rte_pktmbuf_read(op->m_src, 0,
				rte_pktmbuf_pkt_len(op->m_src),
				single_src_buf) == NULL) {
			RTE_LOG(ERR, USER1,
				"Buffer could not be read entirely\n");
			goto exit;
		}

		stream.avail_in = op->src.length;
		stream.next_in = single_src_buf;
		stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
		stream.next_out = single_dst_buf;

	} else {
		stream.avail_in = op->src.length;
		stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
		stream.avail_out = op->m_dst->data_len;
		stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
	}
	/* Stateless operation, all buffer will be compressed in one go */
	zlib_flush = map_zlib_flush_flag(op->flush_flag);
	ret = deflate(&stream, zlib_flush);

	if (stream.avail_in != 0) {
		RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
		goto exit;
	}

	if (ret != Z_STREAM_END)
		goto exit;

	/*
	 * Copy data to destination SGL.
	 * NOTE(review): destination segmentation is keyed off
	 * op->m_src->nb_segs, i.e. src and dst are assumed to be both SGL
	 * or both contiguous - confirm against the callers.
	 */
	if (op->m_src->nb_segs > 1) {
		uint32_t remaining_data = stream.total_out;
		uint8_t *src_data = single_dst_buf;
		struct rte_mbuf *dst_buf = op->m_dst;

		while (remaining_data > 0) {
			uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
					uint8_t *);
			/* Last segment */
			if (remaining_data < dst_buf->data_len) {
				memcpy(dst_data, src_data, remaining_data);
				remaining_data = 0;
			} else {
				memcpy(dst_data, src_data, dst_buf->data_len);
				remaining_data -= dst_buf->data_len;
				src_data += dst_buf->data_len;
				dst_buf = dst_buf->next;
			}
		}
	}

	/* Report results the same way a PMD would */
	op->consumed = stream.total_in;
	op->produced = stream.total_out;
	op->status = RTE_COMP_OP_STATUS_SUCCESS;

	deflateReset(&stream);

	ret = 0;
exit:
	deflateEnd(&stream);
	rte_free(single_src_buf);
	rte_free(single_dst_buf);

	return ret;
}
416
417 static int
418 decompress_zlib(struct rte_comp_op *op,
419 const struct rte_comp_xform *xform)
420 {
421 z_stream stream;
422 int window_bits;
423 int zlib_flush;
424 int ret = TEST_FAILED;
425 uint8_t *single_src_buf = NULL;
426 uint8_t *single_dst_buf = NULL;
427
428 /* initialize zlib stream */
429 stream.zalloc = Z_NULL;
430 stream.zfree = Z_NULL;
431 stream.opaque = Z_NULL;
432
433 /*
434 * Window bits is the base two logarithm of the window size (in bytes).
435 * When doing raw DEFLATE, this number will be negative.
436 */
437 window_bits = -(xform->decompress.window_size);
438
439 ret = inflateInit2(&stream, window_bits);
440
441 if (ret != Z_OK) {
442 printf("Zlib deflate could not be initialized\n");
443 goto exit;
444 }
445
446 /* Assuming stateless operation */
447 /* SGL */
448 if (op->m_src->nb_segs > 1) {
449 single_src_buf = rte_malloc(NULL,
450 rte_pktmbuf_pkt_len(op->m_src), 0);
451 if (single_src_buf == NULL) {
452 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
453 goto exit;
454 }
455 single_dst_buf = rte_malloc(NULL,
456 rte_pktmbuf_pkt_len(op->m_dst), 0);
457 if (single_dst_buf == NULL) {
458 RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
459 goto exit;
460 }
461 if (rte_pktmbuf_read(op->m_src, 0,
462 rte_pktmbuf_pkt_len(op->m_src),
463 single_src_buf) == NULL) {
464 RTE_LOG(ERR, USER1,
465 "Buffer could not be read entirely\n");
466 goto exit;
467 }
468
469 stream.avail_in = op->src.length;
470 stream.next_in = single_src_buf;
471 stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
472 stream.next_out = single_dst_buf;
473
474 } else {
475 stream.avail_in = op->src.length;
476 stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
477 stream.avail_out = op->m_dst->data_len;
478 stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
479 }
480
481 /* Stateless operation, all buffer will be compressed in one go */
482 zlib_flush = map_zlib_flush_flag(op->flush_flag);
483 ret = inflate(&stream, zlib_flush);
484
485 if (stream.avail_in != 0) {
486 RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
487 goto exit;
488 }
489
490 if (ret != Z_STREAM_END)
491 goto exit;
492
493 if (op->m_src->nb_segs > 1) {
494 uint32_t remaining_data = stream.total_out;
495 uint8_t *src_data = single_dst_buf;
496 struct rte_mbuf *dst_buf = op->m_dst;
497
498 while (remaining_data > 0) {
499 uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
500 uint8_t *);
501 /* Last segment */
502 if (remaining_data < dst_buf->data_len) {
503 memcpy(dst_data, src_data, remaining_data);
504 remaining_data = 0;
505 } else {
506 memcpy(dst_data, src_data, dst_buf->data_len);
507 remaining_data -= dst_buf->data_len;
508 src_data += dst_buf->data_len;
509 dst_buf = dst_buf->next;
510 }
511 }
512 }
513
514 op->consumed = stream.total_in;
515 op->produced = stream.total_out;
516 op->status = RTE_COMP_OP_STATUS_SUCCESS;
517
518 inflateReset(&stream);
519
520 ret = 0;
521 exit:
522 inflateEnd(&stream);
523
524 return ret;
525 }
526
/*
 * Builds a chained (scatter-gather) mbuf holding total_data_size bytes,
 * using head_buf as the first segment. If test_buf is non-NULL its
 * contents are copied into the segments; otherwise only space is
 * reserved.
 *
 * Segments are SMALL_SEG_SIZE bytes from small_mbuf_pool, except the
 * last one, which absorbs all remaining data and comes from
 * large_mbuf_pool when it does not fit in a small segment.
 * limit_segs_in_sgl caps the segment count (0 means no limit).
 *
 * Returns 0 on success, -1 on any allocation/append/chain failure.
 */
static int
prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
		uint32_t total_data_size,
		struct rte_mempool *small_mbuf_pool,
		struct rte_mempool *large_mbuf_pool,
		uint8_t limit_segs_in_sgl)
{
	uint32_t remaining_data = total_data_size;
	uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
	struct rte_mempool *pool;
	struct rte_mbuf *next_seg;
	uint32_t data_size;
	char *buf_ptr;
	const char *data_ptr = test_buf;
	uint16_t i;
	int ret;

	/*
	 * NOTE(review): when the cap applies, the count is set to
	 * limit_segs_in_sgl - 1 and the head segment is subtracted again
	 * below, so at most limit_segs_in_sgl - 1 segments are built in
	 * total - confirm whether the extra "- 1" is intended.
	 */
	if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
		num_remaining_segs = limit_segs_in_sgl - 1;

	/*
	 * Allocate data in the first segment (header) and
	 * copy data if test buffer is provided
	 */
	if (remaining_data < SMALL_SEG_SIZE)
		data_size = remaining_data;
	else
		data_size = SMALL_SEG_SIZE;
	buf_ptr = rte_pktmbuf_append(head_buf, data_size);
	if (buf_ptr == NULL) {
		RTE_LOG(ERR, USER1,
			"Not enough space in the 1st buffer\n");
		return -1;
	}

	if (data_ptr != NULL) {
		/* Copy characters without NULL terminator */
		strncpy(buf_ptr, data_ptr, data_size);
		data_ptr += data_size;
	}
	remaining_data -= data_size;
	num_remaining_segs--;

	/*
	 * Allocate the rest of the segments,
	 * copy the rest of the data and chain the segments.
	 */
	for (i = 0; i < num_remaining_segs; i++) {

		if (i == (num_remaining_segs - 1)) {
			/* last segment: takes all remaining data, which may
			 * exceed SMALL_SEG_SIZE when segments were capped */
			if (remaining_data > SMALL_SEG_SIZE)
				pool = large_mbuf_pool;
			else
				pool = small_mbuf_pool;
			data_size = remaining_data;
		} else {
			data_size = SMALL_SEG_SIZE;
			pool = small_mbuf_pool;
		}

		next_seg = rte_pktmbuf_alloc(pool);
		if (next_seg == NULL) {
			RTE_LOG(ERR, USER1,
				"New segment could not be allocated "
				"from the mempool\n");
			return -1;
		}
		buf_ptr = rte_pktmbuf_append(next_seg, data_size);
		if (buf_ptr == NULL) {
			RTE_LOG(ERR, USER1,
				"Not enough space in the buffer\n");
			rte_pktmbuf_free(next_seg);
			return -1;
		}
		if (data_ptr != NULL) {
			/* Copy characters without NULL terminator */
			strncpy(buf_ptr, data_ptr, data_size);
			data_ptr += data_size;
		}
		remaining_data -= data_size;

		ret = rte_pktmbuf_chain(head_buf, next_seg);
		if (ret != 0) {
			rte_pktmbuf_free(next_seg);
			RTE_LOG(ERR, USER1,
				"Segment could not chained\n");
			return -1;
		}
	}

	return 0;
}
620
/*
 * Compresses and decompresses buffer with compressdev API and Zlib API
 *
 * Each of the num_bufs test buffers is compressed and then decompressed
 * (each stage performed either by the compressdev PMD under test or by
 * zlib, depending on zlib_dir), and the final output is compared against
 * the original buffer.
 *
 * test_bufs / buf_idx: NUL-terminated test strings and their indices
 *   (indices are used for logging only).
 * compress_xforms / decompress_xforms: num_xforms xforms, applied
 *   round-robin over the operations.
 * state: only RTE_COMP_OP_STATELESS is supported here.
 * sgl: when non-zero, buffers are built as multi-segment (SGL) mbufs.
 *
 * Returns 0 on success, -1 on any failure; all mbufs, ops and private
 * xforms are released on every path through the exit label.
 */
static int
test_deflate_comp_decomp(const char * const test_bufs[],
		unsigned int num_bufs,
		uint16_t buf_idx[],
		struct rte_comp_xform *compress_xforms[],
		struct rte_comp_xform *decompress_xforms[],
		unsigned int num_xforms,
		enum rte_comp_op_type state,
		unsigned int sgl,
		enum zlib_direction zlib_dir)
{
	struct comp_testsuite_params *ts_params = &testsuite_params;
	int ret_status = -1;
	int ret;
	struct rte_mbuf *uncomp_bufs[num_bufs];
	struct rte_mbuf *comp_bufs[num_bufs];
	struct rte_comp_op *ops[num_bufs];
	struct rte_comp_op *ops_processed[num_bufs];
	void *priv_xforms[num_bufs];
	uint16_t num_enqd, num_deqd, num_total_deqd;
	uint16_t num_priv_xforms = 0;
	unsigned int deqd_retries = 0;
	struct priv_op_data *priv_data;
	char *buf_ptr;
	unsigned int i;
	struct rte_mempool *buf_pool;
	uint32_t data_size;
	const struct rte_compressdev_capabilities *capa =
		rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
	char *contig_buf = NULL;

	/* Initialize all arrays to NULL, so cleanup at exit is safe */
	memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
	memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
	memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
	memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
	memset(priv_xforms, 0, sizeof(void *) * num_bufs);

	if (sgl)
		buf_pool = ts_params->small_mbuf_pool;
	else
		buf_pool = ts_params->large_mbuf_pool;

	/* Prepare the source mbufs with the data */
	ret = rte_pktmbuf_alloc_bulk(buf_pool,
			uncomp_bufs, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Source mbufs could not be allocated "
			"from the mempool\n");
		goto exit;
	}

	if (sgl) {
		for (i = 0; i < num_bufs; i++) {
			data_size = strlen(test_bufs[i]) + 1;
			if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
					data_size,
					ts_params->small_mbuf_pool,
					ts_params->large_mbuf_pool,
					MAX_SEGS) < 0)
				goto exit;
		}
	} else {
		for (i = 0; i < num_bufs; i++) {
			data_size = strlen(test_bufs[i]) + 1;
			/* NOTE(review): append result is not NULL-checked */
			buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
			snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
		}
	}

	/* Prepare the destination mbufs */
	ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Destination mbufs could not be allocated "
			"from the mempool\n");
		goto exit;
	}

	if (sgl) {
		for (i = 0; i < num_bufs; i++) {
			/* Extra room in case the data does not compress */
			data_size = strlen(test_bufs[i]) *
				COMPRESS_BUF_SIZE_RATIO;
			if (prepare_sgl_bufs(NULL, comp_bufs[i],
					data_size,
					ts_params->small_mbuf_pool,
					ts_params->large_mbuf_pool,
					MAX_SEGS) < 0)
				goto exit;
		}

	} else {
		for (i = 0; i < num_bufs; i++) {
			data_size = strlen(test_bufs[i]) *
					COMPRESS_BUF_SIZE_RATIO;
			rte_pktmbuf_append(comp_bufs[i], data_size);
		}
	}

	/* Build the compression operations */
	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Compress operations could not be allocated "
			"from the mempool\n");
		goto exit;
	}

	for (i = 0; i < num_bufs; i++) {
		ops[i]->m_src = uncomp_bufs[i];
		ops[i]->m_dst = comp_bufs[i];
		ops[i]->src.offset = 0;
		ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
		ops[i]->dst.offset = 0;
		if (state == RTE_COMP_OP_STATELESS) {
			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
		} else {
			RTE_LOG(ERR, USER1,
				"Stateful operations are not supported "
				"in these tests yet\n");
			goto exit;
		}
		ops[i]->input_chksum = 0;
		/*
		 * Store original operation index in private data,
		 * since ordering does not have to be maintained,
		 * when dequeueing from compressdev, so a comparison
		 * at the end of the test can be done.
		 */
		priv_data = (struct priv_op_data *) (ops[i] + 1);
		priv_data->orig_idx = i;
	}

	/* Compress data (either with Zlib API or compressdev API */
	if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
		for (i = 0; i < num_bufs; i++) {
			const struct rte_comp_xform *compress_xform =
				compress_xforms[i % num_xforms];
			ret = compress_zlib(ops[i], compress_xform,
					DEFAULT_MEM_LEVEL);
			if (ret < 0)
				goto exit;

			ops_processed[i] = ops[i];
		}
	} else {
		/* Create compress private xform data */
		for (i = 0; i < num_xforms; i++) {
			ret = rte_compressdev_private_xform_create(0,
				(const struct rte_comp_xform *)compress_xforms[i],
				&priv_xforms[i]);
			if (ret < 0) {
				RTE_LOG(ERR, USER1,
					"Compression private xform "
					"could not be created\n");
				goto exit;
			}
			num_priv_xforms++;
		}

		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
			/* Attach shareable private xform data to ops */
			for (i = 0; i < num_bufs; i++)
				ops[i]->private_xform = priv_xforms[i % num_xforms];
		} else {
			/* Create rest of the private xforms for the other ops */
			for (i = num_xforms; i < num_bufs; i++) {
				ret = rte_compressdev_private_xform_create(0,
					compress_xforms[i % num_xforms],
					&priv_xforms[i]);
				if (ret < 0) {
					RTE_LOG(ERR, USER1,
						"Compression private xform "
						"could not be created\n");
					goto exit;
				}
				num_priv_xforms++;
			}

			/* Attach non shareable private xform data to ops */
			for (i = 0; i < num_bufs; i++)
				ops[i]->private_xform = priv_xforms[i];
		}

		/* Enqueue and dequeue all operations */
		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
		if (num_enqd < num_bufs) {
			RTE_LOG(ERR, USER1,
				"The operations could not be enqueued\n");
			goto exit;
		}

		num_total_deqd = 0;
		do {
			/*
			 * If retrying a dequeue call, wait for 10 ms to allow
			 * enough time to the driver to process the operations
			 */
			if (deqd_retries != 0) {
				/*
				 * Avoid infinite loop if not all the
				 * operations get out of the device
				 */
				if (deqd_retries == MAX_DEQD_RETRIES) {
					RTE_LOG(ERR, USER1,
						"Not all operations could be "
						"dequeued\n");
					goto exit;
				}
				usleep(DEQUEUE_WAIT_TIME);
			}
			num_deqd = rte_compressdev_dequeue_burst(0, 0,
					&ops_processed[num_total_deqd], num_bufs);
			num_total_deqd += num_deqd;
			deqd_retries++;
		} while (num_total_deqd < num_enqd);

		deqd_retries = 0;

		/* Free compress private xforms */
		for (i = 0; i < num_priv_xforms; i++) {
			rte_compressdev_private_xform_free(0, priv_xforms[i]);
			priv_xforms[i] = NULL;
		}
		num_priv_xforms = 0;
	}

	/* Log per-buffer compression results */
	for (i = 0; i < num_bufs; i++) {
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		uint16_t xform_idx = priv_data->orig_idx % num_xforms;
		const struct rte_comp_compress_xform *compress_xform =
				&compress_xforms[xform_idx]->compress;
		enum rte_comp_huffman huffman_type =
			compress_xform->deflate.huffman;
		RTE_LOG(DEBUG, USER1, "Buffer %u compressed from %u to %u bytes "
			"(level = %d, huffman = %s)\n",
			buf_idx[priv_data->orig_idx],
			ops_processed[i]->consumed, ops_processed[i]->produced,
			compress_xform->level,
			huffman_type_strings[huffman_type]);
		RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f",
			(float)ops_processed[i]->produced /
			ops_processed[i]->consumed * 100);
		ops[i] = NULL;
	}

	/*
	 * Check operation status and free source mbufs (destination mbuf and
	 * compress operation information is needed for the decompression stage)
	 */
	for (i = 0; i < num_bufs; i++) {
		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, USER1,
				"Some operations were not successful\n");
			goto exit;
		}
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
		uncomp_bufs[priv_data->orig_idx] = NULL;
	}

	/* Allocate buffers for decompressed data */
	ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Destination mbufs could not be allocated "
			"from the mempool\n");
		goto exit;
	}

	if (sgl) {
		for (i = 0; i < num_bufs; i++) {
			priv_data = (struct priv_op_data *)
					(ops_processed[i] + 1);
			data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
			if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
					data_size,
					ts_params->small_mbuf_pool,
					ts_params->large_mbuf_pool,
					MAX_SEGS) < 0)
				goto exit;
		}

	} else {
		for (i = 0; i < num_bufs; i++) {
			priv_data = (struct priv_op_data *)
					(ops_processed[i] + 1);
			data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
			rte_pktmbuf_append(uncomp_bufs[i], data_size);
		}
	}

	/* Build the decompression operations */
	ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Decompress operations could not be allocated "
			"from the mempool\n");
		goto exit;
	}

	/* Source buffer is the compressed data from the previous operations */
	for (i = 0; i < num_bufs; i++) {
		ops[i]->m_src = ops_processed[i]->m_dst;
		ops[i]->m_dst = uncomp_bufs[i];
		ops[i]->src.offset = 0;
		/*
		 * Set the length of the compressed data to the
		 * number of bytes that were produced in the previous stage
		 */
		ops[i]->src.length = ops_processed[i]->produced;
		ops[i]->dst.offset = 0;
		if (state == RTE_COMP_OP_STATELESS) {
			ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
		} else {
			RTE_LOG(ERR, USER1,
				"Stateful operations are not supported "
				"in these tests yet\n");
			goto exit;
		}
		ops[i]->input_chksum = 0;
		/*
		 * Copy private data from previous operations,
		 * to keep the pointer to the original buffer
		 */
		memcpy(ops[i] + 1, ops_processed[i] + 1,
				sizeof(struct priv_op_data));
	}

	/*
	 * Free the previous compress operations,
	 * as it is not needed anymore
	 */
	for (i = 0; i < num_bufs; i++) {
		rte_comp_op_free(ops_processed[i]);
		ops_processed[i] = NULL;
	}

	/* Decompress data (either with Zlib API or compressdev API */
	if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
		for (i = 0; i < num_bufs; i++) {
			priv_data = (struct priv_op_data *)(ops[i] + 1);
			uint16_t xform_idx = priv_data->orig_idx % num_xforms;
			const struct rte_comp_xform *decompress_xform =
				decompress_xforms[xform_idx];

			ret = decompress_zlib(ops[i], decompress_xform);
			if (ret < 0)
				goto exit;

			ops_processed[i] = ops[i];
		}
	} else {
		/* Create decompress private xform data */
		for (i = 0; i < num_xforms; i++) {
			ret = rte_compressdev_private_xform_create(0,
				(const struct rte_comp_xform *)decompress_xforms[i],
				&priv_xforms[i]);
			if (ret < 0) {
				RTE_LOG(ERR, USER1,
					"Decompression private xform "
					"could not be created\n");
				goto exit;
			}
			num_priv_xforms++;
		}

		if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
			/* Attach shareable private xform data to ops */
			for (i = 0; i < num_bufs; i++) {
				priv_data = (struct priv_op_data *)(ops[i] + 1);
				uint16_t xform_idx = priv_data->orig_idx %
								num_xforms;
				ops[i]->private_xform = priv_xforms[xform_idx];
			}
		} else {
			/* Create rest of the private xforms for the other ops */
			for (i = num_xforms; i < num_bufs; i++) {
				ret = rte_compressdev_private_xform_create(0,
					decompress_xforms[i % num_xforms],
					&priv_xforms[i]);
				if (ret < 0) {
					RTE_LOG(ERR, USER1,
						"Decompression private xform "
						"could not be created\n");
					goto exit;
				}
				num_priv_xforms++;
			}

			/* Attach non shareable private xform data to ops */
			for (i = 0; i < num_bufs; i++) {
				priv_data = (struct priv_op_data *)(ops[i] + 1);
				uint16_t xform_idx = priv_data->orig_idx;
				ops[i]->private_xform = priv_xforms[xform_idx];
			}
		}

		/* Enqueue and dequeue all operations */
		num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
		if (num_enqd < num_bufs) {
			RTE_LOG(ERR, USER1,
				"The operations could not be enqueued\n");
			goto exit;
		}

		num_total_deqd = 0;
		do {
			/*
			 * If retrying a dequeue call, wait for 10 ms to allow
			 * enough time to the driver to process the operations
			 */
			if (deqd_retries != 0) {
				/*
				 * Avoid infinite loop if not all the
				 * operations get out of the device
				 */
				if (deqd_retries == MAX_DEQD_RETRIES) {
					RTE_LOG(ERR, USER1,
						"Not all operations could be "
						"dequeued\n");
					goto exit;
				}
				usleep(DEQUEUE_WAIT_TIME);
			}
			num_deqd = rte_compressdev_dequeue_burst(0, 0,
					&ops_processed[num_total_deqd], num_bufs);
			num_total_deqd += num_deqd;
			deqd_retries++;
		} while (num_total_deqd < num_enqd);

		deqd_retries = 0;
	}

	/* Log per-buffer decompression results */
	for (i = 0; i < num_bufs; i++) {
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		RTE_LOG(DEBUG, USER1, "Buffer %u decompressed from %u to %u bytes\n",
			buf_idx[priv_data->orig_idx],
			ops_processed[i]->consumed, ops_processed[i]->produced);
		ops[i] = NULL;
	}

	/*
	 * Check operation status and free source mbuf (destination mbuf and
	 * compress operation information is still needed)
	 */
	for (i = 0; i < num_bufs; i++) {
		if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, USER1,
				"Some operations were not successful\n");
			goto exit;
		}
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
		comp_bufs[priv_data->orig_idx] = NULL;
	}

	/*
	 * Compare the original stream with the decompressed stream
	 * (in size and the data)
	 */
	for (i = 0; i < num_bufs; i++) {
		priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
		const char *buf1 = test_bufs[priv_data->orig_idx];
		const char *buf2;
		contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
		if (contig_buf == NULL) {
			RTE_LOG(ERR, USER1, "Contiguous buffer could not "
					"be allocated\n");
			goto exit;
		}

		buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
				ops_processed[i]->produced, contig_buf);

		if (compare_buffers(buf1, strlen(buf1) + 1,
				buf2, ops_processed[i]->produced) < 0)
			goto exit;

		rte_free(contig_buf);
		contig_buf = NULL;
	}

	ret_status = 0;

exit:
	/* Free resources */
	for (i = 0; i < num_bufs; i++) {
		rte_pktmbuf_free(uncomp_bufs[i]);
		rte_pktmbuf_free(comp_bufs[i]);
		rte_comp_op_free(ops[i]);
		rte_comp_op_free(ops_processed[i]);
	}
	for (i = 0; i < num_priv_xforms; i++) {
		if (priv_xforms[i] != NULL)
			rte_compressdev_private_xform_free(0, priv_xforms[i]);
	}
	rte_free(contig_buf);

	return ret_status;
}
1126
1127 static int
1128 test_compressdev_deflate_stateless_fixed(void)
1129 {
1130 struct comp_testsuite_params *ts_params = &testsuite_params;
1131 const char *test_buffer;
1132 uint16_t i;
1133 int ret;
1134 const struct rte_compressdev_capabilities *capab;
1135
1136 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1137 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1138
1139 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
1140 return -ENOTSUP;
1141
1142 struct rte_comp_xform *compress_xform =
1143 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1144
1145 if (compress_xform == NULL) {
1146 RTE_LOG(ERR, USER1,
1147 "Compress xform could not be created\n");
1148 ret = TEST_FAILED;
1149 goto exit;
1150 }
1151
1152 memcpy(compress_xform, ts_params->def_comp_xform,
1153 sizeof(struct rte_comp_xform));
1154 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
1155
1156 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1157 test_buffer = compress_test_bufs[i];
1158
1159 /* Compress with compressdev, decompress with Zlib */
1160 if (test_deflate_comp_decomp(&test_buffer, 1,
1161 &i,
1162 &compress_xform,
1163 &ts_params->def_decomp_xform,
1164 1,
1165 RTE_COMP_OP_STATELESS,
1166 0,
1167 ZLIB_DECOMPRESS) < 0) {
1168 ret = TEST_FAILED;
1169 goto exit;
1170 }
1171
1172 /* Compress with Zlib, decompress with compressdev */
1173 if (test_deflate_comp_decomp(&test_buffer, 1,
1174 &i,
1175 &compress_xform,
1176 &ts_params->def_decomp_xform,
1177 1,
1178 RTE_COMP_OP_STATELESS,
1179 0,
1180 ZLIB_COMPRESS) < 0) {
1181 ret = TEST_FAILED;
1182 goto exit;
1183 }
1184 }
1185
1186 ret = TEST_SUCCESS;
1187
1188 exit:
1189 rte_free(compress_xform);
1190 return ret;
1191 }
1192
1193 static int
1194 test_compressdev_deflate_stateless_dynamic(void)
1195 {
1196 struct comp_testsuite_params *ts_params = &testsuite_params;
1197 const char *test_buffer;
1198 uint16_t i;
1199 int ret;
1200 struct rte_comp_xform *compress_xform =
1201 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1202
1203 const struct rte_compressdev_capabilities *capab;
1204
1205 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1206 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1207
1208 if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
1209 return -ENOTSUP;
1210
1211 if (compress_xform == NULL) {
1212 RTE_LOG(ERR, USER1,
1213 "Compress xform could not be created\n");
1214 ret = TEST_FAILED;
1215 goto exit;
1216 }
1217
1218 memcpy(compress_xform, ts_params->def_comp_xform,
1219 sizeof(struct rte_comp_xform));
1220 compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
1221
1222 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1223 test_buffer = compress_test_bufs[i];
1224
1225 /* Compress with compressdev, decompress with Zlib */
1226 if (test_deflate_comp_decomp(&test_buffer, 1,
1227 &i,
1228 &compress_xform,
1229 &ts_params->def_decomp_xform,
1230 1,
1231 RTE_COMP_OP_STATELESS,
1232 0,
1233 ZLIB_DECOMPRESS) < 0) {
1234 ret = TEST_FAILED;
1235 goto exit;
1236 }
1237
1238 /* Compress with Zlib, decompress with compressdev */
1239 if (test_deflate_comp_decomp(&test_buffer, 1,
1240 &i,
1241 &compress_xform,
1242 &ts_params->def_decomp_xform,
1243 1,
1244 RTE_COMP_OP_STATELESS,
1245 0,
1246 ZLIB_COMPRESS) < 0) {
1247 ret = TEST_FAILED;
1248 goto exit;
1249 }
1250 }
1251
1252 ret = TEST_SUCCESS;
1253
1254 exit:
1255 rte_free(compress_xform);
1256 return ret;
1257 }
1258
1259 static int
1260 test_compressdev_deflate_stateless_multi_op(void)
1261 {
1262 struct comp_testsuite_params *ts_params = &testsuite_params;
1263 uint16_t num_bufs = RTE_DIM(compress_test_bufs);
1264 uint16_t buf_idx[num_bufs];
1265 uint16_t i;
1266
1267 for (i = 0; i < num_bufs; i++)
1268 buf_idx[i] = i;
1269
1270 /* Compress with compressdev, decompress with Zlib */
1271 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1272 buf_idx,
1273 &ts_params->def_comp_xform,
1274 &ts_params->def_decomp_xform,
1275 1,
1276 RTE_COMP_OP_STATELESS,
1277 0,
1278 ZLIB_DECOMPRESS) < 0)
1279 return TEST_FAILED;
1280
1281 /* Compress with Zlib, decompress with compressdev */
1282 if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
1283 buf_idx,
1284 &ts_params->def_comp_xform,
1285 &ts_params->def_decomp_xform,
1286 1,
1287 RTE_COMP_OP_STATELESS,
1288 0,
1289 ZLIB_COMPRESS) < 0)
1290 return TEST_FAILED;
1291
1292 return TEST_SUCCESS;
1293 }
1294
1295 static int
1296 test_compressdev_deflate_stateless_multi_level(void)
1297 {
1298 struct comp_testsuite_params *ts_params = &testsuite_params;
1299 const char *test_buffer;
1300 unsigned int level;
1301 uint16_t i;
1302 int ret;
1303 struct rte_comp_xform *compress_xform =
1304 rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
1305
1306 if (compress_xform == NULL) {
1307 RTE_LOG(ERR, USER1,
1308 "Compress xform could not be created\n");
1309 ret = TEST_FAILED;
1310 goto exit;
1311 }
1312
1313 memcpy(compress_xform, ts_params->def_comp_xform,
1314 sizeof(struct rte_comp_xform));
1315
1316 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1317 test_buffer = compress_test_bufs[i];
1318 for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
1319 level++) {
1320 compress_xform->compress.level = level;
1321 /* Compress with compressdev, decompress with Zlib */
1322 if (test_deflate_comp_decomp(&test_buffer, 1,
1323 &i,
1324 &compress_xform,
1325 &ts_params->def_decomp_xform,
1326 1,
1327 RTE_COMP_OP_STATELESS,
1328 0,
1329 ZLIB_DECOMPRESS) < 0) {
1330 ret = TEST_FAILED;
1331 goto exit;
1332 }
1333 }
1334 }
1335
1336 ret = TEST_SUCCESS;
1337
1338 exit:
1339 rte_free(compress_xform);
1340 return ret;
1341 }
1342
1343 #define NUM_XFORMS 3
1344 static int
1345 test_compressdev_deflate_stateless_multi_xform(void)
1346 {
1347 struct comp_testsuite_params *ts_params = &testsuite_params;
1348 uint16_t num_bufs = NUM_XFORMS;
1349 struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
1350 struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
1351 const char *test_buffers[NUM_XFORMS];
1352 uint16_t i;
1353 unsigned int level = RTE_COMP_LEVEL_MIN;
1354 uint16_t buf_idx[num_bufs];
1355
1356 int ret;
1357
1358 /* Create multiple xforms with various levels */
1359 for (i = 0; i < NUM_XFORMS; i++) {
1360 compress_xforms[i] = rte_malloc(NULL,
1361 sizeof(struct rte_comp_xform), 0);
1362 if (compress_xforms[i] == NULL) {
1363 RTE_LOG(ERR, USER1,
1364 "Compress xform could not be created\n");
1365 ret = TEST_FAILED;
1366 goto exit;
1367 }
1368
1369 memcpy(compress_xforms[i], ts_params->def_comp_xform,
1370 sizeof(struct rte_comp_xform));
1371 compress_xforms[i]->compress.level = level;
1372 level++;
1373
1374 decompress_xforms[i] = rte_malloc(NULL,
1375 sizeof(struct rte_comp_xform), 0);
1376 if (decompress_xforms[i] == NULL) {
1377 RTE_LOG(ERR, USER1,
1378 "Decompress xform could not be created\n");
1379 ret = TEST_FAILED;
1380 goto exit;
1381 }
1382
1383 memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
1384 sizeof(struct rte_comp_xform));
1385 }
1386
1387 for (i = 0; i < NUM_XFORMS; i++) {
1388 buf_idx[i] = 0;
1389 /* Use the same buffer in all sessions */
1390 test_buffers[i] = compress_test_bufs[0];
1391 }
1392 /* Compress with compressdev, decompress with Zlib */
1393 if (test_deflate_comp_decomp(test_buffers, num_bufs,
1394 buf_idx,
1395 compress_xforms,
1396 decompress_xforms,
1397 NUM_XFORMS,
1398 RTE_COMP_OP_STATELESS,
1399 0,
1400 ZLIB_DECOMPRESS) < 0) {
1401 ret = TEST_FAILED;
1402 goto exit;
1403 }
1404
1405 ret = TEST_SUCCESS;
1406 exit:
1407 for (i = 0; i < NUM_XFORMS; i++) {
1408 rte_free(compress_xforms[i]);
1409 rte_free(decompress_xforms[i]);
1410 }
1411
1412 return ret;
1413 }
1414
1415 static int
1416 test_compressdev_deflate_stateless_sgl(void)
1417 {
1418 struct comp_testsuite_params *ts_params = &testsuite_params;
1419 uint16_t i;
1420 const char *test_buffer;
1421 const struct rte_compressdev_capabilities *capab;
1422
1423 capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
1424 TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
1425
1426 if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
1427 return -ENOTSUP;
1428
1429 for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
1430 test_buffer = compress_test_bufs[i];
1431 /* Compress with compressdev, decompress with Zlib */
1432 if (test_deflate_comp_decomp(&test_buffer, 1,
1433 &i,
1434 &ts_params->def_comp_xform,
1435 &ts_params->def_decomp_xform,
1436 1,
1437 RTE_COMP_OP_STATELESS,
1438 1,
1439 ZLIB_DECOMPRESS) < 0)
1440 return TEST_FAILED;
1441
1442 /* Compress with Zlib, decompress with compressdev */
1443 if (test_deflate_comp_decomp(&test_buffer, 1,
1444 &i,
1445 &ts_params->def_comp_xform,
1446 &ts_params->def_decomp_xform,
1447 1,
1448 RTE_COMP_OP_STATELESS,
1449 1,
1450 ZLIB_COMPRESS) < 0)
1451 return TEST_FAILED;
1452 }
1453
1454 return TEST_SUCCESS;
1455 }
1456
/*
 * Test suite descriptor: every case below (except the invalid-configuration
 * one) is wrapped by generic_ut_setup/generic_ut_teardown, which configure
 * and stop the device around each test.
 */
static struct unit_test_suite compressdev_testsuite  = {
	.suite_name = "compressdev unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		/* No per-case setup: this case probes bad configurations */
		TEST_CASE_ST(NULL, NULL,
			test_compressdev_invalid_configuration),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_fixed),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_dynamic),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_op),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_level),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_multi_xform),
		TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
			test_compressdev_deflate_stateless_sgl),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
1479
/* Entry point invoked by the test framework: run the whole suite above */
static int
test_compressdev(void)
{
	return unit_test_suite_runner(&compressdev_testsuite);
}
1485
1486 REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);