/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_compressdev.h>

#include "comp_perf_test_verify.h"
#include "comp_perf_test_common.h"
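
/*
 * Verify test of the compress-perf application: each worker lcore compresses
 * the test input, decompresses the result and checks that it matches the
 * original data byte for byte, reporting the measured compression ratio.
 */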

void
cperf_verify_test_destructor(void *arg)
{
	if (arg) {
		comp_perf_free_memory(
				((struct cperf_verify_ctx *)arg)->options,
				&((struct cperf_verify_ctx *)arg)->mem);
		rte_free(arg);
	}
}

void *
cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
		struct comp_test_data *options)
{
	struct cperf_verify_ctx *ctx = NULL;

	ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);

	if (ctx == NULL)
		return NULL;

	ctx->mem.dev_id = dev_id;
	ctx->mem.qp_id = qp_id;
	ctx->options = options;

	if (!comp_perf_allocate_memory(ctx->options, &ctx->mem) &&
			!prepare_bufs(ctx->options, &ctx->mem))
		return ctx;

	cperf_verify_test_destructor(ctx);
	return NULL;
}
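
/*
 * Run one pass over all test buffers in the direction selected by "type":
 * enqueue bursts of operations to the device queue pair, dequeue the results
 * and append the produced output to the flat context buffer (compressed_data
 * or decompressed_data), recording its total size for later verification.
 */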
static int
main_loop(struct cperf_verify_ctx *ctx, enum rte_comp_xform_type type)
{
	struct comp_test_data *test_data = ctx->options;
	uint8_t *output_data_ptr = NULL;
	size_t *output_data_sz = NULL;
	struct cperf_mem_resources *mem = &ctx->mem;

	uint8_t dev_id = mem->dev_id;
	uint32_t i, iter, num_iter;
	struct rte_comp_op **ops, **deq_ops;
	void *priv_xform = NULL;
	struct rte_comp_xform xform;
	size_t output_size = 0;
	struct rte_mbuf **input_bufs, **output_bufs;
	int res = 0;
	int allocated = 0;
	uint32_t out_seg_sz;

	if (test_data == NULL || !test_data->burst_sz) {
		RTE_LOG(ERR, USER1,
			"Unknown burst size\n");
		return -1;
	}

	/* One allocation backs both arrays: ops[] for enqueue and deq_ops[]
	 * (the upper half) for dequeued operations.
	 */
	ops = rte_zmalloc_socket(NULL,
		2 * mem->total_bufs * sizeof(struct rte_comp_op *),
		0, rte_socket_id());

	if (ops == NULL) {
		RTE_LOG(ERR, USER1,
			"Can't allocate memory for ops structures\n");
		return -1;
	}

	deq_ops = &ops[mem->total_bufs];

	if (type == RTE_COMP_COMPRESS) {
		xform = (struct rte_comp_xform) {
			.type = RTE_COMP_COMPRESS,
			.compress = {
				.algo = RTE_COMP_ALGO_DEFLATE,
				.deflate.huffman = test_data->huffman_enc,
				.level = test_data->level,
				.window_size = test_data->window_sz,
				.chksum = RTE_COMP_CHECKSUM_NONE,
				.hash_algo = RTE_COMP_HASH_ALGO_NONE
			}
		};
		output_data_ptr = ctx->mem.compressed_data;
		output_data_sz = &ctx->comp_data_sz;
		input_bufs = mem->decomp_bufs;
		output_bufs = mem->comp_bufs;
		out_seg_sz = test_data->out_seg_sz;
	} else {
		xform = (struct rte_comp_xform) {
			.type = RTE_COMP_DECOMPRESS,
			.decompress = {
				.algo = RTE_COMP_ALGO_DEFLATE,
				.chksum = RTE_COMP_CHECKSUM_NONE,
				.window_size = test_data->window_sz,
				.hash_algo = RTE_COMP_HASH_ALGO_NONE
			}
		};
		output_data_ptr = ctx->mem.decompressed_data;
		output_data_sz = &ctx->decomp_data_sz;
		input_bufs = mem->comp_bufs;
		output_bufs = mem->decomp_bufs;
		out_seg_sz = test_data->seg_sz;
	}

	/* Create private xform */
	if (rte_compressdev_private_xform_create(dev_id, &xform,
			&priv_xform) < 0) {
		RTE_LOG(ERR, USER1, "Private xform could not be created\n");
		res = -1;
		goto end;
	}

	num_iter = 1;

	for (iter = 0; iter < num_iter; iter++) {
		uint32_t total_ops = mem->total_bufs;
		uint32_t remaining_ops = mem->total_bufs;
		uint32_t total_deq_ops = 0;
		uint32_t total_enq_ops = 0;
		uint16_t ops_unused = 0;
		uint16_t num_enq = 0;
		uint16_t num_deq = 0;

		output_size = 0;

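		/*
		 * Enqueue/dequeue pipeline: submit bursts of up to burst_sz
		 * operations, carry over any operations the device did not
		 * accept to the next iteration, and collect completed
		 * operations as they are dequeued.
		 */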
		while (remaining_ops > 0) {
			uint16_t num_ops = RTE_MIN(remaining_ops,
					test_data->burst_sz);
			uint16_t ops_needed = num_ops - ops_unused;

			/*
			 * Move the unused operations from the previous
			 * enqueue_burst call to the front, to maintain order
			 */
			if ((ops_unused > 0) && (num_enq > 0)) {
				size_t nb_b_to_mov =
					ops_unused * sizeof(struct rte_comp_op *);

				memmove(ops, &ops[num_enq], nb_b_to_mov);
			}

			/* Allocate compression operations */
			if (ops_needed && !rte_comp_op_bulk_alloc(
						mem->op_pool,
						&ops[ops_unused],
						ops_needed)) {
				RTE_LOG(ERR, USER1,
					"Could not allocate enough operations\n");
				res = -1;
				goto end;
			}
			allocated += ops_needed;

			for (i = 0; i < ops_needed; i++) {
				/*
				 * Calculate next buffer to attach to operation
				 */
				uint32_t buf_id = total_enq_ops + i +
						ops_unused;
				uint16_t op_id = ops_unused + i;
				/* Reset all data in output buffers */
				struct rte_mbuf *m = output_bufs[buf_id];

				m->pkt_len = out_seg_sz * m->nb_segs;
				while (m) {
					m->data_len = m->buf_len - m->data_off;
					m = m->next;
				}
				ops[op_id]->m_src = input_bufs[buf_id];
				ops[op_id]->m_dst = output_bufs[buf_id];
				ops[op_id]->src.offset = 0;
				ops[op_id]->src.length =
					rte_pktmbuf_pkt_len(input_bufs[buf_id]);
				ops[op_id]->dst.offset = 0;
				ops[op_id]->flush_flag = RTE_COMP_FLUSH_FINAL;
				/* Checksums are disabled; the field carries the buffer index */
				ops[op_id]->input_chksum = buf_id;
				ops[op_id]->private_xform = priv_xform;
			}

			if (unlikely(test_data->perf_comp_force_stop))
				goto end;

			num_enq = rte_compressdev_enqueue_burst(dev_id,
								mem->qp_id, ops,
								num_ops);
			if (num_enq == 0) {
				struct rte_compressdev_stats stats;

				rte_compressdev_stats_get(dev_id, &stats);
				if (stats.enqueue_err_count) {
					res = -1;
					goto end;
				}
			}

			ops_unused = num_ops - num_enq;
			remaining_ops -= num_enq;
			total_enq_ops += num_enq;

			num_deq = rte_compressdev_dequeue_burst(dev_id,
								mem->qp_id,
								deq_ops,
								test_data->burst_sz);
			total_deq_ops += num_deq;

			for (i = 0; i < num_deq; i++) {
				struct rte_comp_op *op = deq_ops[i];

				if (op->status ==
				    RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED ||
				    op->status ==
				    RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
					RTE_LOG(ERR, USER1,
						"Out of space error occurred due to incompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n");
					res = -1;
					goto end;
				} else if (op->status !=
					   RTE_COMP_OP_STATUS_SUCCESS) {
					RTE_LOG(ERR, USER1,
						"Some operations were not successful\n");
					goto end;
				}

				const void *read_data_addr =
						rte_pktmbuf_read(op->m_dst, 0,
						op->produced, output_data_ptr);
				if (read_data_addr == NULL) {
					RTE_LOG(ERR, USER1,
						"Could not copy buffer to destination\n");
					res = -1;
					goto end;
				}

				if (read_data_addr != output_data_ptr)
					rte_memcpy(output_data_ptr,
						   rte_pktmbuf_mtod(op->m_dst,
								    uint8_t *),
						   op->produced);
				output_data_ptr += op->produced;
				output_size += op->produced;
			}

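			/*
			 * On the final iteration, trim each destination mbuf
			 * chain to the bytes actually produced, e.g. with
			 * out_seg_sz = 4096 and produced = 10000 the segments
			 * get data_len 4096, 4096 and 1808.
			 */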
			if (iter == num_iter - 1) {
				for (i = 0; i < num_deq; i++) {
					struct rte_comp_op *op = deq_ops[i];
					struct rte_mbuf *m = op->m_dst;

					m->pkt_len = op->produced;
					uint32_t remaining_data = op->produced;
					uint16_t data_to_append;

					while (remaining_data > 0) {
						data_to_append =
							RTE_MIN(remaining_data,
								out_seg_sz);
						m->data_len = data_to_append;
						remaining_data -=
							data_to_append;
						m = m->next;
					}
				}
			}
			rte_mempool_put_bulk(mem->op_pool,
					     (void **)deq_ops, num_deq);
			allocated -= num_deq;
		}

		/* Dequeue the last operations */
		while (total_deq_ops < total_ops) {
			if (unlikely(test_data->perf_comp_force_stop))
				goto end;

			num_deq = rte_compressdev_dequeue_burst(dev_id,
								mem->qp_id,
								deq_ops,
								test_data->burst_sz);
			if (num_deq == 0) {
				struct rte_compressdev_stats stats;

				rte_compressdev_stats_get(dev_id, &stats);
				if (stats.dequeue_err_count) {
					res = -1;
					goto end;
				}
			}

			total_deq_ops += num_deq;

			for (i = 0; i < num_deq; i++) {
				struct rte_comp_op *op = deq_ops[i];

				if (op->status ==
				    RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED ||
				    op->status ==
				    RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
					RTE_LOG(ERR, USER1,
						"Out of space error occurred due to incompressible input data expanding to larger than destination buffer. Increase the EXPANSE_RATIO constant to use this data.\n");
					res = -1;
					goto end;
				} else if (op->status !=
					   RTE_COMP_OP_STATUS_SUCCESS) {
					RTE_LOG(ERR, USER1,
						"Some operations were not successful\n");
					goto end;
				}

				const void *read_data_addr =
						rte_pktmbuf_read(op->m_dst,
							op->dst.offset,
							op->produced,
							output_data_ptr);
				if (read_data_addr == NULL) {
					RTE_LOG(ERR, USER1,
						"Could not copy buffer to destination\n");
					res = -1;
					goto end;
				}

				if (read_data_addr != output_data_ptr)
					rte_memcpy(output_data_ptr,
						   rte_pktmbuf_mtod(op->m_dst,
								    uint8_t *),
						   op->produced);
				output_data_ptr += op->produced;
				output_size += op->produced;
			}

			if (iter == num_iter - 1) {
				for (i = 0; i < num_deq; i++) {
					struct rte_comp_op *op = deq_ops[i];
					struct rte_mbuf *m = op->m_dst;

					m->pkt_len = op->produced;
					uint32_t remaining_data = op->produced;
					uint16_t data_to_append;

					while (remaining_data > 0) {
						data_to_append =
							RTE_MIN(remaining_data,
								out_seg_sz);
						m->data_len = data_to_append;
						remaining_data -=
							data_to_append;
						m = m->next;
					}
				}
			}
			rte_mempool_put_bulk(mem->op_pool,
					     (void **)deq_ops, num_deq);
			allocated -= num_deq;
		}
	}

	if (output_data_sz)
		*output_data_sz = output_size;
end:
	rte_mempool_put_bulk(mem->op_pool, (void **)ops, allocated);
	rte_compressdev_private_xform_free(dev_id, priv_xform);
	rte_free(ops);

	if (test_data->perf_comp_force_stop) {
		RTE_LOG(ERR, USER1,
			"lcore: %d Perf. test has been aborted by user\n",
			mem->lcore_id);
		res = -1;
	}

	return res;
}
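
/*
 * Per-lcore entry point of the verify test: compress the input, decompress
 * the result, then check that the round-tripped data has the same length and
 * content as the original input, and report the compression ratio.
 */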
int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;
	struct comp_test_data *test_data = ctx->options;
	int ret = EXIT_SUCCESS;
	static rte_atomic16_t display_once = RTE_ATOMIC16_INIT(0);
	uint32_t lcore = rte_lcore_id();

	ctx->mem.lcore_id = lcore;

	test_data->ratio = 0;

	if (main_loop(ctx, RTE_COMP_COMPRESS) < 0) {
		ret = EXIT_FAILURE;
		goto end;
	}

	if (main_loop(ctx, RTE_COMP_DECOMPRESS) < 0) {
		ret = EXIT_FAILURE;
		goto end;
	}

	if (ctx->decomp_data_sz != test_data->input_data_sz) {
		RTE_LOG(ERR, USER1,
			"Decompressed data length not equal to input data length\n");
		RTE_LOG(ERR, USER1,
			"Decompressed size = %zu, expected = %zu\n",
			ctx->decomp_data_sz, test_data->input_data_sz);
		ret = EXIT_FAILURE;
		goto end;
	} else {
		if (memcmp(ctx->mem.decompressed_data,
				test_data->input_data,
				test_data->input_data_sz) != 0) {
			RTE_LOG(ERR, USER1,
				"Decompressed data is not the same as file data\n");
			ret = EXIT_FAILURE;
			goto end;
		}
	}

	ctx->ratio = (double) ctx->comp_data_sz /
			test_data->input_data_sz * 100;

	if (!ctx->silent) {
		/* Print the header only once, from whichever lcore gets here first */
		if (rte_atomic16_test_and_set(&display_once)) {
			printf("%12s%6s%12s%17s\n",
				"lcore id", "Level", "Comp size", "Comp ratio [%]");
		}
		printf("%12u%6u%12zu%17.2f\n",
			ctx->mem.lcore_id,
			test_data->level, ctx->comp_data_sz, ctx->ratio);
	}

end:
	return ret;
}