/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <string.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cpuflags.h>
#include <rte_malloc.h>

#include "otx_zip.h"

static const struct rte_compressdev_capabilities
				octtx_zip_pmd_capabilities[] = {
	{	.algo = RTE_COMP_ALGO_DEFLATE,
		/* Deflate */
		.comp_feature_flags =	RTE_COMP_FF_HUFFMAN_FIXED |
					RTE_COMP_FF_HUFFMAN_DYNAMIC,
		/* Non-shareable priv XFORMs, stateless ops only */
		.window_size = {
			.min = 1,
			.max = 14,
			.increment = 1
			/* sizes supported: 2^1 to 2^14 */
		},
	},
	RTE_COMP_END_OF_CAPABILITIES_LIST()
};
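
/* For reference, a minimal stateless DEFLATE xform that fits the
 * capabilities above (window 2^1..2^14, fixed or dynamic Huffman).
 * RTE_COMP_LEVEL_PMD_DEFAULT lands in the driver's medium-speed setting
 * (see zip_set_stream_parameters() below). This is an illustrative
 * sketch, not part of the driver, so it is guarded by a hypothetical
 * OTX_ZIP_USAGE_EXAMPLE macro and never built.
 */
#ifdef OTX_ZIP_USAGE_EXAMPLE
static const struct rte_comp_xform example_deflate_xform = {
	.type = RTE_COMP_COMPRESS,
	.compress = {
		.algo = RTE_COMP_ALGO_DEFLATE,
		.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
		.level = RTE_COMP_LEVEL_PMD_DEFAULT,
		.window_size = 14,
		.chksum = RTE_COMP_CHECKSUM_NONE,
	},
};
#endif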

/*
 * Reset session to default state for the next set of stateless operations
 */
static inline void
reset_stream(struct zip_stream *z_stream)
{
	union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);

	inst->s.bf = 1;
	inst->s.ef = 0;
}

int
zip_process_op(struct rte_comp_op *op,
		struct zipvf_qp *qp,
		struct zip_stream *zstrm)
{
	union zip_inst_s *inst = zstrm->inst;
	volatile union zip_zres_s *zresult = NULL;

	if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
			(op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
			(op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		ZIP_PMD_ERR("Segmented packets and out-of-range offsets are not supported\n");
		return 0;
	}

	zipvf_prepare_cmd_stateless(op, zstrm);

	zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
	zresult->s.compcode = 0;

#ifdef ZIP_DBG
	zip_dump_instruction(inst);
#endif

	/* Submit zip command */
	zipvf_push_command(qp, (void *)inst);

	/* Busy-poll the result buffer; hardware writes a non-zero
	 * completion code when the operation finishes (sync mode).
	 */
	do {
	} while (!zresult->s.compcode);

	if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
		op->status = RTE_COMP_OP_STATUS_SUCCESS;
	} else {
		/* FATAL error, cannot do anything */
		ZIP_PMD_ERR("operation failed with error code: %d\n",
			zresult->s.compcode);
		if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
			op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		else
			op->status = RTE_COMP_OP_STATUS_ERROR;
	}

	ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten);

	/* Update op stats */
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		op->consumed = zresult->s.totalbytesread;
		/* Fall-through */
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		op->produced = zresult->s.totalbyteswritten;
		break;
	default:
		ZIP_PMD_ERR("stats not updated for status: %d\n",
			op->status);
		break;
	}
	/* zstream is reset irrespective of the result */
	reset_stream(zstrm);

	zresult->s.compcode = ZIP_COMP_E_NOTDONE;
	return 0;
}
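
/* A minimal sketch of how a caller would fill a stateless op before it
 * reaches zip_process_op(). Illustrative only: the op pool, mbufs and
 * priv_xform are assumed to exist, and the helper name is hypothetical.
 */
#ifdef OTX_ZIP_USAGE_EXAMPLE
static struct rte_comp_op *
example_build_op(struct rte_mempool *op_pool, struct rte_mbuf *src,
		struct rte_mbuf *dst, void *priv_xform)
{
	struct rte_comp_op *op = rte_comp_op_alloc(op_pool);

	if (op == NULL)
		return NULL;
	op->op_type = RTE_COMP_OP_STATELESS;
	op->m_src = src;		/* single-segment mbufs only */
	op->m_dst = dst;
	op->src.offset = 0;
	op->src.length = rte_pktmbuf_pkt_len(src);
	op->dst.offset = 0;
	op->flush_flag = RTE_COMP_FLUSH_FINAL;
	op->private_xform = priv_xform;
	return op;
}
#endif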

/** Parse xform parameters and set up a stream */
static int
zip_set_stream_parameters(struct rte_compressdev *dev,
			const struct rte_comp_xform *xform,
			struct zip_stream *z_stream)
{
	int ret;
	union zip_inst_s *inst;
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	void *res;

	/* Allocate resources required by a stream */
	ret = rte_mempool_get_bulk(vf->zip_mp,
			z_stream->bufs, MAX_BUFS_PER_STREAM);
	if (ret < 0)
		return -1;

	/* Get one command buffer from the pool and set it up */
	inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
	res = z_stream->bufs[RES_BUF];

	memset(inst->u, 0, sizeof(inst->u));

	/* Set bf only for the first op of a stream */
	inst->s.bf = 1;

	if (xform->type == RTE_COMP_COMPRESS) {
		inst->s.op = ZIP_OP_E_COMP;

		switch (xform->compress.deflate.huffman) {
		case RTE_COMP_HUFFMAN_DEFAULT:
			inst->s.cc = ZIP_CC_DEFAULT;
			break;
		case RTE_COMP_HUFFMAN_FIXED:
			inst->s.cc = ZIP_CC_FIXED_HUFF;
			break;
		case RTE_COMP_HUFFMAN_DYNAMIC:
			inst->s.cc = ZIP_CC_DYN_HUFF;
			break;
		default:
			ret = -1;
			goto err;
		}

		switch (xform->compress.level) {
		case RTE_COMP_LEVEL_MIN:
			inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
			break;
		case RTE_COMP_LEVEL_MAX:
			inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
			break;
		case RTE_COMP_LEVEL_NONE:
			ZIP_PMD_ERR("Compression level not supported");
			ret = -1;
			goto err;
		default:
			/* For any value between min and max, choose
			 * the PMD default.
			 */
			inst->s.ss = ZIP_COMP_E_LEVEL_MED; /* PMD default */
			break;
		}
	} else if (xform->type == RTE_COMP_DECOMPRESS) {
		inst->s.op = ZIP_OP_E_DECOMP;
		/* From the HRM:
		 * For DEFLATE decompression, [CC] must be 0x0.
		 * For decompression, [SS] must be 0x0.
		 */
		inst->s.cc = 0;
		/* Speed bit should not be set for decompression */
		inst->s.ss = 0;
		/* A decompression context is supported only for STATEFUL
		 * operations. Currently we support STATELESS only, so
		 * skip setting the ctx pointer.
		 */

	} else {
		ZIP_PMD_ERR("xform type not supported\n");
		ret = -1;
		goto err;
	}

	inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
	inst->s.res_ptr_ctl.s.length = 0;

	z_stream->inst = inst;
	z_stream->func = zip_process_op;

	return 0;

err:
	rte_mempool_put_bulk(vf->zip_mp,
			(void *)&(z_stream->bufs[0]),
			MAX_BUFS_PER_STREAM);

	return ret;
}

/** Configure device */
static int
zip_pmd_config(struct rte_compressdev *dev,
		struct rte_compressdev_config *config)
{
	int nb_streams;
	char res_pool[RTE_MEMZONE_NAMESIZE];
	struct zip_vf *vf;
	struct rte_mempool *zip_buf_mp;

	if (!config || !dev)
		return -EIO;

	vf = (struct zip_vf *)(dev->data->dev_private);

	/* Create a pool with the maximum number of resources
	 * required by streams.
	 */

	/* Use a common pool for non-shareable priv_xforms and streams */
	nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;

	snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
		 dev->data->dev_id);

	/* TBD: should we use the per-core object cache for stream resources? */
	zip_buf_mp = rte_mempool_create(
			res_pool,
			nb_streams * MAX_BUFS_PER_STREAM,
			ZIP_BUF_SIZE,
			0,
			0,
			NULL,
			NULL,
			NULL,
			NULL,
			SOCKET_ID_ANY,
			0);

	if (zip_buf_mp == NULL) {
		ZIP_PMD_ERR(
			"Failed to create buf mempool octtx_zip_res_pool%u",
			dev->data->dev_id);
		return -1;
	}

	vf->zip_mp = zip_buf_mp;

	return 0;
}
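
/* Sketch of the matching application-side setup. For example, with
 * max_nb_priv_xforms = 16 and max_nb_streams = 0, the pool created above
 * holds 16 * MAX_BUFS_PER_STREAM buffers of ZIP_BUF_SIZE bytes each.
 * Illustrative only; dev_id, socket_id and the inflight depth of 64 are
 * assumptions.
 */
#ifdef OTX_ZIP_USAGE_EXAMPLE
static int
example_configure(uint8_t dev_id, int socket_id)
{
	struct rte_compressdev_config config = {
		.socket_id = socket_id,
		.nb_queue_pairs = 1,
		.max_nb_priv_xforms = 16,
		.max_nb_streams = 0,
	};

	if (rte_compressdev_configure(dev_id, &config) < 0)
		return -1;
	/* One completion ring per queue pair, sized for in-flight ops */
	if (rte_compressdev_queue_pair_setup(dev_id, 0, 64, socket_id) < 0)
		return -1;
	return rte_compressdev_start(dev_id);
}
#endif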

/** Start device */
static int
zip_pmd_start(__rte_unused struct rte_compressdev *dev)
{
	return 0;
}

/** Stop device */
static void
zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
{

}

/** Close device */
static int
zip_pmd_close(struct rte_compressdev *dev)
{
	if (dev == NULL)
		return -1;

	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
	rte_mempool_free(vf->zip_mp);

	return 0;
}

/** Get device statistics */
static void
zip_pmd_stats_get(struct rte_compressdev *dev,
		struct rte_compressdev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->qp_stats.enqueued_count;
		stats->dequeued_count += qp->qp_stats.dequeued_count;

		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
zip_pmd_stats_reset(struct rte_compressdev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	}
}

/** Get device info */
static void
zip_pmd_info_get(struct rte_compressdev *dev,
		struct rte_compressdev_info *dev_info)
{
	struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_name = dev->device->driver->name;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = octtx_zip_pmd_capabilities;
		dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
	}
}

/** Release queue pair */
static int
zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
{
	struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp != NULL) {
		zipvf_q_term(qp);

		if (qp->processed_pkts)
			rte_ring_free(qp->processed_pkts);

		/* qp->name was allocated separately in qp_setup */
		rte_free(qp->name);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Create a ring to place processed packets on */
static struct rte_ring *
zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
		unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;

	r = rte_ring_lookup(qp->name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			ZIP_PMD_INFO("Reusing existing ring %s for processed"
					" packets", qp->name);
			return r;
		}

		ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
				" packets", qp->name);
		return NULL;
	}

	return rte_ring_create(qp->name, ring_size, socket_id,
			RING_F_EXACT_SZ);
}

/** Setup a queue pair */
static int
zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
		uint32_t max_inflight_ops, int socket_id)
{
	struct zipvf_qp *qp = NULL;
	struct zip_vf *vf;
	char *name;
	int ret;

	if (!dev)
		return -1;

	vf = (struct zip_vf *) (dev->data->dev_private);

	/* Reuse the queue pair if it has already been set up. */
	if (dev->data->queue_pairs[qp_id] != NULL) {
		ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
		return 0;
	}

	name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
	if (name == NULL)
		return (-ENOMEM);
	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
		 "zip_pmd_%u_qp_%u",
		 dev->data->dev_id, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket(name, sizeof(*qp),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL) {
		rte_free(name);
		return (-ENOMEM);
	}

	qp->name = name;

	/* Create a completion queue holding up to max_inflight_ops */
	qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
			max_inflight_ops, socket_id);
	if (qp->processed_pkts == NULL)
		goto qp_setup_cleanup;

	qp->id = qp_id;
	qp->vf = vf;

	ret = zipvf_q_init(qp);
	if (ret < 0)
		goto qp_setup_cleanup;

	dev->data->queue_pairs[qp_id] = qp;

	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
	return 0;

qp_setup_cleanup:
	if (qp->processed_pkts)
		rte_ring_free(qp->processed_pkts);
	rte_free(qp->name);
	rte_free(qp);
	return -1;
}

static int
zip_pmd_stream_create(struct rte_compressdev *dev,
		const struct rte_comp_xform *xform, void **stream)
{
	int ret;
	struct zip_stream *strm = NULL;

	strm = rte_malloc(NULL,
			sizeof(struct zip_stream), 0);

	if (strm == NULL)
		return (-ENOMEM);

	ret = zip_set_stream_parameters(dev, xform, strm);
	if (ret < 0) {
		ZIP_PMD_ERR("failed to configure xform parameters");
		rte_free(strm);
		return ret;
	}
	*stream = strm;
	return 0;
}
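
/* Because priv XFORMs are non-shareable here, the PMD registers
 * zip_pmd_stream_create() as private_xform_create (see the ops table
 * below). A hedged sketch of the application-side call; the wrapper
 * name is hypothetical.
 */
#ifdef OTX_ZIP_USAGE_EXAMPLE
static void *
example_create_xform(uint8_t dev_id, const struct rte_comp_xform *xform)
{
	void *priv_xform = NULL;

	if (rte_compressdev_private_xform_create(dev_id, xform,
			&priv_xform) < 0)
		return NULL;
	return priv_xform;
}
#endif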

static int
zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
{
	struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
	struct zip_stream *z_stream;

	if (stream == NULL)
		return 0;

	z_stream = (struct zip_stream *)stream;

	/* Free resources back to pool */
	rte_mempool_put_bulk(vf->zip_mp,
			(void *)&(z_stream->bufs[0]),
			MAX_BUFS_PER_STREAM);

	/* Zero out the whole structure */
	memset(stream, 0, sizeof(struct zip_stream));
	rte_free(stream);

	return 0;
}

static uint16_t
zip_pmd_enqueue_burst_sync(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;
	struct rte_comp_op *op;
	struct zip_stream *zstrm;
	int i, ret = 0;
	uint16_t enqd = 0;

	for (i = 0; i < nb_ops; i++) {
		op = ops[i];

		if (op->op_type == RTE_COMP_OP_STATEFUL) {
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		} else {
			/* process stateless ops */
			zstrm = (struct zip_stream *)op->private_xform;
			if (unlikely(zstrm == NULL))
				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			else
				ret = zstrm->func(op, qp, zstrm);
		}

		/* Whatever the outcome of the op, place it on the
		 * completion queue with its status.
		 */
		if (!ret)
			ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);

		if (unlikely(ret < 0)) {
			/* increment count if failed to enqueue op */
			qp->qp_stats.enqueue_err_count++;
		} else {
			qp->qp_stats.enqueued_count++;
			enqd++;
		}
	}
	return enqd;
}

static uint16_t
zip_pmd_dequeue_burst_sync(void *queue_pair,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	struct zipvf_qp *qp = queue_pair;

	unsigned int nb_dequeued = 0;

	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
			(void **)ops, nb_ops, NULL);
	qp->qp_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}
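
/* Because this PMD completes ops synchronously inside enqueue, one
 * enqueue/dequeue pair drains a single op. A minimal sketch of the
 * application-side loop; illustrative only, with dev_id and qp_id
 * assumed to be set up as above.
 */
#ifdef OTX_ZIP_USAGE_EXAMPLE
static int
example_process_one(uint8_t dev_id, uint16_t qp_id, struct rte_comp_op *op)
{
	if (rte_compressdev_enqueue_burst(dev_id, qp_id, &op, 1) != 1)
		return -1;
	/* the op is already on the completion ring; pull it back */
	while (rte_compressdev_dequeue_burst(dev_id, qp_id, &op, 1) == 0)
		;
	return (op->status == RTE_COMP_OP_STATUS_SUCCESS) ? 0 : -1;
}
#endif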

static struct rte_compressdev_ops octtx_zip_pmd_ops = {
	.dev_configure = zip_pmd_config,
	.dev_start = zip_pmd_start,
	.dev_stop = zip_pmd_stop,
	.dev_close = zip_pmd_close,

	.stats_get = zip_pmd_stats_get,
	.stats_reset = zip_pmd_stats_reset,

	.dev_infos_get = zip_pmd_info_get,

	.queue_pair_setup = zip_pmd_qp_setup,
	.queue_pair_release = zip_pmd_qp_release,

	.private_xform_create = zip_pmd_stream_create,
	.private_xform_free = zip_pmd_stream_free,
	.stream_create = NULL,
	.stream_free = NULL
};

static int
zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	int ret = 0;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
	struct rte_compressdev *compressdev;
	struct rte_compressdev_pmd_init_params init_params = {
		"",
		rte_socket_id(),
	};

	ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
			(unsigned int)pci_dev->id.vendor_id,
			(unsigned int)pci_dev->id.device_id);

	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_create(compressdev_name,
		&pci_dev->device, sizeof(struct zip_vf), &init_params);
	if (compressdev == NULL) {
		ZIP_PMD_ERR("driver %s: create failed", init_params.name);
		return -ENODEV;
	}

	/*
	 * Create the VF device only if the process type is primary.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* create vf dev with given pmd dev id */
		ret = zipvf_create(compressdev);
		if (ret < 0) {
			ZIP_PMD_ERR("Device creation failed");
			rte_compressdev_pmd_destroy(compressdev);
			return ret;
		}
	}

	compressdev->dev_ops = &octtx_zip_pmd_ops;
	/* register rx/tx burst functions for data path */
	compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
	compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
	compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
	return ret;
}

static int
zip_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_compressdev *compressdev;
	char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];

	if (pci_dev == NULL) {
		ZIP_PMD_ERR("Invalid PCI device\n");
		return -EINVAL;
	}
	rte_pci_device_name(&pci_dev->addr, compressdev_name,
			sizeof(compressdev_name));

	compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
	if (compressdev == NULL)
		return -ENODEV;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (zipvf_destroy(compressdev) < 0)
			return -ENODEV;
	}
	return rte_compressdev_pmd_destroy(compressdev);
}

static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			PCI_DEVICE_ID_OCTEONTX_ZIPVF),
	},
	{
		.device_id = 0
	},
};

/**
 * Structure that represents a PCI driver
 */
static struct rte_pci_driver octtx_zip_pmd = {
	.id_table = pci_id_octtx_zipvf_table,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = zip_pci_probe,
	.remove = zip_pci_remove,
};

RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);

RTE_INIT(octtx_zip_init_log)
{
	octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
	if (octtx_zip_logtype_driver >= 0)
		rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
}