/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include "rte_comp.h"
#include "rte_compressdev.h"
#include "rte_compressdev_internal.h"

const char *
rte_comp_get_feature_name(uint64_t flag)
{
	switch (flag) {
	case RTE_COMP_FF_STATEFUL_COMPRESSION:
		return "STATEFUL_COMPRESSION";
	case RTE_COMP_FF_STATEFUL_DECOMPRESSION:
		return "STATEFUL_DECOMPRESSION";
	case RTE_COMP_FF_OOP_SGL_IN_SGL_OUT:
		return "OOP_SGL_IN_SGL_OUT";
	case RTE_COMP_FF_OOP_SGL_IN_LB_OUT:
		return "OOP_SGL_IN_LB_OUT";
	case RTE_COMP_FF_OOP_LB_IN_SGL_OUT:
		return "OOP_LB_IN_SGL_OUT";
	case RTE_COMP_FF_MULTI_PKT_CHECKSUM:
		return "MULTI_PKT_CHECKSUM";
	case RTE_COMP_FF_ADLER32_CHECKSUM:
		return "ADLER32_CHECKSUM";
	case RTE_COMP_FF_CRC32_CHECKSUM:
		return "CRC32_CHECKSUM";
	case RTE_COMP_FF_CRC32_ADLER32_CHECKSUM:
		return "CRC32_ADLER32_CHECKSUM";
	case RTE_COMP_FF_NONCOMPRESSED_BLOCKS:
		return "NONCOMPRESSED_BLOCKS";
	case RTE_COMP_FF_SHA1_HASH:
		return "SHA1_HASH";
	case RTE_COMP_FF_SHA2_SHA256_HASH:
		return "SHA2_SHA256_HASH";
	case RTE_COMP_FF_SHAREABLE_PRIV_XFORM:
		return "SHAREABLE_PRIV_XFORM";
	case RTE_COMP_FF_HUFFMAN_FIXED:
		return "HUFFMAN_FIXED";
	case RTE_COMP_FF_HUFFMAN_DYNAMIC:
		return "HUFFMAN_DYNAMIC";
	default:
		return NULL;
	}
}

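/*
 * Illustrative sketch (not part of the upstream file): walking a 64-bit
 * feature mask bit by bit and printing the name of every set flag via
 * rte_comp_get_feature_name(). The mask is assumed to come from a driver's
 * reported capabilities; print_comp_features() itself is a hypothetical
 * helper.
 *
 *	static void
 *	print_comp_features(uint64_t feature_mask)
 *	{
 *		unsigned int bit;
 *
 *		for (bit = 0; bit < 64; bit++) {
 *			uint64_t flag = UINT64_C(1) << bit;
 *			const char *name;
 *
 *			if ((feature_mask & flag) == 0)
 *				continue;
 *			name = rte_comp_get_feature_name(flag);
 *			printf("  %s\n", name != NULL ? name : "UNKNOWN");
 *		}
 *	}
 */
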
/**
 * Reset the fields of an operation to their default values.
 *
 * @note The private data associated with the operation is not zeroed.
 *
 * @param op
 *   The operation to be reset
 */
static inline void
rte_comp_op_reset(struct rte_comp_op *op)
{
	struct rte_mempool *tmp_mp = op->mempool;
	rte_iova_t tmp_iova_addr = op->iova_addr;

	memset(op, 0, sizeof(struct rte_comp_op));
	op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
	op->iova_addr = tmp_iova_addr;
	op->mempool = tmp_mp;
}

/**
 * Private data structure belonging to an operation pool.
 */
struct rte_comp_op_pool_private {
	uint16_t user_size;
	/**< Size of private user data with each operation. */
};

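/*
 * Illustrative note (not part of the upstream file): user_size is the number
 * of extra bytes reserved behind each rte_comp_op in a pool element (see the
 * elt_size computation in rte_comp_op_pool_create() below), so an
 * application that requested per-op user data can reach it directly after
 * the op structure, e.g.:
 *
 *	void *user_data = (void *)(op + 1);	// valid for user_size bytes
 */
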
/**
 * Bulk allocate raw elements from the mempool and return them as comp
 * operations.
 *
 * @param mempool
 *   Compress operation mempool
 * @param ops
 *   Array to place allocated operations
 * @param nb_ops
 *   Number of operations to allocate
 * @return
 *   - nb_ops: Success, the nb_ops requested were allocated
 *   - 0: Not enough entries in the mempool; no ops are retrieved.
 */
static inline int
rte_comp_op_raw_bulk_alloc(struct rte_mempool *mempool,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
		return nb_ops;

	return 0;
}

/** Initialise rte_comp_op mempool element */
static void
rte_comp_op_init(struct rte_mempool *mempool,
		__rte_unused void *opaque_arg,
		void *_op_data,
		__rte_unused unsigned int i)
{
	struct rte_comp_op *op = _op_data;

	memset(_op_data, 0, mempool->elt_size);

	op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
	op->iova_addr = rte_mem_virt2iova(_op_data);
	op->mempool = mempool;
}

struct rte_mempool *
rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t user_size, int socket_id)
{
	struct rte_comp_op_pool_private *priv;

	unsigned int elt_size = sizeof(struct rte_comp_op) + user_size;

	/* lookup mempool in case already allocated */
	struct rte_mempool *mp = rte_mempool_lookup(name);

	if (mp != NULL) {
		priv = (struct rte_comp_op_pool_private *)
				rte_mempool_get_priv(mp);

		if (mp->elt_size != elt_size ||
				mp->cache_size < cache_size ||
				mp->size < nb_elts ||
				priv->user_size < user_size) {
			mp = NULL;
			COMPRESSDEV_LOG(ERR,
				"Mempool %s already exists but with incompatible parameters",
				name);
			return NULL;
		}
		return mp;
	}

	mp = rte_mempool_create(
			name,
			nb_elts,
			elt_size,
			cache_size,
			sizeof(struct rte_comp_op_pool_private),
			NULL,
			NULL,
			rte_comp_op_init,
			NULL,
			socket_id,
			0);

	if (mp == NULL) {
		COMPRESSDEV_LOG(ERR, "Failed to create mempool %s", name);
		return NULL;
	}

	priv = (struct rte_comp_op_pool_private *)
			rte_mempool_get_priv(mp);

	priv->user_size = user_size;

	return mp;
}

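/*
 * Illustrative sketch (not part of the upstream file): creating an operation
 * pool on the caller's socket. The pool name, element count, cache size and
 * per-op user data size are hypothetical values chosen only for the example.
 *
 *	struct rte_mempool *op_pool;
 *
 *	op_pool = rte_comp_op_pool_create("comp_op_pool",
 *			4096,			// number of rte_comp_op elements
 *			128,			// per-lcore cache size
 *			0,			// no per-op user data
 *			rte_socket_id());
 *	if (op_pool == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create comp op pool\n");
 */
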
struct rte_comp_op *
rte_comp_op_alloc(struct rte_mempool *mempool)
{
	struct rte_comp_op *op = NULL;
	int retval;

	retval = rte_comp_op_raw_bulk_alloc(mempool, &op, 1);
	if (unlikely(retval != 1))
		return NULL;

	rte_comp_op_reset(op);

	return op;
}

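/*
 * Illustrative sketch (not part of the upstream file): allocating a single
 * operation and filling in the fields typically needed for a stateless
 * compression before enqueueing it. op_pool, src_mbuf, dst_mbuf and
 * priv_xform are hypothetical objects assumed to have been set up elsewhere.
 *
 *	struct rte_comp_op *op = rte_comp_op_alloc(op_pool);
 *
 *	if (op == NULL)
 *		return -ENOMEM;
 *
 *	op->op_type = RTE_COMP_OP_STATELESS;
 *	op->m_src = src_mbuf;
 *	op->m_dst = dst_mbuf;
 *	op->src.offset = 0;
 *	op->src.length = rte_pktmbuf_pkt_len(src_mbuf);
 *	op->dst.offset = 0;
 *	op->flush_flag = RTE_COMP_FLUSH_FINAL;
 *	op->private_xform = priv_xform;
 */
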
int
rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
		struct rte_comp_op **ops, uint16_t nb_ops)
{
	int retval;
	uint16_t i;

	retval = rte_comp_op_raw_bulk_alloc(mempool, ops, nb_ops);
	if (unlikely(retval != nb_ops))
		return 0;

	for (i = 0; i < nb_ops; i++)
		rte_comp_op_reset(ops[i]);

	return nb_ops;
}

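/*
 * Illustrative sketch (not part of the upstream file): a bulk allocate /
 * bulk free round trip. On success rte_comp_op_bulk_alloc() returns the
 * requested count with every op reset; on failure it returns 0 and nothing
 * is taken from the pool, so no partial cleanup is needed. BURST_SIZE and
 * op_pool are hypothetical.
 *
 *	struct rte_comp_op *ops[BURST_SIZE];
 *
 *	if (rte_comp_op_bulk_alloc(op_pool, ops, BURST_SIZE) == 0)
 *		return -ENOMEM;
 *
 *	// ... fill in and enqueue the ops, or simply give them back ...
 *
 *	rte_comp_op_bulk_free(ops, BURST_SIZE);
 */
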
/**
 * Free an operation structure.
 * If the operation has been allocated from an rte_mempool, it will be
 * returned to that mempool.
 *
 * @param op
 *   Compress operation
 */
void
rte_comp_op_free(struct rte_comp_op *op)
{
	if (op != NULL && op->mempool != NULL)
		rte_mempool_put(op->mempool, op);
}

void
rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; i++) {
		if (ops[i] != NULL && ops[i]->mempool != NULL)
			rte_mempool_put(ops[i]->mempool, ops[i]);
		ops[i] = NULL;
	}
}