]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/lib/librte_compressdev/rte_compressdev.h
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / dpdk / lib / librte_compressdev / rte_compressdev.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation
3 */
4
5 #ifndef _RTE_COMPRESSDEV_H_
6 #define _RTE_COMPRESSDEV_H_
7
8 /**
9 * @file rte_compressdev.h
10 *
11 * RTE Compression Device APIs
12 *
13 * Defines comp device APIs for the provisioning of compression operations.
14 */
15
16 #ifdef __cplusplus
17 extern "C" {
18 #endif
19
20 #include <rte_common.h>
21
22 #include "rte_comp.h"
23
/**
 * Parameter log base 2 range description.
 * Final value will be 2^value.
 */
struct rte_param_log2_range {
	uint8_t min; /**< Minimum log2 value */
	uint8_t max; /**< Maximum log2 value */
	uint8_t increment;
	/**< If a range of sizes are supported,
	 * this parameter is used to indicate
	 * increments in base 2 log byte value
	 * that are supported between the minimum and maximum
	 */
};
38
/** Structure used to capture a capability of a comp device */
struct rte_compressdev_capabilities {
	enum rte_comp_algorithm algo;
	/**< Compression algorithm */
	uint64_t comp_feature_flags;
	/**< Bitmask of flags for compression service features */
	struct rte_param_log2_range window_size;
	/**< Window size range in base two log byte values */
};
48
/**
 * Macro used at the end of a comp PMD capabilities list: the terminating
 * element has its algo field set to RTE_COMP_ALGO_UNSPECIFIED.
 */
#define RTE_COMP_END_OF_CAPABILITIES_LIST() \
	{ RTE_COMP_ALGO_UNSPECIFIED }
52
/**
 * Query a comp device's capability entry for a specific algorithm.
 *
 * @param dev_id
 *   Compress device identifier
 * @param algo
 *   Compression algorithm to look up
 * @return
 *   - Pointer to the device's capability entry for *algo*.
 *   - NULL if the algorithm is not supported or dev_id is invalid
 *     (NOTE(review): return semantics inferred from upstream DPDK docs -
 *     confirm against the implementation, which is not visible here).
 */
__rte_experimental
const struct rte_compressdev_capabilities *
rte_compressdev_capability_get(uint8_t dev_id,
		enum rte_comp_algorithm algo);
57
/**
 * Compression device supported feature flags
 *
 * @note New features flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_compressdev_get_feature_name()
 */
#define RTE_COMPDEV_FF_HW_ACCELERATED (1ULL << 0)
/**< Operations are off-loaded to an external hardware accelerator */
#define RTE_COMPDEV_FF_CPU_SSE (1ULL << 1)
/**< Utilises CPU SIMD SSE instructions */
#define RTE_COMPDEV_FF_CPU_AVX (1ULL << 2)
/**< Utilises CPU SIMD AVX instructions */
#define RTE_COMPDEV_FF_CPU_AVX2 (1ULL << 3)
/**< Utilises CPU SIMD AVX2 instructions */
#define RTE_COMPDEV_FF_CPU_AVX512 (1ULL << 4)
/**< Utilises CPU SIMD AVX512 instructions */
#define RTE_COMPDEV_FF_CPU_NEON (1ULL << 5)
/**< Utilises CPU NEON instructions */
#define RTE_COMPDEV_FF_OP_DONE_IN_DEQUEUE (1ULL << 6)
/**< A PMD should set this if the bulk of the
 * processing is done during the dequeue. It should leave it
 * cleared if the processing is done during the enqueue (default).
 * Applications can use this as a hint for tuning.
 */
83
/**
 * Get the name of a compress device feature flag.
 *
 * @param flag
 *   The mask describing the flag
 *
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
__rte_experimental
const char *
rte_compressdev_get_feature_name(uint64_t flag);
96
/** comp device information */
struct rte_compressdev_info {
	const char *driver_name;		/**< Driver name. */
	uint64_t feature_flags;			/**< Feature flags */
	const struct rte_compressdev_capabilities *capabilities;
	/**< Array of devices supported capabilities */
	uint16_t max_nb_queue_pairs;
	/**< Maximum number of queues pairs supported by device.
	 * (If 0, there is no limit in maximum number of queue pairs)
	 */
};
108
/** comp device statistics */
struct rte_compressdev_stats {
	uint64_t enqueued_count;
	/**< Count of all operations enqueued */
	uint64_t dequeued_count;
	/**< Count of all operations dequeued */

	uint64_t enqueue_err_count;
	/**< Total error count on operations enqueued */
	uint64_t dequeue_err_count;
	/**< Total error count on operations dequeued */
};
121
122
/**
 * Get the device identifier for the named compress device.
 *
 * @param name
 *   Device name to select the device structure
 * @return
 *   - Returns compress device identifier on success.
 *   - Return -1 on failure to find named compress device.
 */
__rte_experimental
int
rte_compressdev_get_dev_id(const char *name);
135
/**
 * Get the compress device name given a device identifier.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - Returns compress device name.
 *   - Returns NULL if compress device is not present.
 */
__rte_experimental
const char *
rte_compressdev_name_get(uint8_t dev_id);
148
/**
 * Get the total number of compress devices that have been successfully
 * initialised.
 *
 * @return
 *   - The total number of usable compress devices.
 */
__rte_experimental
uint8_t
rte_compressdev_count(void);
159
/**
 * Get number and identifiers of attached comp devices that
 * use the same compress driver.
 *
 * @param driver_name
 *   Driver name
 * @param devices
 *   Output devices identifiers
 * @param nb_devices
 *   Maximum number of devices
 *
 * @return
 *   Returns number of attached compress devices.
 */
__rte_experimental
uint8_t
rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
		uint8_t nb_devices);
178
/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   -1 if the dev_id value is out of range.
 */
__rte_experimental
int
rte_compressdev_socket_id(uint8_t dev_id);
192
/** Compress device configuration structure */
struct rte_compressdev_config {
	int socket_id;
	/**< Socket on which to allocate resources */
	uint16_t nb_queue_pairs;
	/**< Total number of queue pairs to configure on a device */
	uint16_t max_nb_priv_xforms;
	/**< Max number of private_xforms which will be created on the device */
	uint16_t max_nb_streams;
	/**< Max number of streams which will be created on the device */
};
204
/**
 * Configure a device.
 *
 * This function must be invoked first before any other function in the
 * API. This function can also be re-invoked when a device is in the
 * stopped state.
 *
 * @param dev_id
 *   Compress device identifier
 * @param config
 *   The compress device configuration
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
__rte_experimental
int
rte_compressdev_configure(uint8_t dev_id,
		struct rte_compressdev_config *config);
224
/**
 * Start a device.
 *
 * The device start step is called after configuring the device and setting up
 * its queue pairs.
 * On success, data-path functions exported by the API (enqueue/dequeue, etc)
 * can be invoked.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - 0: Success, device started.
 *   - <0: Error code of the driver device start function.
 */
__rte_experimental
int
rte_compressdev_start(uint8_t dev_id);
242
/**
 * Stop a device. The device can be restarted with a call to
 * rte_compressdev_start()
 *
 * @param dev_id
 *   Compress device identifier
 */
__rte_experimental
void
rte_compressdev_stop(uint8_t dev_id);
253
/**
 * Close a device.
 * The memory allocated in the device gets freed.
 * After calling this function, in order to use
 * the device again, it is required to
 * configure the device again.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @return
 *  - 0 on successfully closing device
 *  - <0 on failure to close device
 */
__rte_experimental
int
rte_compressdev_close(uint8_t dev_id);
271
/**
 * Allocate and set up a receive queue pair for a device.
 * This should only be called when the device is stopped.
 *
 * @param dev_id
 *   Compress device identifier
 * @param queue_pair_id
 *   The index of the queue pairs to set up. The
 *   value must be in the range [0, nb_queue_pair - 1]
 *   previously supplied to rte_compressdev_configure()
 * @param max_inflight_ops
 *   Max number of ops which the qp will have to
 *   accommodate simultaneously
 * @param socket_id
 *   The *socket_id* argument is the socket identifier
 *   in case of NUMA. The value can be *SOCKET_ID_ANY*
 *   if there is no NUMA constraint for the DMA memory
 *   allocated for the receive queue pair
 * @return
 *   - 0: Success, queue pair correctly set up.
 *   - <0: Queue pair configuration failed
 */
__rte_experimental
int
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id);
299
/**
 * Get the number of queue pairs on a specific comp device
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - The number of configured queue pairs.
 */
__rte_experimental
uint16_t
rte_compressdev_queue_pair_count(uint8_t dev_id);
311
312
/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device
 * @param stats
 *   A pointer to a structure of type
 *   *rte_compressdev_stats* to be filled with the
 *   values of device counters
 * @return
 *   - Zero if successful.
 *   - Non-zero otherwise.
 */
__rte_experimental
int
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats);
329
/**
 * Reset the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 */
__rte_experimental
void
rte_compressdev_stats_reset(uint8_t dev_id);
339
/**
 * Retrieve the contextual information of a device.
 *
 * @param dev_id
 *   Compress device identifier
 * @param dev_info
 *   A pointer to a structure of type *rte_compressdev_info*
 *   to be filled with the contextual information of the device
 *
 * @note The capabilities field of dev_info is set to point to the first
 * element of an array of struct rte_compressdev_capabilities.
 * The element after the last valid element has its algo field set to
 * RTE_COMP_ALGO_UNSPECIFIED (see RTE_COMP_END_OF_CAPABILITIES_LIST()).
 */
__rte_experimental
void
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info);
357
/**
 *
 * Dequeue a burst of processed compression operations from a queue on the comp
 * device. The dequeued operations are stored in *rte_comp_op* structures
 * whose pointers are supplied in the *ops* array.
 *
 * The rte_compressdev_dequeue_burst() function returns the number of ops
 * actually dequeued, which is the number of *rte_comp_op* data structures
 * effectively supplied into the *ops* array.
 *
 * A return value equal to *nb_ops* indicates that the queue contained
 * at least *nb_ops* operations, and this is likely to signify that other
 * processed operations remain in the device's output queue. Applications
 * implementing a "retrieve as many processed operations as possible" policy
 * can check this specific case and keep invoking the
 * rte_compressdev_dequeue_burst() function until a value less than
 * *nb_ops* is returned.
 *
 * The rte_compressdev_dequeue_burst() function does not provide any error
 * notification to avoid the corresponding overhead.
 *
 * @note: operation ordering is not maintained within the queue pair.
 *
 * @note: In case op status = OUT_OF_SPACE_TERMINATED, op.consumed=0 and the
 * op must be resubmitted with the same input data and a larger output buffer.
 * op.produced is usually 0, but in decompression cases a PMD may return > 0
 * and the application may find it useful to inspect that data.
 * This status is only returned on STATELESS ops.
 *
 * @note: In case op status = OUT_OF_SPACE_RECOVERABLE, op.produced can be used
 * and next op in stream should continue on from op.consumed+1 with a fresh
 * output buffer.
 * Consumed=0, produced=0 is an unusual but allowed case. There may be useful
 * state/history stored in the PMD, even though no output was produced yet.
 *
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair from which to retrieve
 *   processed operations. The value must be in the range
 *   [0, nb_queue_pair - 1] previously supplied to
 *   rte_compressdev_configure()
 * @param ops
 *   The address of an array of pointers to
 *   *rte_comp_op* structures that must be
 *   large enough to store *nb_ops* pointers in it
 * @param nb_ops
 *   The maximum number of operations to dequeue
 * @return
 *   - The number of operations actually dequeued, which is the number
 *   of pointers to *rte_comp_op* structures effectively supplied to the
 *   *ops* array.
 */
__rte_experimental
uint16_t
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);
416
/**
 * Enqueue a burst of operations for processing on a compression device.
 *
 * The rte_compressdev_enqueue_burst() function is invoked to place
 * comp operations on the queue *qp_id* of the device designated by
 * its *dev_id*.
 *
 * The *nb_ops* parameter is the number of operations to process which are
 * supplied in the *ops* array of *rte_comp_op* structures.
 *
 * The rte_compressdev_enqueue_burst() function returns the number of
 * operations it actually enqueued for processing. A return value equal to
 * *nb_ops* means that all packets have been enqueued.
 *
 * @note All compression operations are Out-of-place (OOP) operations,
 * as the size of the output data is different to the size of the input data.
 *
 * @note The rte_comp_op contains both input and output parameters and is the
 * vehicle for the application to pass data into and out of the PMD. While an
 * op is inflight, i.e. once it has been enqueued, the private_xform or stream
 * attached to it and any mbufs or memory referenced by it should not be altered
 * or freed by the application. The PMD may use or change some of this data at
 * any time until it has been returned in a dequeue operation.
 *
 * @note The flush flag only applies to operations which return SUCCESS.
 * In OUT_OF_SPACE cases whether STATEFUL or STATELESS, data in dest buffer
 * is as if flush flag was FLUSH_NONE.
 * @note flush flag only applies in compression direction. It has no meaning
 * for decompression.
 * @note: operation ordering is not maintained within the queue pair.
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair on which operations
 *   are to be enqueued for processing. The value
 *   must be in the range [0, nb_queue_pairs - 1]
 *   previously supplied to *rte_compressdev_configure*
 * @param ops
 *   The address of an array of *nb_ops* pointers
 *   to *rte_comp_op* structures which contain
 *   the operations to be processed
 * @param nb_ops
 *   The number of operations to process
 * @return
 *   The number of operations actually enqueued on the device. The return
 *   value can be less than the value of the *nb_ops* parameter when the
 *   comp device's queue is full or if invalid parameters are specified in
 *   a *rte_comp_op*.
 */
__rte_experimental
uint16_t
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);
471
/**
 * This should alloc a stream from the device's mempool and initialise it.
 * The application should call this API when setting up for the stateful
 * processing of a set of data on a device. The API can be called multiple
 * times to set up a stream for each data set. The handle returned is only for
 * use with ops of op_type STATEFUL and must be passed to the PMD
 * with every op in the data stream
 *
 * @param dev_id
 *   Compress device identifier
 * @param xform
 *   xform data
 * @param stream
 *   Pointer to where PMD's private stream handle should be stored
 *
 * @return
 *  - 0 if successful and valid stream handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private stream could not be allocated.
 *
 */
__rte_experimental
int
rte_compressdev_stream_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **stream);
501
/**
 * This should clear the stream and return it to the device's mempool.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @param stream
 *   PMD's private stream data
 *
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -EBUSY if can't free stream as there are inflight operations
 */
__rte_experimental
int
rte_compressdev_stream_free(uint8_t dev_id, void *stream);
521
/**
 * This should alloc a private_xform from the device's mempool and initialise
 * it. The application should call this API when setting up for stateless
 * processing on a device. If it returns non-shareable, then the appl cannot
 * share this handle with multiple in-flight ops and should call this API again
 * to get a separate handle for every in-flight op.
 * The handle returned is only valid for use with ops of op_type STATELESS.
 *
 * @param dev_id
 *   Compress device identifier
 * @param xform
 *   xform data
 * @param private_xform
 *   Pointer to where PMD's private_xform handle should be stored
 *
 * @return
 *  - if successful returns 0
 *    and valid private_xform handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private_xform could not be allocated.
 */
__rte_experimental
int
rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **private_xform);
550
/**
 * This should clear the private_xform and return it to the device's mempool.
 * It is the application's responsibility to ensure that private_xform data
 * is not cleared while there are still in-flight operations using it.
 *
 * @param dev_id
 *   Compress device identifier
 *
 * @param private_xform
 *   PMD's private_xform data
 *
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 */
__rte_experimental
int
rte_compressdev_private_xform_free(uint8_t dev_id, void *private_xform);
570
571 #ifdef __cplusplus
572 }
573 #endif
574
575 #endif /* _RTE_COMPRESSDEV_H_ */