#define QAT_PMD_COMP_SGL_DEF_SEGMENTS 16
+struct stream_create_info {
+ struct qat_comp_dev_private *comp_dev;
+ int socket_id;
+ int error;
+};
+
static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
{/* COMPRESSION - deflate */
.algo = RTE_COMP_ALGO_DEFLATE,
RTE_COMP_FF_HUFFMAN_DYNAMIC |
RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
- RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+ RTE_COMP_FF_STATEFUL_DECOMPRESSION,
.window_size = {.min = 15, .max = 15, .increment = 0} },
{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
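Since the capability table now advertises RTE_COMP_FF_STATEFUL_DECOMPRESSION, an application can detect the feature through the standard compressdev query. A minimal sketch, assuming dev_id refers to an initialised QAT compressdev:

	/* illustrative capability check for stateful decompression */
	const struct rte_compressdev_capabilities *cap =
		rte_compressdev_capability_get(dev_id, RTE_COMP_ALGO_DEFLATE);

	if (cap != NULL &&
	    (cap->comp_feature_flags & RTE_COMP_FF_STATEFUL_DECOMPRESSION))
		use_stateful_path = 1;	/* hypothetical application flag */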
= *qp_addr;
qp = (struct qat_qp *)*qp_addr;
+ qp->min_enq_burst_threshold = qat_private->min_enq_burst_threshold;
for (i = 0; i < qp->nb_descriptors; i++) {
struct qat_comp_op_cookie *cookie =
qp->op_cookies[i];
+ cookie->qp = qp;
+ cookie->cookie_index = i;
+
cookie->qat_sgl_src_d = rte_zmalloc_socket(NULL,
	sizeof(struct qat_sgl) +
	sizeof(struct qat_flat_buf) *
	QAT_PMD_COMP_SGL_DEF_SEGMENTS,
	64, dev->data->socket_id);
cookie->socket_id = dev->data->socket_id;
+
+ cookie->error = 0;
}
return ret;
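The sizing arithmetic in the allocation above is the flexible-array idiom: one qat_sgl header plus a default number of flat-buffer entries, reserved in a single allocation. A sketch of the shape this assumes (illustrative; the real definition lives in the QAT common code):

	struct qat_sgl {
		uint64_t resrvd;
		uint32_t num_bufs;
		uint32_t num_mapped_bufs;
		struct qat_flat_buf buffers[0];	/* sized at allocation time */
	};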
return mp;
}
+static void
+qat_comp_stream_init(struct rte_mempool *mp __rte_unused, void *opaque,
+ void *obj, unsigned int obj_idx)
+{
+ struct stream_create_info *info = opaque;
+ struct qat_comp_stream *stream = obj;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *memzone;
+ struct qat_inter_sgl *ram_banks_desc;
+
+ /* find a memzone for RAM banks */
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "%s_%u_rambanks",
+ info->comp_dev->qat_dev->name, obj_idx);
+ memzone = rte_memzone_lookup(mz_name);
+ if (memzone == NULL) {
+ /* allocate a memzone for compression state and RAM banks */
+ memzone = rte_memzone_reserve_aligned(mz_name,
+ QAT_STATE_REGISTERS_MAX_SIZE
+ + sizeof(struct qat_inter_sgl)
+ + QAT_INFLATE_CONTEXT_SIZE,
+ info->socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, QAT_64_BYTE_ALIGN);
+ if (memzone == NULL) {
+ QAT_LOG(ERR,
+ "Can't allocate RAM banks for device %s, object %u",
+ info->comp_dev->qat_dev->name, obj_idx);
+ info->error = -ENOMEM;
+ return;
+ }
+ }
+
+ /* prepare the buffer list descriptor for RAM banks */
+ ram_banks_desc = (struct qat_inter_sgl *)
+ (((uint8_t *) memzone->addr) + QAT_STATE_REGISTERS_MAX_SIZE);
+ ram_banks_desc->num_bufs = 1;
+ ram_banks_desc->buffers[0].len = QAT_INFLATE_CONTEXT_SIZE;
+ ram_banks_desc->buffers[0].addr = memzone->iova
+ + QAT_STATE_REGISTERS_MAX_SIZE
+ + sizeof(struct qat_inter_sgl);
+
+ memset(stream, 0, qat_comp_stream_size());
+ stream->memzone = memzone;
+ stream->state_registers_decomp = memzone->addr;
+ stream->state_registers_decomp_phys = memzone->iova;
+ stream->inflate_context = ((uint8_t *) memzone->addr)
+ + QAT_STATE_REGISTERS_MAX_SIZE;
+ stream->inflate_context_phys = memzone->iova
+ + QAT_STATE_REGISTERS_MAX_SIZE;
+}
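The offsets above imply a single contiguous memzone per stream, shared between the decompression state registers, the buffer list descriptor and the RAM banks it points at. A sketch of the layout implied by the code:

	/*
	 * memzone->addr                   -> state_registers_decomp
	 * + QAT_STATE_REGISTERS_MAX_SIZE  -> inflate_context, which is also
	 *                                    ram_banks_desc (struct qat_inter_sgl)
	 * + sizeof(struct qat_inter_sgl)  -> RAM banks region of
	 *                                    QAT_INFLATE_CONTEXT_SIZE bytes,
	 *                                    referenced by buffers[0].addr
	 */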
+
+static void
+qat_comp_stream_destroy(struct rte_mempool *mp __rte_unused,
+ void *opaque __rte_unused, void *obj,
+ unsigned int obj_idx __rte_unused)
+{
+ struct qat_comp_stream *stream = obj;
+
+ rte_memzone_free(stream->memzone);
+}
+
+static struct rte_mempool *
+qat_comp_create_stream_pool(struct qat_comp_dev_private *comp_dev,
+ int socket_id,
+ uint32_t num_elements)
+{
+ char stream_pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(stream_pool_name, RTE_MEMPOOL_NAMESIZE,
+ "%s_streams", comp_dev->qat_dev->name);
+
+ QAT_LOG(DEBUG, "streampool: %s", stream_pool_name);
+ mp = rte_mempool_lookup(stream_pool_name);
+
+ if (mp != NULL) {
+ QAT_LOG(DEBUG, "streampool already created");
+ if (mp->size != num_elements) {
+ QAT_LOG(DEBUG, "streampool wrong size - delete it");
+ rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
+ rte_mempool_free(mp);
+ mp = NULL;
+ comp_dev->streampool = NULL;
+ }
+ }
+
+ if (mp == NULL) {
+ struct stream_create_info info = {
+ .comp_dev = comp_dev,
+ .socket_id = socket_id,
+ .error = 0
+ };
+ mp = rte_mempool_create(stream_pool_name,
+ num_elements,
+ qat_comp_stream_size(), 0, 0,
+ NULL, NULL, qat_comp_stream_init, &info,
+ socket_id, 0);
+ if (mp == NULL) {
+ QAT_LOG(ERR,
+ "Err creating mempool %s w %d elements of size %d",
+ stream_pool_name, num_elements,
+ qat_comp_stream_size());
+ } else if (info.error) {
+ rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
+ QAT_LOG(ERR,
+ "Destoying mempool %s as at least one element failed initialisation",
+ stream_pool_name);
+ rte_mempool_free(mp);
+ mp = NULL;
+ }
+ }
+
+ return mp;
+}
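Elements of this pool back the public stream API. A minimal sketch of the application-side flow, assuming dev_id and a decompression xform are already set up and eliding error handling:

	void *stream;

	/* carves one element out of the pool created above */
	rte_compressdev_stream_create(dev_id, &xform, &stream);

	/* every chunk of the stateful flow references the same stream */
	op->op_type = RTE_COMP_OP_STATEFUL;
	op->stream = stream;
	op->flush_flag = RTE_COMP_FLUSH_SYNC;	/* RTE_COMP_FLUSH_FINAL on the
						 * last chunk */

	/* ... enqueue/dequeue each chunk in order ... */

	rte_compressdev_stream_free(dev_id, stream);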
+
static void
_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
{
rte_mempool_free(comp_dev->xformpool);
comp_dev->xformpool = NULL;
}
+
+ /* Free stream pool */
+ if (comp_dev->streampool) {
+ rte_mempool_obj_iter(comp_dev->streampool,
+ qat_comp_stream_destroy, NULL);
+ rte_mempool_free(comp_dev->streampool);
+ comp_dev->streampool = NULL;
+ }
}
static int
struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
int ret = 0;
- if (config->max_nb_streams != 0) {
- QAT_LOG(ERR,
- "QAT device does not support STATEFUL so max_nb_streams must be 0");
- return -EINVAL;
- }
-
if (RTE_PMD_QAT_COMP_IM_BUFFER_SIZE == 0) {
QAT_LOG(WARNING,
"RTE_PMD_QAT_COMP_IM_BUFFER_SIZE = 0 in config file, so"
}
}
- comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev, config,
- config->max_nb_priv_xforms);
- if (comp_dev->xformpool == NULL) {
+ if (config->max_nb_priv_xforms) {
+ comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
+ config, config->max_nb_priv_xforms);
+ if (comp_dev->xformpool == NULL) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
+ } else
+ comp_dev->xformpool = NULL;
+
+ if (config->max_nb_streams) {
+ comp_dev->streampool = qat_comp_create_stream_pool(comp_dev,
+ config->socket_id, config->max_nb_streams);
+ if (comp_dev->streampool == NULL) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
+ } else
+ comp_dev->streampool = NULL;
- ret = -ENOMEM;
- goto error_out;
- }
return 0;
error_out:
}
}
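With the blanket rejection of max_nb_streams removed, both pools are now sized directly from the device configuration. An illustrative configuration that exercises the new path (field values are assumptions):

	struct rte_compressdev_config config = {
		.socket_id = rte_socket_id(),
		.nb_queue_pairs = 1,
		.max_nb_priv_xforms = 16,	/* sizes the xform pool */
		.max_nb_streams = 8,		/* sizes the new stream pool */
	};

	if (rte_compressdev_configure(dev_id, &config) < 0)
		rte_exit(EXIT_FAILURE, "device configure failed\n");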
-static uint16_t
-qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
- uint16_t nb_ops)
-{
- return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
-}
-
-static uint16_t
-qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
- uint16_t nb_ops)
-{
- return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
-}
-
static uint16_t
qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
struct rte_comp_op **ops __rte_unused,
};
static uint16_t
-qat_comp_pmd_dequeue_frst_op_burst(void *qp, struct rte_comp_op **ops,
+qat_comp_pmd_dequeue_first_op_burst(void *qp, struct rte_comp_op **ops,
uint16_t nb_ops)
{
uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
} else {
tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
- qat_comp_pmd_dequeue_op_burst;
+ (compressdev_dequeue_pkt_burst_t)
+ qat_dequeue_op_burst;
}
}
return ret;
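The dequeue hook here acts as a one-shot trampoline: the first burst that returns ops is inspected for a firmware mismatch (which installs the dummy handlers), and otherwise dequeue_burst is re-pointed at qat_dequeue_op_burst so later calls skip the check. A generic, self-contained sketch of the pattern (not the driver's exact code):

	#include <stdint.h>

	struct dev {
		uint16_t (*dequeue)(struct dev *d, void **ops, uint16_t n);
	};

	static uint16_t
	real_dequeue(struct dev *d, void **ops, uint16_t n)
	{
		(void)d; (void)ops;
		return n;	/* stand-in for the real burst function */
	}

	/* wrapper used only until the first successful dequeue */
	static uint16_t
	first_dequeue(struct dev *d, void **ops, uint16_t n)
	{
		uint16_t ret = real_dequeue(d, ops, n);

		if (ret)	/* device verified: drop the wrapper */
			d->dequeue = real_dequeue;
		return ret;
	}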
/* Compression related operations */
.private_xform_create = qat_comp_private_xform_create,
- .private_xform_free = qat_comp_private_xform_free
+ .private_xform_free = qat_comp_private_xform_free,
+ .stream_create = qat_comp_stream_create,
+ .stream_free = qat_comp_stream_free
};
/* An rte_driver is needed in the registration of the device with compressdev.
.alias = qat_comp_drv_name
};
int
-qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev,
+ struct qat_dev_cmd_param *qat_dev_cmd_param)
{
+ int i = 0;
if (qat_pci_dev->qat_dev_gen == QAT_GEN3) {
- QAT_LOG(ERR, "Compression PMD not supported on QAT c4xxx");
+ QAT_LOG(ERR, "Compression PMD not supported on QAT P5xxx");
return 0;
}
compressdev->dev_ops = &compress_qat_ops;
- compressdev->enqueue_burst = qat_comp_pmd_enqueue_op_burst;
- compressdev->dequeue_burst = qat_comp_pmd_dequeue_frst_op_burst;
+ compressdev->enqueue_burst = (compressdev_enqueue_pkt_burst_t)
+ qat_enqueue_comp_op_burst;
+ compressdev->dequeue_burst = qat_comp_pmd_dequeue_first_op_burst;
compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
break;
}
+ while (1) {
+ if (qat_dev_cmd_param[i].name == NULL)
+ break;
+ if (!strcmp(qat_dev_cmd_param[i].name, COMP_ENQ_THRESHOLD_NAME))
+ comp_dev->min_enq_burst_threshold =
+ qat_dev_cmd_param[i].val;
+ i++;
+ }
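+
+/* The loop above walks a NULL-terminated name/value table handed down from
+ * the common QAT layer, which fills it from devargs. The shape assumed by
+ * the loop (illustrative; the real definition lives in the QAT common code):
+ *
+ *	struct qat_dev_cmd_param {
+ *		const char *name;  // devargs key, e.g. COMP_ENQ_THRESHOLD_NAME
+ *		uint16_t val;      // parsed value
+ *	};
+ */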
+
QAT_LOG(DEBUG,
"Created QAT COMP device %s as compressdev instance %d",
name, compressdev->data->dev_id);