return 0;
}
-void safexcel_dequeue(struct safexcel_crypto_priv *priv)
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
struct safexcel_context *ctx;
struct safexcel_request *request;
- int i, ret, n = 0, nreq[EIP197_MAX_RINGS] = {0};
- int cdesc[EIP197_MAX_RINGS] = {0}, rdesc[EIP197_MAX_RINGS] = {0};
- int commands, results;
+ int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
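+ /* Batch up to EIP197_MAX_BATCH_SZ requests from this ring's software queue */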
do {
- spin_lock_bh(&priv->lock);
- req = crypto_dequeue_request(&priv->queue);
- backlog = crypto_get_backlog(&priv->queue);
- spin_unlock_bh(&priv->lock);
+ spin_lock_bh(&priv->ring[ring].queue_lock);
+ req = crypto_dequeue_request(&priv->ring[ring].queue);
+ backlog = crypto_get_backlog(&priv->ring[ring].queue);
+ spin_unlock_bh(&priv->ring[ring].queue_lock);
if (!req)
goto finalize;
request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
if (!request)
goto requeue;
ctx = crypto_tfm_ctx(req->tfm);
- ret = ctx->send(req, ctx->ring, request, &commands, &results);
+ ret = ctx->send(req, ring, request, &commands, &results);
if (ret) {
kfree(request);
requeue:
- spin_lock_bh(&priv->lock);
- crypto_enqueue_request(&priv->queue, req);
- spin_unlock_bh(&priv->lock);
+ spin_lock_bh(&priv->ring[ring].queue_lock);
+ crypto_enqueue_request(&priv->ring[ring].queue, req);
+ spin_unlock_bh(&priv->ring[ring].queue_lock);
- priv->need_dequeue = true;
+ priv->ring[ring].need_dequeue = true;
continue;
}
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
- spin_lock_bh(&priv->ring[ctx->ring].egress_lock);
- list_add_tail(&request->list, &priv->ring[ctx->ring].list);
- spin_unlock_bh(&priv->ring[ctx->ring].egress_lock);
-
- cdesc[ctx->ring] += commands;
- rdesc[ctx->ring] += results;
+ spin_lock_bh(&priv->ring[ring].egress_lock);
+ list_add_tail(&request->list, &priv->ring[ring].list);
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
- nreq[ctx->ring]++;
- } while (n++ < EIP197_MAX_BATCH_SZ);
+ cdesc += commands;
+ rdesc += results;
+ } while (nreq++ < EIP197_MAX_BATCH_SZ);
finalize:
- if (n == EIP197_MAX_BATCH_SZ)
- priv->need_dequeue = true;
- else if (!n)
+ if (nreq == EIP197_MAX_BATCH_SZ)
+ priv->ring[ring].need_dequeue = true;
+ else if (!nreq)
return;
- for (i = 0; i < priv->config.rings; i++) {
- if (!nreq[i])
- continue;
+ spin_lock_bh(&priv->ring[ring].lock);
- spin_lock_bh(&priv->ring[i].lock);
+ /* Configure when we want an interrupt */
+ writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+ EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
+ priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
- /* Configure when we want an interrupt */
- writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
- EIP197_HIA_RDR_THRESH_PROC_PKT(nreq[i]),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_THRESH);
+ /* let the RDR know we have pending descriptors */
+ writel((rdesc * priv->config.rd_offset) << 2,
+ priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
- /* let the RDR know we have pending descriptors */
- writel((rdesc[i] * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ /* let the CDR know we have pending descriptors */
+ writel((cdesc * priv->config.cd_offset) << 2,
+ priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
- /* let the CDR know we have pending descriptors */
- writel((cdesc[i] * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
-
- spin_unlock_bh(&priv->ring[i].lock);
- }
+ spin_unlock_bh(&priv->ring[ring].lock);
}
void safexcel_free_context(struct safexcel_crypto_priv *priv,
safexcel_handle_result_descriptor(priv, data->ring);
- if (priv->need_dequeue) {
- priv->need_dequeue = false;
- safexcel_dequeue(data->priv);
+ if (priv->ring[data->ring].need_dequeue) {
+ priv->ring[data->ring].need_dequeue = false;
+ safexcel_dequeue(data->priv, data->ring);
}
}
goto err_clk;
}
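+ /* Each hw ring gets its own software queue and queue lock */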
+ crypto_init_queue(&priv->ring[i].queue,
+ EIP197_DEFAULT_RING_SIZE);
+
INIT_LIST_HEAD(&priv->ring[i].list);
spin_lock_init(&priv->ring[i].lock);
spin_lock_init(&priv->ring[i].egress_lock);
+ spin_lock_init(&priv->ring[i].queue_lock);
}
platform_set_drvdata(pdev, priv);
atomic_set(&priv->ring_used, 0);
- spin_lock_init(&priv->lock);
- crypto_init_queue(&priv->queue, EIP197_DEFAULT_RING_SIZE);
-
ret = safexcel_hw_init(priv);
if (ret) {
dev_err(dev, "EIP h/w init failed (%d)\n", ret);
struct clk *clk;
struct safexcel_config config;
- spinlock_t lock;
- struct crypto_queue queue;
-
- bool need_dequeue;
-
/* context DMA pool */
struct dma_pool *context_pool;
/* command/result rings */
struct safexcel_ring cdr;
struct safexcel_ring rdr;
+
+ /* queue */
+ struct crypto_queue queue;
+ spinlock_t queue_lock;
+ bool need_dequeue;
} ring[EIP197_MAX_RINGS];
};
int error;
};
-void safexcel_dequeue(struct safexcel_crypto_priv *priv);
+void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
void safexcel_free_context(struct safexcel_crypto_priv *priv,
struct crypto_async_request *req,
return ndesc;
}
+ ring = safexcel_select_ring(priv);
+ ctx->base.ring = ring;
ctx->base.needs_inv = false;
- ctx->base.ring = safexcel_select_ring(priv);
ctx->base.send = safexcel_aes_send;
- spin_lock_bh(&priv->lock);
- enq_ret = crypto_enqueue_request(&priv->queue, async);
- spin_unlock_bh(&priv->lock);
+ spin_lock_bh(&priv->ring[ring].queue_lock);
+ enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
+ spin_unlock_bh(&priv->ring[ring].queue_lock);
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- priv->need_dequeue = true;
+ if (!priv->ring[ring].need_dequeue)
+ safexcel_dequeue(priv, ring);
+
*should_complete = false;
return ndesc;
struct safexcel_crypto_priv *priv = ctx->priv;
struct skcipher_request req;
struct safexcel_inv_result result = { 0 };
+ int ring = ctx->base.ring;
memset(&req, 0, sizeof(struct skcipher_request));
ctx->base.exit_inv = true;
ctx->base.send = safexcel_cipher_send_inv;
- spin_lock_bh(&priv->lock);
- crypto_enqueue_request(&priv->queue, &req.base);
- spin_unlock_bh(&priv->lock);
+ spin_lock_bh(&priv->ring[ring].queue_lock);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->need_dequeue)
- safexcel_dequeue(priv);
+ if (!priv->ring[ring].need_dequeue)
+ safexcel_dequeue(priv, ring);
wait_for_completion_interruptible(&result.completion);
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
- int ret;
+ int ret, ring;
ctx->direction = dir;
ctx->mode = mode;
return -ENOMEM;
}
- spin_lock_bh(&priv->lock);
- ret = crypto_enqueue_request(&priv->queue, &req->base);
- spin_unlock_bh(&priv->lock);
+ ring = ctx->base.ring;
+
+ spin_lock_bh(&priv->ring[ring].queue_lock);
+ ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
+ spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->need_dequeue)
- safexcel_dequeue(priv);
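+ /* Only kick the ring if a deferred dequeue is not already pending */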
+ if (!priv->ring[ring].need_dequeue)
+ safexcel_dequeue(priv, ring);
return ret;
}
return 1;
}
- ctx->base.ring = safexcel_select_ring(priv);
+ ring = safexcel_select_ring(priv);
+ ctx->base.ring = ring;
ctx->base.needs_inv = false;
ctx->base.send = safexcel_ahash_send;
- spin_lock_bh(&priv->lock);
- enq_ret = crypto_enqueue_request(&priv->queue, async);
- spin_unlock_bh(&priv->lock);
+ spin_lock_bh(&priv->ring[ring].queue_lock);
+ enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
+ spin_unlock_bh(&priv->ring[ring].queue_lock);
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- priv->need_dequeue = true;
+ if (!priv->ring[ring].need_dequeue)
+ safexcel_dequeue(priv, ring);
+
*should_complete = false;
return 1;
struct safexcel_crypto_priv *priv = ctx->priv;
struct ahash_request req;
struct safexcel_inv_result result = { 0 };
+ int ring = ctx->base.ring;
memset(&req, 0, sizeof(struct ahash_request));
ctx->base.exit_inv = true;
ctx->base.send = safexcel_ahash_send_inv;
- spin_lock_bh(&priv->lock);
- crypto_enqueue_request(&priv->queue, &req.base);
- spin_unlock_bh(&priv->lock);
+ spin_lock_bh(&priv->ring[ring].queue_lock);
+ crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+ spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->need_dequeue)
- safexcel_dequeue(priv);
+ if (!priv->ring[ring].need_dequeue)
+ safexcel_dequeue(priv, ring);
wait_for_completion_interruptible(&result.completion);
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct safexcel_crypto_priv *priv = ctx->priv;
- int ret;
+ int ret, ring;
ctx->base.send = safexcel_ahash_send;
return -ENOMEM;
}
- spin_lock_bh(&priv->lock);
- ret = crypto_enqueue_request(&priv->queue, &areq->base);
- spin_unlock_bh(&priv->lock);
+ ring = ctx->base.ring;
+
+ spin_lock_bh(&priv->ring[ring].queue_lock);
+ ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
+ spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->need_dequeue)
- safexcel_dequeue(priv);
+ if (!priv->ring[ring].need_dequeue)
+ safexcel_dequeue(priv, ring);
return ret;
}