return -ENOMEM;
}
-static void cc_aead_complete(struct device *dev, void *cc_req)
+static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
struct aead_request *areq = (struct aead_request *)cc_req;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- int err = 0;
cc_unmap_aead_request(dev, areq);
/* Restore ordinary iv pointer */
areq->iv = areq_ctx->backup_iv;
+ if (err)
+ goto done;
+
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
ctx->authsize) != 0) {
CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
}
}
-
+done:
aead_request_complete(areq, err);
}
rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_aead_request(dev, req);
}
areq_ctx->plaintext_authenticate_only = false;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
cc_proc_rfc4309_ccm(req);
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
return rc;
areq_ctx->plaintext_authenticate_only = false;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
cc_proc_rfc4309_ccm(req);
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
return rc;
areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
- if (rc != -EINPROGRESS)
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
out:
return rc;
areq_ctx->is_gcm4543 = true;
rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
- if (rc != -EINPROGRESS)
+ if (rc != -EINPROGRESS && rc != -EBUSY)
req->iv = areq_ctx->backup_iv;
return rc;
struct crypto_shash *shash_tfm;
};
-static void cc_cipher_complete(struct device *dev, void *cc_req);
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
{
}
}
-static void cc_cipher_complete(struct device *dev, void *cc_req)
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
struct ablkcipher_request *areq = (struct ablkcipher_request *)cc_req;
struct scatterlist *dst = areq->dst;
struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
- int completion_error = 0;
struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
memcpy(req->info, req_ctx->backup_info, ivsize);
kfree(req_ctx->backup_info);
- } else {
+ } else if (!err) {
scatterwalk_map_and_copy(req->info, req->dst,
(req->nbytes - ivsize),
ivsize, 0);
}
- ablkcipher_request_complete(areq, completion_error);
+ ablkcipher_request_complete(areq, err);
}
static int cc_cipher_process(struct ablkcipher_request *req,
rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
&req->base);
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
/* Failed to send the request or request completed
* synchronously
*/
if (cts_restore_flag)
ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
kfree(req_ctx->backup_info);
kfree(req_ctx->iv);
}
#define CC_MAX_IVGEN_DMA_ADDRESSES 3
struct cc_crypto_req {
- void (*user_cb)(struct device *dev, void *req);
+ void (*user_cb)(struct device *dev, void *req, int err);
void *user_arg;
dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
/* For the first 'ivgen_dma_addr_len' addresses of this array,
state->digest_result_dma_addr = 0;
}
-static void cc_update_complete(struct device *dev, void *cc_req)
+static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
dev_dbg(dev, "req=%pK\n", req);
cc_unmap_hash_request(dev, state, req->src, false);
- req->base.complete(&req->base, 0);
+ req->base.complete(&req->base, err);
}
-static void cc_digest_complete(struct device *dev, void *cc_req)
+static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
cc_unmap_hash_request(dev, state, req->src, false);
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, 0);
+ req->base.complete(&req->base, err);
}
-static void cc_hash_complete(struct device *dev, void *cc_req)
+static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
cc_unmap_hash_request(dev, state, req->src, false);
cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, 0);
+ req->base.complete(&req->base, err);
}
static int cc_hash_digest(struct ahash_request *req)
idx++;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
cc_unmap_result(dev, state, digestsize, result);
idx++;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
}
idx++;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
cc_unmap_result(dev, state, digestsize, result);
idx++;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
cc_unmap_result(dev, state, digestsize, result);
cc_req.user_arg = (void *)req;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
}
idx++;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
cc_unmap_result(dev, state, digestsize, req->result);
idx++;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
cc_unmap_result(dev, state, digestsize, req->result);
idx++;
rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
- if (rc != -EINPROGRESS) {
+ if (rc != -EINPROGRESS && rc != -EBUSY) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
cc_unmap_result(dev, state, digestsize, req->result);
#include "ssi_pm.h"
#define CC_MAX_POLL_ITER 10
+/* The highest descriptor count in use */
+#define CC_MAX_DESC_SEQ_LEN 23
struct cc_req_mgr_handle {
/* Request manager resources */
u8 *dummy_comp_buff;
dma_addr_t dummy_comp_buff_dma;
+ /* backlog queue */
+ struct list_head backlog;
+ unsigned int bl_len; /* number of requests in the backlog */
+ spinlock_t bl_lock; /* protect backlog queue */
+
#ifdef COMP_IN_WQ
struct workqueue_struct *workq;
struct delayed_work compwork;
#endif
};
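+/*
+ * A backlogged request: holds a private copy of the crypto request and
+ * its HW descriptor sequence until the HW queue has room for it.
+ */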
+struct cc_bl_item {
+ struct cc_crypto_req creq;
+ struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
+ unsigned int len;
+ struct list_head list;
+ bool notif;
+};
+
static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
drvdata->request_mgr_handle = req_mgr_h;
spin_lock_init(&req_mgr_h->hw_lock);
+ spin_lock_init(&req_mgr_h->bl_lock);
+ INIT_LIST_HEAD(&req_mgr_h->backlog);
+
#ifdef COMP_IN_WQ
dev_dbg(dev, "Initializing completion workqueue\n");
req_mgr_h->workq = create_singlethread_workqueue("arm_cc7x_wq");
* \param dev
* \param dx_compl_h The completion event to signal
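+ * \param dummy Ignored; present only to match the cc_crypto_req user_cb signature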
*/
-static void request_mgr_complete(struct device *dev, void *dx_compl_h)
+static void request_mgr_complete(struct device *dev, void *dx_compl_h,
+ int dummy)
{
struct completion *this_compl = dx_compl_h;
return -EINPROGRESS;
}
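+/* Queue a request on the backlog and kick the completion tasklet to drain it */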
+static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
+ struct cc_bl_item *bli)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+
+ spin_lock_bh(&mgr->bl_lock);
+ list_add_tail(&bli->list, &mgr->backlog);
+ ++mgr->bl_len;
+ spin_unlock_bh(&mgr->bl_lock);
+ tasklet_schedule(&mgr->comptask);
+}
+
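+/*
+ * Resubmit backlogged requests for as long as the HW queue has room;
+ * stop at the first request that still does not fit.
+ */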
+static void cc_proc_backlog(struct cc_drvdata *drvdata)
+{
+ struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct cc_bl_item *bli;
+ struct cc_crypto_req *creq;
+ struct crypto_async_request *req;
+ bool ivgen;
+ unsigned int total_len;
+ struct device *dev = drvdata_to_dev(drvdata);
+ int rc;
+
+ spin_lock(&mgr->bl_lock);
+
+ while (mgr->bl_len) {
+ bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+ spin_unlock(&mgr->bl_lock);
+
+ creq = &bli->creq;
+ req = (struct crypto_async_request *)creq->user_arg;
+
+ /*
+ * Notify the request's originator that it is being moved out of
+ * the backlog, but only if we haven't done so already.
+ */
+ if (!bli->notif) {
+ req->complete(req, -EINPROGRESS);
+ bli->notif = true;
+ }
+
+ ivgen = !!creq->ivgen_dma_addr_len;
+ total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
+
+ spin_lock(&mgr->hw_lock);
+
+ rc = cc_queues_status(drvdata, mgr, total_len);
+ if (rc) {
+ /*
+ * There is still no room in the FIFO for this request.
+ * Bail out; we will retry it on the next completion
+ * interrupt.
+ */
+ spin_unlock(&mgr->hw_lock);
+ return;
+ }
+
+ rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
+ bli->len, false, ivgen);
+
+ spin_unlock(&mgr->hw_lock);
+
+ if (rc != -EINPROGRESS) {
+#if defined(CONFIG_PM)
+ cc_pm_put_suspend(dev);
+#endif
+ creq->user_cb(dev, req, rc);
+ }
+
+ /* Remove ourselves from the backlog list */
+ spin_lock(&mgr->bl_lock);
+ list_del(&bli->list);
+ --mgr->bl_len;
+ kfree(bli);
+ }
+
+ spin_unlock(&mgr->bl_lock);
+}
+
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
struct cc_hw_desc *desc, unsigned int len,
struct crypto_async_request *req)
bool ivgen = !!cc_req->ivgen_dma_addr_len;
unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
struct device *dev = drvdata_to_dev(drvdata);
+ bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
+ gfp_t flags = cc_gfp_flags(req);
+ struct cc_bl_item *bli;
#if defined(CONFIG_PM)
rc = cc_pm_get(dev);
spin_lock_bh(&mgr->hw_lock);
rc = cc_queues_status(drvdata, mgr, total_len);
- if (!rc)
- rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
- ivgen);
+#ifdef CC_DEBUG_FORCE_BACKLOG
+ if (backlog_ok)
+ rc = -ENOSPC;
+#endif /* CC_DEBUG_FORCE_BACKLOG */
- spin_unlock_bh(&mgr->hw_lock);
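+ /*
+ * The HW queue is full but the caller allows backlogging: queue a
+ * copy of the request, return -EBUSY now, and signal -EINPROGRESS
+ * once the request leaves the backlog.
+ */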
+ if (rc == -ENOSPC && backlog_ok) {
+ spin_unlock_bh(&mgr->hw_lock);
+ bli = kmalloc(sizeof(*bli), flags);
+ if (!bli) {
#if defined(CONFIG_PM)
- if (rc != -EINPROGRESS)
- cc_pm_put_suspend(dev);
+ cc_pm_put_suspend(dev);
#endif
+ return -ENOMEM;
+ }
+
+ memcpy(&bli->creq, cc_req, sizeof(*cc_req));
+ memcpy(&bli->desc, desc, len * sizeof(*desc));
+ bli->len = len;
+ bli->notif = false;
+ cc_enqueue_backlog(drvdata, bli);
+ return -EBUSY;
+ }
+ if (!rc)
+ rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
+ ivgen);
+
+ spin_unlock_bh(&mgr->hw_lock);
return rc;
}
cc_req = &request_mgr_handle->req_queue[*tail];
if (cc_req->user_cb)
- cc_req->user_cb(dev, cc_req->user_arg);
+ cc_req->user_cb(dev, cc_req->user_arg, 0);
*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
dev_dbg(dev, "Request completed. axi_completed=%d\n",
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+
+ cc_proc_backlog(drvdata);
}
/*