crypto: inside-secure - avoid unmapping DMA memory that was not mapped
author     Antoine Tenart <antoine.tenart@free-electrons.com>
           Tue, 26 Dec 2017 16:21:16 +0000 (17:21 +0100)
committer  Seth Forshee <seth.forshee@canonical.com>
           Sat, 3 Feb 2018 17:40:40 +0000 (18:40 +0100)
BugLink: http://bugs.launchpad.net/bugs/1747169
commit c957f8b3e2e54b29f53ef69decc87bbc858c9b58 upstream.

This patch adds a field to the SafeXcel ahash request structure to
keep track of the number of SG entries mapped, so that dma_unmap_sg()
is not called when dma_map_sg() was never called in the first place.
This also removes a warning emitted when DMA-API debugging is enabled
in the kernel configuration: "DMA-API: device driver tries to free DMA
memory it has not allocated".

Fixes: 1b44c5a60c13 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
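
For reference, below is a minimal sketch of the bookkeeping pattern the
patch introduces, not the driver's actual code; the names example_req,
example_map and example_unmap are hypothetical. The idea is to remember
how many SG entries dma_map_sg() reported as mapped and only call
dma_unmap_sg() when that count is non-zero, mirroring what the diff does
with sreq->nents.

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    struct example_req {
            int nents;      /* SG entries currently mapped, 0 when unmapped */
    };

    static int example_map(struct device *dev, struct example_req *req,
                           struct scatterlist *src, unsigned int nbytes)
    {
            req->nents = dma_map_sg(dev, src, sg_nents_for_len(src, nbytes),
                                    DMA_TO_DEVICE);
            if (!req->nents)
                    return -ENOMEM; /* nothing mapped, nothing to unmap later */

            return 0;
    }

    static void example_unmap(struct device *dev, struct example_req *req,
                              struct scatterlist *src)
    {
            /* Guard: dma_unmap_sg() only if dma_map_sg() succeeded earlier. */
            if (req->nents) {
                    dma_unmap_sg(dev, src, req->nents, DMA_TO_DEVICE);
                    req->nents = 0;
            }
    }

Resetting nents to zero after unmapping keeps the guard correct if the
completion path can run again for the same request, which matches the
patch clearing sreq->nents in safexcel_handle_req_result().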
drivers/crypto/inside-secure/safexcel_hash.c

diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index c6f229fb9d0f46b64639923eb1db8ac40816b42f..da9d040bccc24537429742b43ffbeb9f34d870de 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -34,6 +34,8 @@ struct safexcel_ahash_req {
        bool hmac;
        bool needs_inv;
 
+       int nents;
+
        u8 state_sz;    /* expected sate size, only set once */
        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
@@ -152,8 +154,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
                memcpy(areq->result, sreq->state,
                       crypto_ahash_digestsize(ahash));
 
-       dma_unmap_sg(priv->dev, areq->src,
-                    sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+       if (sreq->nents) {
+               dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+               sreq->nents = 0;
+       }
 
        safexcel_free_context(priv, async, sreq->state_sz);
 
@@ -178,7 +182,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
        struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
-       int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+       int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
 
        queued = len = req->len - req->processed;
        if (queued < crypto_ahash_blocksize(ahash))
@@ -248,15 +252,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
        }
 
        /* Now handle the current ahash request buffer(s) */
-       nents = dma_map_sg(priv->dev, areq->src,
-                      sg_nents_for_len(areq->src, areq->nbytes),
-                      DMA_TO_DEVICE);
-       if (!nents) {
+       req->nents = dma_map_sg(priv->dev, areq->src,
+                               sg_nents_for_len(areq->src, areq->nbytes),
+                               DMA_TO_DEVICE);
+       if (!req->nents) {
                ret = -ENOMEM;
                goto cdesc_rollback;
        }
 
-       for_each_sg(areq->src, sg, nents, i) {
+       for_each_sg(areq->src, sg, req->nents, i) {
                int sglen = sg_dma_len(sg);
 
                /* Do not overflow the request */