git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blobdiff - drivers/crypto/caam/caamhash.c
Merge tag 'pm-turbostat-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git...
[mirror_ubuntu-artful-kernel.git] / drivers / crypto / caam / caamhash.c
index e58639ea53b11e97bb06c91fd117ca8b1f30983a..da4f94eab3da1b6eb3f84423bb638002ef304add 100644 (file)
@@ -109,7 +109,6 @@ struct caam_hash_ctx {
        dma_addr_t sh_desc_digest_dma;
        struct device *jrdev;
        u8 key[CAAM_MAX_HASH_KEY_SIZE];
-       dma_addr_t key_dma;
        int ctx_len;
        struct alginfo adata;
 };
@@ -138,6 +137,31 @@ struct caam_export_state {
        int (*finup)(struct ahash_request *req);
 };
 
+static inline void switch_buf(struct caam_hash_state *state)
+{
+       state->current_buf ^= 1;
+}
+
+static inline u8 *current_buf(struct caam_hash_state *state)
+{
+       return state->current_buf ? state->buf_1 : state->buf_0;
+}
+
+static inline u8 *alt_buf(struct caam_hash_state *state)
+{
+       return state->current_buf ? state->buf_0 : state->buf_1;
+}
+
+static inline int *current_buflen(struct caam_hash_state *state)
+{
+       return state->current_buf ? &state->buflen_1 : &state->buflen_0;
+}
+
+static inline int *alt_buflen(struct caam_hash_state *state)
+{
+       return state->current_buf ? &state->buflen_0 : &state->buflen_1;
+}
+
 /* Common job descriptor seq in/out ptr routines */
 
 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
@@ -149,6 +173,7 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
                                        ctx_len, DMA_FROM_DEVICE);
        if (dma_mapping_error(jrdev, state->ctx_dma)) {
                dev_err(jrdev, "unable to map ctx\n");
+               state->ctx_dma = 0;
                return -ENOMEM;
        }
 
@@ -169,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
        return dst_dma;
 }
 
-/* Map current buffer in state and put it in link table */
-static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
-                                           struct sec4_sg_entry *sec4_sg,
-                                           u8 *buf, int buflen)
+/* Map current buffer in state (if length > 0) and put it in link table */
+static inline int buf_map_to_sec4_sg(struct device *jrdev,
+                                    struct sec4_sg_entry *sec4_sg,
+                                    struct caam_hash_state *state)
 {
-       dma_addr_t buf_dma;
+       int buflen = *current_buflen(state);
 
-       buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
-       dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
+       if (!buflen)
+               return 0;
 
-       return buf_dma;
-}
+       state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
+                                       DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, state->buf_dma)) {
+               dev_err(jrdev, "unable to map buf\n");
+               state->buf_dma = 0;
+               return -ENOMEM;
+       }
 
-/*
- * Only put buffer in link table if it contains data, which is possible,
- * since a buffer has previously been used, and needs to be unmapped,
- */
-static inline dma_addr_t
-try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
-                      u8 *buf, dma_addr_t buf_dma, int buflen,
-                      int last_buflen)
-{
-       if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
-               dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
-       if (buflen)
-               buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
-       else
-               buf_dma = 0;
-
-       return buf_dma;
+       dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
+
+       return 0;
 }
 
 /* Map state->caam_ctx, and add it to link table */
@@ -209,6 +225,7 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
        if (dma_mapping_error(jrdev, state->ctx_dma)) {
                dev_err(jrdev, "unable to map ctx\n");
+               state->ctx_dma = 0;
                return -ENOMEM;
        }
 
@@ -277,12 +294,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
        /* ahash_update shared descriptor */
        desc = ctx->sh_desc_update;
        ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
-       ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-                                                DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash update shdesc@"__stringify(__LINE__)": ",
@@ -292,13 +305,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
        /* ahash_update_first shared descriptor */
        desc = ctx->sh_desc_update_first;
        ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
-       ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
-                                                      desc_bytes(desc),
-                                                      DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash update first shdesc@"__stringify(__LINE__)": ",
@@ -308,12 +316,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
        /* ahash_final shared descriptor */
        desc = ctx->sh_desc_fin;
        ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
-       ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -323,13 +327,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
        /* ahash_digest shared descriptor */
        desc = ctx->sh_desc_digest;
        ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
-       ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
-                                                desc_bytes(desc),
-                                                DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash digest shdesc@"__stringify(__LINE__)": ",
@@ -420,7 +419,6 @@ static int ahash_setkey(struct crypto_ahash *ahash,
                        const u8 *key, unsigned int keylen)
 {
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-       struct device *jrdev = ctx->jrdev;
        int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
        int digestsize = crypto_ahash_digestsize(ahash);
        int ret;
@@ -448,28 +446,14 @@ static int ahash_setkey(struct crypto_ahash *ahash,
        if (ret)
                goto bad_free_key;
 
-       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
-                                     DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->key_dma)) {
-               dev_err(jrdev, "unable to map key i/o memory\n");
-               ret = -ENOMEM;
-               goto error_free_key;
-       }
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
                       ctx->adata.keylen_pad, 1);
 #endif
 
-       ret = ahash_set_sh_desc(ahash);
-       if (ret) {
-               dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
-                                DMA_TO_DEVICE);
-       }
-
- error_free_key:
        kfree(hashed_key);
-       return ret;
+       return ahash_set_sh_desc(ahash);
  bad_free_key:
        kfree(hashed_key);
        crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -498,6 +482,8 @@ static inline void ahash_unmap(struct device *dev,
                        struct ahash_edesc *edesc,
                        struct ahash_request *req, int dst_len)
 {
+       struct caam_hash_state *state = ahash_request_ctx(req);
+
        if (edesc->src_nents)
                dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
        if (edesc->dst_dma)
@@ -506,6 +492,12 @@ static inline void ahash_unmap(struct device *dev,
        if (edesc->sec4_sg_bytes)
                dma_unmap_single(dev, edesc->sec4_sg_dma,
                                 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
+
+       if (state->buf_dma) {
+               dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+                                DMA_TO_DEVICE);
+               state->buf_dma = 0;
+       }
 }
 
 static inline void ahash_unmap_ctx(struct device *dev,
@@ -516,8 +508,10 @@ static inline void ahash_unmap_ctx(struct device *dev,
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
 
-       if (state->ctx_dma)
+       if (state->ctx_dma) {
                dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+               state->ctx_dma = 0;
+       }
        ahash_unmap(dev, edesc, req, dst_len);
 }
 
@@ -562,8 +556,8 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-#ifdef DEBUG
        struct caam_hash_state *state = ahash_request_ctx(req);
+#ifdef DEBUG
        int digestsize = crypto_ahash_digestsize(ahash);
 
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
@@ -574,6 +568,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
                caam_jr_strstatus(jrdev, err);
 
        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+       switch_buf(state);
        kfree(edesc);
 
 #ifdef DEBUG
@@ -630,8 +625,8 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
-#ifdef DEBUG
        struct caam_hash_state *state = ahash_request_ctx(req);
+#ifdef DEBUG
        int digestsize = crypto_ahash_digestsize(ahash);
 
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
@@ -642,6 +637,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
                caam_jr_strstatus(jrdev, err);
 
        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+       switch_buf(state);
        kfree(edesc);
 
 #ifdef DEBUG
@@ -725,11 +721,10 @@ static int ahash_update_ctx(struct ahash_request *req)
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
-       int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
-       u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
-       int *next_buflen = state->current_buf ? &state->buflen_0 :
-                          &state->buflen_1, last_buflen;
+       u8 *buf = current_buf(state);
+       int *buflen = current_buflen(state);
+       u8 *next_buf = alt_buf(state);
+       int *next_buflen = alt_buflen(state), last_buflen;
        int in_len = *buflen + req->nbytes, to_hash;
        u32 *desc;
        int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
@@ -783,10 +778,9 @@ static int ahash_update_ctx(struct ahash_request *req)
                if (ret)
                        goto unmap_ctx;
 
-               state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
-                                                       edesc->sec4_sg + 1,
-                                                       buf, state->buf_dma,
-                                                       *buflen, last_buflen);
+               ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+               if (ret)
+                       goto unmap_ctx;
 
                if (mapped_nents) {
                        sg_to_sec4_sg_last(req->src, mapped_nents,
@@ -801,8 +795,6 @@ static int ahash_update_ctx(struct ahash_request *req)
                                cpu_to_caam32(SEC4_SG_LEN_FIN);
                }
 
-               state->current_buf = !state->current_buf;
-
                desc = edesc->hw_desc;
 
                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -859,10 +851,7 @@ static int ahash_final_ctx(struct ahash_request *req)
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
-       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
-       int last_buflen = state->current_buf ? state->buflen_0 :
-                         state->buflen_1;
+       int buflen = *current_buflen(state);
        u32 *desc;
        int sec4_sg_bytes, sec4_sg_src_index;
        int digestsize = crypto_ahash_digestsize(ahash);
@@ -889,9 +878,10 @@ static int ahash_final_ctx(struct ahash_request *req)
        if (ret)
                goto unmap_ctx;
 
-       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
-                                               buf, state->buf_dma, buflen,
-                                               last_buflen);
+       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+       if (ret)
+               goto unmap_ctx;
+
        (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
                cpu_to_caam32(SEC4_SG_LEN_FIN);
 
@@ -938,10 +928,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
-       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
-       int last_buflen = state->current_buf ? state->buflen_0 :
-                         state->buflen_1;
+       int buflen = *current_buflen(state);
        u32 *desc;
        int sec4_sg_src_index;
        int src_nents, mapped_nents;
@@ -986,9 +973,9 @@ static int ahash_finup_ctx(struct ahash_request *req)
        if (ret)
                goto unmap_ctx;
 
-       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
-                                               buf, state->buf_dma, buflen,
-                                               last_buflen);
+       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
+       if (ret)
+               goto unmap_ctx;
 
        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
                                  sec4_sg_src_index, ctx->ctx_len + buflen,
@@ -1024,6 +1011,7 @@ static int ahash_digest(struct ahash_request *req)
 {
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+       struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
@@ -1033,6 +1021,8 @@ static int ahash_digest(struct ahash_request *req)
        struct ahash_edesc *edesc;
        int ret;
 
+       state->buf_dma = 0;
+
        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
@@ -1105,8 +1095,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
-       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
+       u8 *buf = current_buf(state);
+       int buflen = *current_buflen(state);
        u32 *desc;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
@@ -1166,11 +1156,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
-       int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
-       u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
-       int *next_buflen = state->current_buf ? &state->buflen_0 :
-                          &state->buflen_1;
+       u8 *buf = current_buf(state);
+       int *buflen = current_buflen(state);
+       u8 *next_buf = alt_buf(state);
+       int *next_buflen = alt_buflen(state);
        int in_len = *buflen + req->nbytes, to_hash;
        int sec4_sg_bytes, src_nents, mapped_nents;
        struct ahash_edesc *edesc;
@@ -1219,8 +1208,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                edesc->sec4_sg_bytes = sec4_sg_bytes;
                edesc->dst_dma = 0;
 
-               state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
-                                                   buf, *buflen);
+               ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
+               if (ret)
+                       goto unmap_ctx;
+
                sg_to_sec4_sg_last(req->src, mapped_nents,
                                   edesc->sec4_sg + 1, 0);
 
@@ -1230,8 +1221,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                                                 *next_buflen, 0);
                }
 
-               state->current_buf = !state->current_buf;
-
                desc = edesc->hw_desc;
 
                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1293,10 +1282,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-       u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
-       int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
-       int last_buflen = state->current_buf ? state->buflen_0 :
-                         state->buflen_1;
+       int buflen = *current_buflen(state);
        u32 *desc;
        int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
@@ -1338,9 +1324,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
        edesc->src_nents = src_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
 
-       state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
-                                               state->buf_dma, buflen,
-                                               last_buflen);
+       ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
+       if (ret)
+               goto unmap;
 
        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
                                  req->nbytes);
@@ -1386,9 +1372,8 @@ static int ahash_update_first(struct ahash_request *req)
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
                       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-       u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
-       int *next_buflen = state->current_buf ?
-               &state->buflen_1 : &state->buflen_0;
+       u8 *next_buf = alt_buf(state);
+       int *next_buflen = alt_buflen(state);
        int to_hash;
        u32 *desc;
        int src_nents, mapped_nents;
@@ -1470,6 +1455,7 @@ static int ahash_update_first(struct ahash_request *req)
                state->final = ahash_final_no_ctx;
                scatterwalk_map_and_copy(next_buf, req->src, 0,
                                         req->nbytes, 0);
+               switch_buf(state);
        }
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
@@ -1497,6 +1483,7 @@ static int ahash_init(struct ahash_request *req)
        state->finup = ahash_finup_first;
        state->final = ahash_final_no_ctx;
 
+       state->ctx_dma = 0;
        state->current_buf = 0;
        state->buf_dma = 0;
        state->buflen_0 = 0;
@@ -1732,6 +1719,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
                                         HASH_MSG_LEN + SHA256_DIGEST_SIZE,
                                         HASH_MSG_LEN + 64,
                                         HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+       dma_addr_t dma_addr;
 
        /*
         * Get a Job ring from Job Ring driver to ensure in-order
@@ -1742,6 +1730,26 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
                pr_err("Job Ring Device allocation for transform failed\n");
                return PTR_ERR(ctx->jrdev);
        }
+
+       dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
+                                       offsetof(struct caam_hash_ctx,
+                                                sh_desc_update_dma),
+                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+       if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+               dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+               caam_jr_free(ctx->jrdev);
+               return -ENOMEM;
+       }
+
+       ctx->sh_desc_update_dma = dma_addr;
+       ctx->sh_desc_update_first_dma = dma_addr +
+                                       offsetof(struct caam_hash_ctx,
+                                                sh_desc_update_first);
+       ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
+                                                  sh_desc_fin);
+       ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
+                                                     sh_desc_digest);
+
        /* copy descriptor header template value */
        ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
 
@@ -1758,26 +1766,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
 {
        struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       if (ctx->sh_desc_update_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
-               dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
-                                desc_bytes(ctx->sh_desc_update),
-                                DMA_TO_DEVICE);
-       if (ctx->sh_desc_update_first_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
-               dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
-                                desc_bytes(ctx->sh_desc_update_first),
-                                DMA_TO_DEVICE);
-       if (ctx->sh_desc_fin_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
-               dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
-                                desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
-       if (ctx->sh_desc_digest_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
-               dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
-                                desc_bytes(ctx->sh_desc_digest),
-                                DMA_TO_DEVICE);
-
+       dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
+                              offsetof(struct caam_hash_ctx,
+                                       sh_desc_update_dma),
+                              DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
        caam_jr_free(ctx->jrdev);
 }