// drivers/crypto/nx/nx-sha256.c
/*
 * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>
#include <asm/byteorder.h>

#include "nx_csbcpb.h"
#include "nx.h"

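/*
 * Implementation notes: the transform keeps a software struct sha256_state
 * in the shash descriptor, buffers sub-block tails in sctx->buf, and hands
 * full SHA256_BLOCK_SIZE chunks to the NX unit through scatter/gather
 * lists built with nx_build_sg_list(), the actual work being done by the
 * nx_hcall_sync() hypervisor call.
 */
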
static int nx_sha256_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_sg *out_sg;
	int len;
	u32 max_sg_len;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);

	memset(sctx, 0, sizeof(*sctx));

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

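	/* Per the PFO h_call convention (see struct vio_pfo_op in
	 * asm/vio.h), a negative inlen/outlen tells the hypervisor the
	 * field describes a scatter/gather descriptor list of that many
	 * bytes rather than a flat buffer, which is why op.outlen below
	 * is computed as head minus tail.
	 */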
	len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &len, max_sg_len);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (len != SHA256_DIGEST_SIZE)
		return -EINVAL;

	/* Seed the running digest with the SHA-256 IVs; the NX engine
	 * consumes and produces the digest words big-endian, so the
	 * state is kept big-endian throughout.
	 */
	sctx->state[0] = __cpu_to_be32(SHA256_H0);
	sctx->state[1] = __cpu_to_be32(SHA256_H1);
	sctx->state[2] = __cpu_to_be32(SHA256_H2);
	sctx->state[3] = __cpu_to_be32(SHA256_H3);
	sctx->state[4] = __cpu_to_be32(SHA256_H4);
	sctx->state[5] = __cpu_to_be32(SHA256_H5);
	sctx->state[6] = __cpu_to_be32(SHA256_H6);
	sctx->state[7] = __cpu_to_be32(SHA256_H7);
	sctx->count = 0;

	return 0;
}

static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	u64 to_process = 0, leftover, total;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;
	u32 max_sg_len;
	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* 2 cases for total data len:
	 *  1: < SHA256_BLOCK_SIZE: copy into the buffer, return 0
	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, buffer the leftover
	 */
	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
	if (total < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + buf_len, data, len);
		sctx->count += len;
		goto out;
	}

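	/* The engine is always driven in continuation mode: message_digest
	 * is seeded with the current state (on first use, the IV written
	 * by init), and each pass of the loop below copies it into
	 * input_partial_digest before the h_call, with
	 * INTERMEDIATE|CONTINUATION set in the CPB.
	 */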
	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	do {
		int used_sgs = 0;
		struct nx_sg *in_sg = nx_ctx->in_sg;

		if (buf_len) {
			data_len = buf_len;
			in_sg = nx_build_sg_list(in_sg,
						 (u8 *) sctx->buf,
						 &data_len,
						 max_sg_len);

			if (data_len != buf_len) {
				rc = -EINVAL;
				goto out;
			}
			used_sgs = in_sg - nx_ctx->in_sg;
		}

		/* to_process: the SHA256_BLOCK_SIZE-aligned chunk to hash
		 * in this iteration. It is bounded by the sg list limits,
		 * less the entries already used for the buffered leftover,
		 * with one entry reserved for unaligned data. This is an
		 * upper bound; the actual amount is recomputed from
		 * data_len once the sg list has been built.
		 */
		to_process = min_t(u64, total,
				   (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);

		data_len = to_process - buf_len;
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 &data_len, max_sg_len);

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);

		to_process = data_len + buf_len;
		leftover = total - to_process;

		/* Copy the running digest into the CPB's partial-digest
		 * field; on the first pass this is the IV seeded by init,
		 * afterwards it is the intermediate digest from the
		 * previous h_call.
		 */
		memcpy(csbcpb->cpb.sha256.input_partial_digest,
		       csbcpb->cpb.sha256.message_digest,
		       SHA256_DIGEST_SIZE);

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->sha256_ops));

		total -= to_process;
		data += to_process - buf_len;
		buf_len = 0;

	} while (leftover >= SHA256_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data, leftover);

	sctx->count += len;
	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	u32 max_sg_len;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* final is represented by continuing the operation and indicating
	 * that this is not an intermediate operation
	 */
	if (sctx->count >= SHA256_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest
		 */
		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state,
		       SHA256_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}

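	/* Only the sub-block tail still sitting in sctx->buf is sent to
	 * the engine; together with message_bit_length this lets the NX
	 * unit apply the final SHA-256 padding itself.
	 */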
	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);

	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
				 &len, max_sg_len);

	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
		rc = -EINVAL;
		goto out;
	}

	len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);

	if (len != SHA256_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));

	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

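/* Export/import simply snapshot the raw struct sha256_state, so a hash in
 * progress can be suspended and resumed. Note that state[] holds the
 * digest words big-endian (see nx_sha256_init).
 */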
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

struct shash_alg nx_shash_sha256_alg = {
	.digestsize = SHA256_DIGEST_SIZE,
	.init       = nx_sha256_init,
	.update     = nx_sha256_update,
	.final      = nx_sha256_final,
	.export     = nx_sha256_export,
	.import     = nx_sha256_import,
	.descsize   = sizeof(struct sha256_state),
	.statesize  = sizeof(struct sha256_state),
	.base       = {
		.cra_name        = "sha256",
		.cra_driver_name = "sha256-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = SHA256_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_sha_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
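
/*
 * A minimal sketch of how this transform is consumed, for orientation.
 * Registration happens elsewhere in the driver (nx.c registers
 * nx_shash_sha256_alg with the crypto API); a kernel user then goes
 * through the generic shash interface, e.g.:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *	// picks "sha256-nx" when it is the highest-priority provider
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	desc->tfm = tfm;
 *	crypto_shash_digest(desc, data, data_len, digest);
 *	crypto_free_shash(tfm);
 *
 * Illustrative only: error handling is omitted and the exact
 * registration call in nx.c is not shown here.
 */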