/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include "cipher.h"
#include "common.h"
#include "core.h"
#include "regs-v5.h"
#include "sha.h"

#define QCE_SECTOR_SIZE    512

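/* MMIO accessors for the crypto engine register space */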
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
        return readl(qce->base + offset);
}

static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
        writel(val, qce->base + offset);
}

static inline void qce_write_array(struct qce_device *qce, u32 offset,
                                   const u32 *val, unsigned int len)
{
        int i;

        for (i = 0; i < len; i++)
                qce_write(qce, offset + i * sizeof(u32), val[i]);
}

static inline void
qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
{
        int i;

        for (i = 0; i < len; i++)
                qce_write(qce, offset + i * sizeof(u32), 0);
}

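/*
 * Build the encryption segment configuration word: cipher algorithm,
 * key size and block mode, all derived from the request flags.
 */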
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
        u32 cfg = 0;

        if (IS_AES(flags)) {
                if (aes_key_size == AES_KEYSIZE_128)
                        cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
                else if (aes_key_size == AES_KEYSIZE_256)
                        cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
        }

        if (IS_AES(flags))
                cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
        else if (IS_DES(flags) || IS_3DES(flags))
                cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;

        if (IS_DES(flags))
                cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;

        if (IS_3DES(flags))
                cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;

        switch (flags & QCE_MODE_MASK) {
        case QCE_MODE_ECB:
                cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_CBC:
                cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_CTR:
                cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_XTS:
                cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_CCM:
                cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
                cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
                break;
        default:
                return ~0;
        }

        return cfg;
}

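/*
 * Build the authentication segment configuration word: hash/MAC
 * algorithm, digest size, AES key size for CCM/CMAC and the
 * HASH/HMAC/CCM/CMAC mode selection.
 */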
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
        u32 cfg = 0;

        if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
                cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
        else
                cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;

        if (IS_CCM(flags) || IS_CMAC(flags)) {
                if (key_size == AES_KEYSIZE_128)
                        cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
                else if (key_size == AES_KEYSIZE_256)
                        cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
        }

        if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
                cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
        else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
                cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
        else if (IS_CMAC(flags))
                cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;

        if (IS_SHA1(flags) || IS_SHA256(flags))
                cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
        else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
                 IS_CBC(flags) || IS_CTR(flags))
                cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
        else if (IS_AES(flags) && IS_CCM(flags))
                cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
        else if (IS_AES(flags) && IS_CMAC(flags))
                cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;

        if (IS_SHA(flags) || IS_SHA_HMAC(flags))
                cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;

        if (IS_CCM(flags))
                cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;

        if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
            IS_CMAC(flags))
                cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);

        return cfg;
}

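/*
 * Build the CONFIG register value: request size derived from the DMA
 * burst size, masked interrupts, the pipe pair select and, when
 * @little is set, little-endian data mode.
 */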
static u32 qce_config_reg(struct qce_device *qce, int little)
{
        u32 beats = (qce->burst_size >> 3) - 1;
        u32 pipe_pair = qce->pipe_pair_id;
        u32 config;

        config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
        config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
                  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
        config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
        config &= ~HIGH_SPD_EN_N_SHIFT;

        if (little)
                config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);

        return config;
}

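/*
 * Copy a byte buffer into an array of big-endian 32-bit words; used to
 * format keys, IVs and intermediate digests before writing them to the
 * engine registers.
 */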
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
{
        __be32 *d = dst;
        const u8 *s = src;
        unsigned int n;

        n = len / sizeof(u32);
        for (; n > 0; n--) {
                *d = cpu_to_be32p((const __u32 *)s);
                s += sizeof(__u32);
                d++;
        }
}

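/*
 * Byte-reverse the XTS tweak and right-align it within a 16-byte buffer
 * before converting it to big-endian words for the IV registers.
 */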
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
        u8 swap[QCE_AES_IV_LENGTH];
        u32 i, j;

        if (ivsize > QCE_AES_IV_LENGTH)
                return;

        memset(swap, 0, QCE_AES_IV_LENGTH);

        for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
             i < QCE_AES_IV_LENGTH; i++, j--)
                swap[i] = src[j];

        qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}

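/*
 * Program the second half of the XTS key (the tweak key) and the XTS
 * data-unit size, capped at one 512-byte sector.
 */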
static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
                       unsigned int enckeylen, unsigned int cryptlen)
{
        u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
        unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
        unsigned int xtsdusize;

        qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
                               enckeylen / 2);
        qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);

        /* xts du size 512B */
        xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
        qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
}

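/* Clear the status register and program CONFIG for big-endian data */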
static void qce_setup_config(struct qce_device *qce)
{
        u32 config;

        /* get big endianness */
        config = qce_config_reg(qce, 0);

        /* clear status */
        qce_write(qce, REG_STATUS, 0);
        qce_write(qce, REG_CONFIG, config);
}

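/* Kick off the operation and request a result dump on completion */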
static inline void qce_crypto_go(struct qce_device *qce)
{
        qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}

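/*
 * Program the engine for a SHA1/SHA256 (plain or HMAC) or AES-CMAC pass:
 * load the MAC key if any, the intermediate digest and byte counters
 * when continuing a hash, set up the auth segment, and start the
 * operation.
 */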
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
                                u32 totallen, u32 offset)
{
        struct ahash_request *req = ahash_request_cast(async_req);
        struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
        struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
        struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
        struct qce_device *qce = tmpl->qce;
        unsigned int digestsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
        __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
        __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
        u32 auth_cfg = 0, config;
        unsigned int iv_words;

        /* if not the last, the size has to be on the block boundary */
        if (!rctx->last_blk && req->nbytes % blocksize)
                return -EINVAL;

        qce_setup_config(qce);

        if (IS_CMAC(rctx->flags)) {
                qce_write(qce, REG_AUTH_SEG_CFG, 0);
                qce_write(qce, REG_ENCR_SEG_CFG, 0);
                qce_write(qce, REG_ENCR_SEG_SIZE, 0);
                qce_clear_array(qce, REG_AUTH_IV0, 16);
                qce_clear_array(qce, REG_AUTH_KEY0, 16);
                qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

                auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
        }

        if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
                u32 authkey_words = rctx->authklen / sizeof(u32);

                qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
                qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
                                authkey_words);
        }

        if (IS_CMAC(rctx->flags))
                goto go_proc;

        if (rctx->first_blk)
                memcpy(auth, rctx->digest, digestsize);
        else
                qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);

        iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
        qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);

        if (rctx->first_blk)
                qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
        else
                qce_write_array(qce, REG_AUTH_BYTECNT0,
                                (u32 *)rctx->byte_count, 2);

        auth_cfg = qce_auth_cfg(rctx->flags, 0);

        if (rctx->last_blk)
                auth_cfg |= BIT(AUTH_LAST_SHIFT);
        else
                auth_cfg &= ~BIT(AUTH_LAST_SHIFT);

        if (rctx->first_blk)
                auth_cfg |= BIT(AUTH_FIRST_SHIFT);
        else
                auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);

go_proc:
        qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
        qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
        qce_write(qce, REG_AUTH_SEG_START, 0);
        qce_write(qce, REG_ENCR_SEG_CFG, 0);
        qce_write(qce, REG_SEG_SIZE, req->nbytes);

        /* get little endianness */
        config = qce_config_reg(qce, 1);
        qce_write(qce, REG_CONFIG, config);

        qce_crypto_go(qce);

        return 0;
}

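/*
 * Program the engine for a block cipher request: load the key (and the
 * XTS tweak key when needed), the IV or counter for non-ECB modes, the
 * encrypt/decrypt direction and segment sizes, then start the operation.
 */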
static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
                                     u32 totallen, u32 offset)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
        struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
        struct qce_device *qce = tmpl->qce;
        __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
        __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
        unsigned int enckey_words, enciv_words;
        unsigned int keylen;
        u32 encr_cfg = 0, auth_cfg = 0, config;
        unsigned int ivsize = rctx->ivsize;
        unsigned long flags = rctx->flags;

        qce_setup_config(qce);

        if (IS_XTS(flags))
                keylen = ctx->enc_keylen / 2;
        else
                keylen = ctx->enc_keylen;

        qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
        enckey_words = keylen / sizeof(u32);

        qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

        encr_cfg = qce_encr_cfg(flags, keylen);

        if (IS_DES(flags)) {
                enciv_words = 2;
                enckey_words = 2;
        } else if (IS_3DES(flags)) {
                enciv_words = 2;
                enckey_words = 6;
        } else if (IS_AES(flags)) {
                if (IS_XTS(flags))
                        qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
                                   rctx->cryptlen);
                enciv_words = 4;
        } else {
                return -EINVAL;
        }

        qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);

        if (!IS_ECB(flags)) {
                if (IS_XTS(flags))
                        qce_xts_swapiv(enciv, rctx->iv, ivsize);
                else
                        qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);

                qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
        }

        if (IS_ENCRYPT(flags))
                encr_cfg |= BIT(ENCODE_SHIFT);

        qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
        qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
        qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);

        if (IS_CTR(flags)) {
                qce_write(qce, REG_CNTR_MASK, ~0);
                qce_write(qce, REG_CNTR_MASK0, ~0);
                qce_write(qce, REG_CNTR_MASK1, ~0);
                qce_write(qce, REG_CNTR_MASK2, ~0);
        }

        qce_write(qce, REG_SEG_SIZE, totallen);

        /* get little endianness */
        config = qce_config_reg(qce, 1);
        qce_write(qce, REG_CONFIG, config);

        qce_crypto_go(qce);

        return 0;
}

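/* Common entry point: program the registers for the given request type */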
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
              u32 offset)
{
        switch (type) {
        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                return qce_setup_regs_ablkcipher(async_req, totallen, offset);
        case CRYPTO_ALG_TYPE_AHASH:
                return qce_setup_regs_ahash(async_req, totallen, offset);
        default:
                return -EINVAL;
        }
}

#define STATUS_ERRORS   \
                (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))

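/*
 * Read back the status register and report -ENXIO if the engine flags
 * an error or the operation has not completed.
 */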
int qce_check_status(struct qce_device *qce, u32 *status)
{
        int ret = 0;

        *status = qce_read(qce, REG_STATUS);

        /*
         * Don't use the result dump status here; the operation may not be
         * complete.  Instead, use the status we just read from the device.
         * If result_status from the result dump is ever used, it needs to
         * be byte swapped, since we set the device to little-endian mode.
         */
        if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
                ret = -ENXIO;

        return ret;
}

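/* Decode the core revision (major.minor.step) from the VERSION register */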
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
        u32 val;

        val = qce_read(qce, REG_VERSION);
        *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
        *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
        *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}