]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/staging/ccree/ssi_cipher.c
staging: ccree: save ciphertext for CTS IV
[mirror_ubuntu-artful-kernel.git] / drivers / staging / ccree / ssi_cipher.c
CommitLineData
302ef8eb
GBY
1/*
2 * Copyright (C) 2012-2017 ARM Limited or its affiliates.
c8f17865 3 *
302ef8eb
GBY
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
c8f17865 7 *
302ef8eb
GBY
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
c8f17865 12 *
302ef8eb
GBY
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/semaphore.h>
21#include <crypto/algapi.h>
22#include <crypto/internal/skcipher.h>
23#include <crypto/aes.h>
24#include <crypto/ctr.h>
25#include <crypto/des.h>
0ddc27d4 26#include <crypto/scatterwalk.h>
302ef8eb
GBY
27
28#include "ssi_config.h"
29#include "ssi_driver.h"
30#include "cc_lli_defs.h"
31#include "ssi_buffer_mgr.h"
32#include "ssi_cipher.h"
33#include "ssi_request_mgr.h"
34#include "ssi_sysfs.h"
16609980 35#include "ssi_fips_local.h"
302ef8eb
GBY
36
37#define MAX_ABLKCIPHER_SEQ_LEN 6
38
39#define template_ablkcipher template_u.ablkcipher
302ef8eb
GBY
40
41#define SSI_MIN_AES_XTS_SIZE 0x10
42#define SSI_MAX_AES_XTS_SIZE 0x2000
/* Handle holding the list of block-cipher algorithms registered by this driver */
struct ssi_blkcipher_handle {
	struct list_head blkcipher_alg_list;
};

/* User-supplied key material and its DMA mapping (mapped DMA_TO_DEVICE) */
struct cc_user_key_info {
	u8 *key;			/* key buffer (kmalloc'ed with GFP_DMA) */
	dma_addr_t key_dma_addr;	/* DMA address of @key */
};

/* HW-protected key slot selection (KFDE0..KFDE3) */
struct cc_hw_key_info {
	enum cc_hw_crypto_key key1_slot;
	/* second slot used only by XTS/ESSIV/BITLOCKER double-key modes */
	enum cc_hw_crypto_key key2_slot;
};

/* Per-transform (tfm) cipher context */
struct ssi_ablkcipher_ctx {
	struct ssi_drvdata *drvdata;	/* device/driver state */
	int keylen;			/* current key length in bytes */
	int key_round_number;		/* MULTI2 round count (last key byte) */
	int cipher_mode;		/* DRV_CIPHER_* mode */
	int flow_mode;			/* S_DIN_to_* HW engine selector */
	unsigned int flags;
	struct blkcipher_req_ctx *sync_ctx;
	struct cc_user_key_info user;	/* software key + DMA mapping */
	struct cc_hw_key_info hw;	/* HW key slots (when ssi_is_hw_key) */
	struct crypto_shash *shash_tfm;	/* sha256 tfm used for ESSIV key2 derivation */
};
69
70static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
71
a1ab41eb 72static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
e7258b6a 73 switch (ctx_p->flow_mode) {
302ef8eb 74 case S_DIN_to_AES:
e7258b6a 75 switch (size) {
302ef8eb
GBY
76 case CC_AES_128_BIT_KEY_SIZE:
77 case CC_AES_192_BIT_KEY_SIZE:
78 if (likely((ctx_p->cipher_mode != DRV_CIPHER_XTS) &&
79 (ctx_p->cipher_mode != DRV_CIPHER_ESSIV) &&
80 (ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)))
81 return 0;
82 break;
83 case CC_AES_256_BIT_KEY_SIZE:
84 return 0;
e7258b6a
GBY
85 case (CC_AES_192_BIT_KEY_SIZE * 2):
86 case (CC_AES_256_BIT_KEY_SIZE * 2):
302ef8eb
GBY
87 if (likely((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
88 (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
89 (ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)))
90 return 0;
91 break;
92 default:
93 break;
94 }
95 case S_DIN_to_DES:
96 if (likely(size == DES3_EDE_KEY_SIZE ||
97 size == DES_KEY_SIZE))
98 return 0;
99 break;
100#if SSI_CC_HAS_MULTI2
101 case S_DIN_to_MULTI2:
102 if (likely(size == CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE))
103 return 0;
104 break;
105#endif
106 default:
107 break;
302ef8eb
GBY
108 }
109 return -EINVAL;
110}
111
302ef8eb 112static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size) {
e7258b6a 113 switch (ctx_p->flow_mode) {
302ef8eb 114 case S_DIN_to_AES:
e7258b6a 115 switch (ctx_p->cipher_mode) {
302ef8eb
GBY
116 case DRV_CIPHER_XTS:
117 if ((size >= SSI_MIN_AES_XTS_SIZE) &&
c8f17865 118 (size <= SSI_MAX_AES_XTS_SIZE) &&
302ef8eb
GBY
119 IS_ALIGNED(size, AES_BLOCK_SIZE))
120 return 0;
121 break;
122 case DRV_CIPHER_CBC_CTS:
123 if (likely(size >= AES_BLOCK_SIZE))
124 return 0;
125 break;
126 case DRV_CIPHER_OFB:
127 case DRV_CIPHER_CTR:
128 return 0;
129 case DRV_CIPHER_ECB:
130 case DRV_CIPHER_CBC:
131 case DRV_CIPHER_ESSIV:
132 case DRV_CIPHER_BITLOCKER:
133 if (likely(IS_ALIGNED(size, AES_BLOCK_SIZE)))
134 return 0;
135 break;
136 default:
137 break;
138 }
139 break;
140 case S_DIN_to_DES:
141 if (likely(IS_ALIGNED(size, DES_BLOCK_SIZE)))
142 return 0;
143 break;
144#if SSI_CC_HAS_MULTI2
145 case S_DIN_to_MULTI2:
146 switch (ctx_p->cipher_mode) {
147 case DRV_MULTI2_CBC:
148 if (likely(IS_ALIGNED(size, CC_MULTI2_BLOCK_SIZE)))
149 return 0;
150 break;
151 case DRV_MULTI2_OFB:
152 return 0;
153 default:
154 break;
155 }
156 break;
157#endif /*SSI_CC_HAS_MULTI2*/
158 default:
159 break;
302ef8eb
GBY
160 }
161 return -EINVAL;
162}
163
164static unsigned int get_max_keysize(struct crypto_tfm *tfm)
165{
166 struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);
167
a8f6cbaa 168 if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_ABLKCIPHER)
302ef8eb 169 return ssi_alg->crypto_alg.cra_ablkcipher.max_keysize;
302ef8eb 170
a8f6cbaa 171 if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER)
302ef8eb 172 return ssi_alg->crypto_alg.cra_blkcipher.max_keysize;
302ef8eb
GBY
173
174 return 0;
175}
176
177static int ssi_blkcipher_init(struct crypto_tfm *tfm)
178{
179 struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
180 struct crypto_alg *alg = tfm->__crt_alg;
181 struct ssi_crypto_alg *ssi_alg =
182 container_of(alg, struct ssi_crypto_alg, crypto_alg);
183 struct device *dev;
184 int rc = 0;
185 unsigned int max_key_buf_size = get_max_keysize(tfm);
186
c8f17865 187 SSI_LOG_DEBUG("Initializing context @%p for %s\n", ctx_p,
302ef8eb
GBY
188 crypto_tfm_alg_name(tfm));
189
16609980 190 CHECK_AND_RETURN_UPON_FIPS_ERROR();
302ef8eb
GBY
191 ctx_p->cipher_mode = ssi_alg->cipher_mode;
192 ctx_p->flow_mode = ssi_alg->flow_mode;
193 ctx_p->drvdata = ssi_alg->drvdata;
194 dev = &ctx_p->drvdata->plat_dev->dev;
195
196 /* Allocate key buffer, cache line aligned */
e7258b6a 197 ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL | GFP_DMA);
302ef8eb
GBY
198 if (!ctx_p->user.key) {
199 SSI_LOG_ERR("Allocating key buffer in context failed\n");
200 rc = -ENOMEM;
201 }
202 SSI_LOG_DEBUG("Allocated key buffer in context. key=@%p\n",
203 ctx_p->user.key);
204
205 /* Map key buffer */
206 ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
207 max_key_buf_size, DMA_TO_DEVICE);
208 if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
209 SSI_LOG_ERR("Mapping Key %u B at va=%pK for DMA failed\n",
210 max_key_buf_size, ctx_p->user.key);
211 return -ENOMEM;
212 }
302ef8eb
GBY
213 SSI_LOG_DEBUG("Mapped key %u B at va=%pK to dma=0x%llX\n",
214 max_key_buf_size, ctx_p->user.key,
215 (unsigned long long)ctx_p->user.key_dma_addr);
216
217 if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
218 /* Alloc hash tfm for essiv */
219 ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
220 if (IS_ERR(ctx_p->shash_tfm)) {
221 SSI_LOG_ERR("Error allocating hash tfm for ESSIV.\n");
222 return PTR_ERR(ctx_p->shash_tfm);
223 }
224 }
225
226 return rc;
227}
228
229static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
230{
231 struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
232 struct device *dev = &ctx_p->drvdata->plat_dev->dev;
233 unsigned int max_key_buf_size = get_max_keysize(tfm);
234
235 SSI_LOG_DEBUG("Clearing context @%p for %s\n",
236 crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
237
238 if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
239 /* Free hash tfm for essiv */
240 crypto_free_shash(ctx_p->shash_tfm);
241 ctx_p->shash_tfm = NULL;
242 }
243
244 /* Unmap key buffer */
302ef8eb
GBY
245 dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
246 DMA_TO_DEVICE);
c8f17865 247 SSI_LOG_DEBUG("Unmapped key buffer key_dma_addr=0x%llX\n",
302ef8eb
GBY
248 (unsigned long long)ctx_p->user.key_dma_addr);
249
250 /* Free key buffer in context */
251 kfree(ctx_p->user.key);
252 SSI_LOG_DEBUG("Free key buffer in context. key=@%p\n", ctx_p->user.key);
253}
254
/* 3DES key laid out as three independent single-DES keys (K1|K2|K3) */
struct tdes_keys {
	u8	key1[DES_KEY_SIZE];
	u8	key2[DES_KEY_SIZE];
	u8	key3[DES_KEY_SIZE];
};

/* 32-byte all-zero buffer.
 * NOTE(review): not referenced anywhere in this file's visible code —
 * confirm against the rest of the file before removing.
 */
static const u8 zero_buff[] = {	0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
302ef8eb 265
16609980
GBY
266/* The function verifies that tdes keys are not weak.*/
267static int ssi_fips_verify_3des_keys(const u8 *key, unsigned int keylen)
268{
269#ifdef CCREE_FIPS_SUPPORT
1de8f59f 270 struct tdes_keys *tdes_key = (struct tdes_keys *)key;
16609980
GBY
271
272 /* verify key1 != key2 and key3 != key2*/
d32a0b6d
GBY
273 if (unlikely((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||
274 (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2, sizeof(tdes_key->key3)) == 0))) {
4f71fecd
DR
275 return -ENOEXEC;
276 }
16609980
GBY
277#endif /* CCREE_FIPS_SUPPORT */
278
4f71fecd 279 return 0;
16609980
GBY
280}
281
282/* The function verifies that xts keys are not weak.*/
283static int ssi_fips_verify_xts_keys(const u8 *key, unsigned int keylen)
284{
285#ifdef CCREE_FIPS_SUPPORT
4f71fecd
DR
286 /* Weak key is define as key that its first half (128/256 lsb) equals its second half (128/256 msb) */
287 int singleKeySize = keylen >> 1;
16609980 288
a8f6cbaa 289 if (unlikely(memcmp(key, &key[singleKeySize], singleKeySize) == 0))
16609980 290 return -ENOEXEC;
16609980
GBY
291#endif /* CCREE_FIPS_SUPPORT */
292
4f71fecd 293 return 0;
16609980
GBY
294}
295
8ca57f5c 296static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
302ef8eb
GBY
297{
298 switch (slot_num) {
299 case 0:
300 return KFDE0_KEY;
301 case 1:
302 return KFDE1_KEY;
303 case 2:
304 return KFDE2_KEY;
305 case 3:
306 return KFDE3_KEY;
307 }
308 return END_OF_KEYS;
309}
310
c8f17865
TI
311static int ssi_blkcipher_setkey(struct crypto_tfm *tfm,
312 const u8 *key,
302ef8eb
GBY
313 unsigned int keylen)
314{
315 struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
316 struct device *dev = &ctx_p->drvdata->plat_dev->dev;
317 u32 tmp[DES_EXPKEY_WORDS];
318 unsigned int max_key_buf_size = get_max_keysize(tfm);
302ef8eb
GBY
319
320 SSI_LOG_DEBUG("Setting key in context @%p for %s. keylen=%u\n",
321 ctx_p, crypto_tfm_alg_name(tfm), keylen);
a1ab41eb 322 dump_byte_array("key", (u8 *)key, keylen);
302ef8eb 323
16609980
GBY
324 CHECK_AND_RETURN_UPON_FIPS_ERROR();
325
326 SSI_LOG_DEBUG("ssi_blkcipher_setkey: after FIPS check");
c8f17865 327
302ef8eb 328 /* STAT_PHASE_0: Init and sanity checks */
302ef8eb
GBY
329
330#if SSI_CC_HAS_MULTI2
331 /*last byte of key buffer is round number and should not be a part of key size*/
a8f6cbaa 332 if (ctx_p->flow_mode == S_DIN_to_MULTI2)
e7258b6a 333 keylen -= 1;
302ef8eb
GBY
334#endif /*SSI_CC_HAS_MULTI2*/
335
e7258b6a 336 if (unlikely(validate_keys_sizes(ctx_p, keylen) != 0)) {
302ef8eb
GBY
337 SSI_LOG_ERR("Unsupported key size %d.\n", keylen);
338 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
339 return -EINVAL;
340 }
341
342 if (ssi_is_hw_key(tfm)) {
343 /* setting HW key slots */
d32a0b6d 344 struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
302ef8eb
GBY
345
346 if (unlikely(ctx_p->flow_mode != S_DIN_to_AES)) {
347 SSI_LOG_ERR("HW key not supported for non-AES flows\n");
348 return -EINVAL;
349 }
350
351 ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
352 if (unlikely(ctx_p->hw.key1_slot == END_OF_KEYS)) {
353 SSI_LOG_ERR("Unsupported hw key1 number (%d)\n", hki->hw_key1);
354 return -EINVAL;
355 }
356
357 if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
358 (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
359 (ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)) {
360 if (unlikely(hki->hw_key1 == hki->hw_key2)) {
361 SSI_LOG_ERR("Illegal hw key numbers (%d,%d)\n", hki->hw_key1, hki->hw_key2);
362 return -EINVAL;
363 }
364 ctx_p->hw.key2_slot = hw_key_to_cc_hw_key(hki->hw_key2);
365 if (unlikely(ctx_p->hw.key2_slot == END_OF_KEYS)) {
366 SSI_LOG_ERR("Unsupported hw key2 number (%d)\n", hki->hw_key2);
367 return -EINVAL;
368 }
369 }
370
371 ctx_p->keylen = keylen;
302ef8eb
GBY
372 SSI_LOG_DEBUG("ssi_blkcipher_setkey: ssi_is_hw_key ret 0");
373
374 return 0;
375 }
376
377 // verify weak keys
378 if (ctx_p->flow_mode == S_DIN_to_DES) {
379 if (unlikely(!des_ekey(tmp, key)) &&
380 (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
381 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
382 SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak DES key");
383 return -EINVAL;
384 }
385 }
c8f17865 386 if ((ctx_p->cipher_mode == DRV_CIPHER_XTS) &&
16609980
GBY
387 ssi_fips_verify_xts_keys(key, keylen) != 0) {
388 SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak XTS key");
389 return -EINVAL;
390 }
c8f17865
TI
391 if ((ctx_p->flow_mode == S_DIN_to_DES) &&
392 (keylen == DES3_EDE_KEY_SIZE) &&
16609980
GBY
393 ssi_fips_verify_3des_keys(key, keylen) != 0) {
394 SSI_LOG_DEBUG("ssi_blkcipher_setkey: weak 3DES key");
395 return -EINVAL;
396 }
397
302ef8eb 398 /* STAT_PHASE_1: Copy key to ctx */
c8f17865 399 dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
302ef8eb 400 max_key_buf_size, DMA_TO_DEVICE);
44c891af 401
302ef8eb 402 if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
44c891af 403#if SSI_CC_HAS_MULTI2
302ef8eb
GBY
404 memcpy(ctx_p->user.key, key, CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE);
405 ctx_p->key_round_number = key[CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE];
406 if (ctx_p->key_round_number < CC_MULTI2_MIN_NUM_ROUNDS ||
407 ctx_p->key_round_number > CC_MULTI2_MAX_NUM_ROUNDS) {
408 crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
409 SSI_LOG_DEBUG("ssi_blkcipher_setkey: SSI_CC_HAS_MULTI2 einval");
410 return -EINVAL;
302ef8eb 411#endif /*SSI_CC_HAS_MULTI2*/
44c891af 412 } else {
302ef8eb
GBY
413 memcpy(ctx_p->user.key, key, keylen);
414 if (keylen == 24)
415 memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
416
417 if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
418 /* sha256 for key2 - use sw implementation */
419 int key_len = keylen >> 1;
420 int err;
421 SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
492ddcbb 422
302ef8eb
GBY
423 desc->tfm = ctx_p->shash_tfm;
424
425 err = crypto_shash_digest(desc, ctx_p->user.key, key_len, ctx_p->user.key + key_len);
426 if (err) {
427 SSI_LOG_ERR("Failed to hash ESSIV key.\n");
428 return err;
429 }
430 }
431 }
c8f17865 432 dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
302ef8eb 433 max_key_buf_size, DMA_TO_DEVICE);
302ef8eb 434 ctx_p->keylen = keylen;
c8f17865 435
302ef8eb
GBY
436 SSI_LOG_DEBUG("ssi_blkcipher_setkey: return safely");
437 return 0;
438}
439
/*
 * Build the setup part of the HW descriptor sequence for one request:
 * load the IV/state and the key(s) into the crypto engine according to
 * the cipher mode.  Descriptors are appended to @desc and *@seq_size is
 * advanced past each one.
 *
 * @nbytes is used to derive the XTS data-unit size unless the algorithm
 * flags pin it to 512 or 4096 bytes.
 */
static inline void
ssi_blkcipher_create_setup_desc(
	struct crypto_tfm *tfm,
	struct blkcipher_req_ctx *req_ctx,
	unsigned int ivsize,
	unsigned int nbytes,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	int cipher_mode = ctx_p->cipher_mode;
	int flow_mode = ctx_p->flow_mode;
	int direction = req_ctx->gen_ctx.op_type;
	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
	unsigned int key_len = ctx_p->keylen;
	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
	unsigned int du_size = nbytes;

	struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);

	/* Fixed data-unit sizes override the request length for XTS-like modes */
	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) == CRYPTO_ALG_BULK_DU_512)
		du_size = 512;
	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) == CRYPTO_ALG_BULK_DU_4096)
		du_size = 4096;

	switch (cipher_mode) {
	case DRV_CIPHER_CBC:
	case DRV_CIPHER_CBC_CTS:
	case DRV_CIPHER_CTR:
	case DRV_CIPHER_OFB:
		/* Load cipher state */
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
			     NS_BIT);
		set_cipher_config0(&desc[*seq_size], direction);
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		/* CTR/OFB keep their counter/feedback in STATE1; CBC uses STATE0 */
		if ((cipher_mode == DRV_CIPHER_CTR) ||
		    (cipher_mode == DRV_CIPHER_OFB)) {
			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
		} else {
			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
		}
		(*seq_size)++;
		/*FALLTHROUGH*/
	case DRV_CIPHER_ECB:
		/* Load key */
		hw_desc_init(&desc[*seq_size]);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		if (flow_mode == S_DIN_to_AES) {
			if (ssi_is_hw_key(tfm)) {
				set_hw_crypto_key(&desc[*seq_size],
						  ctx_p->hw.key1_slot);
			} else {
				/* 192-bit keys were zero-padded to the max
				 * size at setkey time, so feed the full buffer
				 */
				set_din_type(&desc[*seq_size], DMA_DLLI,
					     key_dma_addr, ((key_len == 24) ?
							    AES_MAX_KEY_SIZE :
							    key_len), NS_BIT);
			}
			set_key_size_aes(&desc[*seq_size], key_len);
		} else {
			/*des*/
			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
				     key_len, NS_BIT);
			set_key_size_des(&desc[*seq_size], key_len);
		}
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
		(*seq_size)++;
		break;
	case DRV_CIPHER_XTS:
	case DRV_CIPHER_ESSIV:
	case DRV_CIPHER_BITLOCKER:
		/* Load AES key (first half of the double-length key) */
		hw_desc_init(&desc[*seq_size]);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		if (ssi_is_hw_key(tfm)) {
			set_hw_crypto_key(&desc[*seq_size],
					  ctx_p->hw.key1_slot);
		} else {
			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
				     (key_len / 2), NS_BIT);
		}
		set_key_size_aes(&desc[*seq_size], (key_len / 2));
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
		(*seq_size)++;

		/* load XEX key (second half) into the AES2 engine */
		hw_desc_init(&desc[*seq_size]);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		if (ssi_is_hw_key(tfm)) {
			set_hw_crypto_key(&desc[*seq_size],
					  ctx_p->hw.key2_slot);
		} else {
			set_din_type(&desc[*seq_size], DMA_DLLI,
				     (key_dma_addr + (key_len / 2)),
				     (key_len / 2), NS_BIT);
		}
		set_xex_data_unit_size(&desc[*seq_size], du_size);
		set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
		set_key_size_aes(&desc[*seq_size], (key_len / 2));
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
		(*seq_size)++;

		/* Set state (tweak/IV) */
		hw_desc_init(&desc[*seq_size]);
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		set_key_size_aes(&desc[*seq_size], (key_len / 2));
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		(*seq_size)++;
		break;
	default:
		SSI_LOG_ERR("Unsupported cipher mode (%d)\n", cipher_mode);
		BUG();
	}
}
564
#if SSI_CC_HAS_MULTI2
/*
 * Build the setup descriptors for a MULTI2 request: load the system
 * key, the data key (with round count), and the IV/state, in that
 * order.  Appends to @desc and advances *@seq_size.
 */
static inline void ssi_blkcipher_create_multi2_setup_desc(
	struct crypto_tfm *tfm,
	struct blkcipher_req_ctx *req_ctx,
	unsigned int ivsize,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);

	int direction = req_ctx->gen_ctx.op_type;
	/* Load system key (first part of the combined key buffer) */
	hw_desc_init(&desc[*seq_size]);
	set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
	set_cipher_config0(&desc[*seq_size], direction);
	set_din_type(&desc[*seq_size], DMA_DLLI, ctx_p->user.key_dma_addr,
		     CC_MULTI2_SYSTEM_KEY_SIZE, NS_BIT);
	set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
	set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
	(*seq_size)++;

	/* load data key (follows the system key in the buffer) */
	hw_desc_init(&desc[*seq_size]);
	set_din_type(&desc[*seq_size], DMA_DLLI,
		     (ctx_p->user.key_dma_addr + CC_MULTI2_SYSTEM_KEY_SIZE),
		     CC_MULTI2_DATA_KEY_SIZE, NS_BIT);
	set_multi2_num_rounds(&desc[*seq_size], ctx_p->key_round_number);
	set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
	set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
	set_cipher_config0(&desc[*seq_size], direction);
	set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
	(*seq_size)++;

	/* Set state (IV) */
	hw_desc_init(&desc[*seq_size]);
	set_din_type(&desc[*seq_size], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
		     ivsize, NS_BIT);
	set_cipher_config0(&desc[*seq_size], direction);
	set_flow_mode(&desc[*seq_size], ctx_p->flow_mode);
	set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
	set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
	(*seq_size)++;
}
#endif /*SSI_CC_HAS_MULTI2*/
609
/*
 * Build the data-movement descriptor(s) for a request.  For contiguous
 * (DLLI) buffers a single DIN->DOUT descriptor is emitted; for
 * scattered (MLLI) buffers the MLLI table is first copied into SRAM via
 * a BYPASS descriptor, then referenced for both input and output.
 * When @areq is non-NULL the last descriptor raises a completion IRQ.
 */
static inline void
ssi_blkcipher_create_data_desc(
	struct crypto_tfm *tfm,
	struct blkcipher_req_ctx *req_ctx,
	struct scatterlist *dst, struct scatterlist *src,
	unsigned int nbytes,
	void *areq,
	struct cc_hw_desc desc[],
	unsigned int *seq_size)
{
	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	unsigned int flow_mode = ctx_p->flow_mode;

	/* translate the setup flow mode into the matching data flow mode */
	switch (ctx_p->flow_mode) {
	case S_DIN_to_AES:
		flow_mode = DIN_AES_DOUT;
		break;
	case S_DIN_to_DES:
		flow_mode = DIN_DES_DOUT;
		break;
#if SSI_CC_HAS_MULTI2
	case S_DIN_to_MULTI2:
		flow_mode = DIN_MULTI2_DOUT;
		break;
#endif /*SSI_CC_HAS_MULTI2*/
	default:
		SSI_LOG_ERR("invalid flow mode, flow_mode = %d \n", flow_mode);
		return;
	}
	/* Process */
	if (likely(req_ctx->dma_buf_type == SSI_DMA_BUF_DLLI)) {
		/* contiguous buffers: single direct descriptor */
		SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n",
			      (unsigned long long)sg_dma_address(src),
			      nbytes);
		SSI_LOG_DEBUG(" data params addr 0x%llX length 0x%X \n",
			      (unsigned long long)sg_dma_address(dst),
			      nbytes);
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
			     nbytes, NS_BIT);
		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
			      nbytes, NS_BIT, (!areq ? 0 : 1));
		if (areq)
			set_queue_last_ind(&desc[*seq_size]);

		set_flow_mode(&desc[*seq_size], flow_mode);
		(*seq_size)++;
	} else {
		/* bypass: stage the MLLI table into SRAM first */
		SSI_LOG_DEBUG(" bypass params addr 0x%llX "
			     "length 0x%X addr 0x%08X\n",
			(unsigned long long)req_ctx->mlli_params.mlli_dma_addr,
			req_ctx->mlli_params.mlli_len,
			(unsigned int)ctx_p->drvdata->mlli_sram_addr);
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI,
			     req_ctx->mlli_params.mlli_dma_addr,
			     req_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[*seq_size],
			      ctx_p->drvdata->mlli_sram_addr,
			      req_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[*seq_size], BYPASS);
		(*seq_size)++;

		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_MLLI,
			     ctx_p->drvdata->mlli_sram_addr,
			     req_ctx->in_mlli_nents, NS_BIT);
		if (req_ctx->out_nents == 0) {
			/* in-place: input and output share one MLLI table */
			SSI_LOG_DEBUG(" din/dout params addr 0x%08X "
				     "addr 0x%08X\n",
				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
				(unsigned int)ctx_p->drvdata->mlli_sram_addr);
			set_dout_mlli(&desc[*seq_size],
				      ctx_p->drvdata->mlli_sram_addr,
				      req_ctx->in_mlli_nents, NS_BIT,
				      (!areq ? 0 : 1));
		} else {
			/* separate dst: its MLLI entries follow the input's */
			SSI_LOG_DEBUG(" din/dout params "
				     "addr 0x%08X addr 0x%08X\n",
				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
				(unsigned int)ctx_p->drvdata->mlli_sram_addr +
				(u32)LLI_ENTRY_BYTE_SIZE *
				req_ctx->in_nents);
			set_dout_mlli(&desc[*seq_size],
				      (ctx_p->drvdata->mlli_sram_addr +
				       (LLI_ENTRY_BYTE_SIZE *
					req_ctx->in_mlli_nents)),
				      req_ctx->out_mlli_nents, NS_BIT,
				      (!areq ? 0 : 1));
		}
		if (areq)
			set_queue_last_ind(&desc[*seq_size]);

		set_flow_mode(&desc[*seq_size], flow_mode);
		(*seq_size)++;
	}
}
708
/*
 * Common completion path for both sync and async requests: unmap the
 * DMA buffers, maintain the BYPASS inflight counter, and for async
 * requests (@areq != NULL) fulfil the crypto API contract of leaving
 * the last ciphertext block in req->info before completing the request.
 *
 * Returns 0 for async requests; for sync requests returns
 * completion_error (always 0 in the visible code).
 */
static int ssi_blkcipher_complete(struct device *dev,
				  struct ssi_ablkcipher_ctx *ctx_p,
				  struct blkcipher_req_ctx *req_ctx,
				  struct scatterlist *dst,
				  struct scatterlist *src,
				  unsigned int ivsize,
				  void *areq,
				  void __iomem *cc_base)
{
	int completion_error = 0;
	u32 inflight_counter;
	struct ablkcipher_request *req = (struct ablkcipher_request *)areq;

	ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);

	/* Set the inflight couter value to local variable */
	/* NOTE(review): inflight_counter is read but never used afterwards */
	inflight_counter = ctx_p->drvdata->inflight_counter;
	/* Decrease the inflight counter */
	if (ctx_p->flow_mode == BYPASS && ctx_p->drvdata->inflight_counter > 0)
		ctx_p->drvdata->inflight_counter--;

	if (areq) {
		/*
		 * The crypto API expects us to set the req->info to the last
		 * ciphertext block. For encrypt, simply copy from the result.
		 * For decrypt, we must copy from a saved buffer since this
		 * could be an in-place decryption operation and the src is
		 * lost by this point.
		 */
		if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
			memcpy(req->info, req_ctx->backup_info, ivsize);
			kfree(req_ctx->backup_info);
		} else {
			scatterwalk_map_and_copy(req->info, req->dst,
						 (req->nbytes - ivsize),
						 ivsize, 0);
		}

		ablkcipher_request_complete(areq, completion_error);
		return 0;
	}
	return completion_error;
}
752
/*
 * Core processing path shared by sync and async cipher operations:
 * validate sizes, map the request buffers, build the HW descriptor
 * sequence (setup + data), and push it to the request manager.
 *
 * @areq: the ablkcipher_request for async operation, or NULL for a
 *        synchronous call (in which case completion runs inline).
 * Returns -EINPROGRESS for queued async requests, 0 on sync success,
 * or a negative errno.
 */
static int ssi_blkcipher_process(
	struct crypto_tfm *tfm,
	struct blkcipher_req_ctx *req_ctx,
	struct scatterlist *dst, struct scatterlist *src,
	unsigned int nbytes,
	void *info, //req info
	unsigned int ivsize,
	void *areq,
	enum drv_crypto_direction direction)
{
	struct ssi_ablkcipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = &ctx_p->drvdata->plat_dev->dev;
	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
	struct ssi_crypto_req ssi_req = {};
	int rc, seq_len = 0, cts_restore_flag = 0;

	SSI_LOG_DEBUG("%s areq=%p info=%p nbytes=%d\n",
		      ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"),
		      areq, info, nbytes);

	CHECK_AND_RETURN_UPON_FIPS_ERROR();
	/* STAT_PHASE_0: Init and sanity checks */

	/* TODO: check data length according to mode */
	if (unlikely(validate_data_size(ctx_p, nbytes))) {
		SSI_LOG_ERR("Unsupported data size %d.\n", nbytes);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
		rc = -EINVAL;
		goto exit_process;
	}
	if (nbytes == 0) {
		/* No data to process is valid */
		rc = 0;
		goto exit_process;
	}
	/*For CTS in case of data size aligned to 16 use CBC mode*/
	/* NOTE(review): this temporarily mutates the shared tfm context;
	 * looks racy for concurrent requests on one tfm — confirm locking
	 * guarantees in the request manager.
	 */
	if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
		ctx_p->cipher_mode = DRV_CIPHER_CBC;
		cts_restore_flag = 1;
	}

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)ssi_ablkcipher_complete;
	ssi_req.user_arg = (void *)areq;

#ifdef ENABLE_CYCLE_COUNT
	ssi_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
		STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;

#endif

	/* Setup request context */
	req_ctx->gen_ctx.op_type = direction;

	/* STAT_PHASE_1: Map buffers */

	rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes, info, src, dst);
	if (unlikely(rc != 0)) {
		SSI_LOG_ERR("map_request() failed\n");
		goto exit_process;
	}

	/* STAT_PHASE_2: Create sequence */

	/* Setup processing */
#if SSI_CC_HAS_MULTI2
	if (ctx_p->flow_mode == S_DIN_to_MULTI2)
		ssi_blkcipher_create_multi2_setup_desc(tfm, req_ctx, ivsize,
						       desc, &seq_len);
	else
#endif /*SSI_CC_HAS_MULTI2*/
		ssi_blkcipher_create_setup_desc(tfm, req_ctx, ivsize, nbytes,
						desc, &seq_len);
	/* Data processing */
	ssi_blkcipher_create_data_desc(tfm,
				       req_ctx,
				       dst, src,
				       nbytes,
				       areq,
				       desc, &seq_len);

	/* do we need to generate IV? */
	if (req_ctx->is_giv) {
		ssi_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
		ssi_req.ivgen_dma_addr_len = 1;
		/* set the IV size (8/16 B long)*/
		ssi_req.ivgen_size = ivsize;
	}

	/* STAT_PHASE_3: Lock HW and push sequence */

	rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, (!areq) ? 0 : 1);
	if (areq) {
		if (unlikely(rc != -EINPROGRESS)) {
			/* Failed to send the request or request completed synchronously */
			ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
		}

	} else {
		if (rc != 0) {
			ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
		} else {
			/* sync path: complete inline (areq == NULL, so
			 * backup_info is not consumed/freed here)
			 */
			rc = ssi_blkcipher_complete(dev, ctx_p, req_ctx, dst,
						    src, ivsize, NULL,
						    ctx_p->drvdata->cc_base);
		}
	}

exit_process:
	if (cts_restore_flag != 0)
		ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;

	/* only the async-in-progress path keeps backup_info alive; the
	 * completion callback frees it for decrypt requests
	 */
	if (rc != -EINPROGRESS)
		kfree(req_ctx->backup_info);

	return rc;
}
870
/*
 * Async completion callback registered via ssi_req.user_cb: recover the
 * ablkcipher request from the opaque @ssi_req argument and hand off to
 * the common completion path (which also calls request_complete).
 */
static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
	struct ablkcipher_request *areq = (struct ablkcipher_request *)ssi_req;
	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct ssi_ablkcipher_ctx *ctx_p = crypto_ablkcipher_ctx(tfm);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);

	CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR();

	ssi_blkcipher_complete(dev, ctx_p, req_ctx, areq->dst, areq->src,
			       ivsize, areq, cc_base);
}
884
302ef8eb
GBY
885/* Async wrap functions */
886
887static int ssi_ablkcipher_init(struct crypto_tfm *tfm)
888{
889 struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
c8f17865 890
302ef8eb
GBY
891 ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
892
893 return ssi_blkcipher_init(tfm);
894}
895
c8f17865
TI
896static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
897 const u8 *key,
302ef8eb
GBY
898 unsigned int keylen)
899{
900 return ssi_blkcipher_setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
901}
902
903static int ssi_ablkcipher_encrypt(struct ablkcipher_request *req)
904{
905 struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
906 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
907 struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
908 unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
909
a4d826b9 910 req_ctx->is_giv = false;
302ef8eb
GBY
911
912 return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_ENCRYPT);
913}
914
915static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
916{
917 struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
918 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
919 struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
920 unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
921
0ddc27d4
GBY
922 /*
923 * Allocate and save the last IV sized bytes of the source, which will
924 * be lost in case of in-place decryption and might be needed for CTS.
925 */
926 req_ctx->backup_info = kmalloc(ivsize, GFP_KERNEL);
927 if (!req_ctx->backup_info)
928 return -ENOMEM;
929
930 scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
931 (req->nbytes - ivsize), ivsize, 0);
a4d826b9 932 req_ctx->is_giv = false;
0ddc27d4 933
302ef8eb
GBY
934 return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
935}
936
302ef8eb
GBY
/* DX Block cipher alg */
/*
 * Template table from which the driver's crypto_alg entries are built by
 * ssi_ablkcipher_create_alg().  Each entry supplies the Linux crypto API
 * name/driver-name, the ablkcipher callbacks, and the hardware cipher/flow
 * mode programmed into the CryptoCell engine.  Optional algorithms are
 * gated on the SSI_CC_HAS_* feature macros.
 */
static struct ssi_alg_template blkcipher_algs[] = {
/* Async template */
#if SSI_CC_HAS_AES_XTS
	/* XTS uses a double-length key (two AES keys), hence the *2 sizes.
	 * The du512/du4096 variants fix the data-unit size via type flags.
	 */
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
			.geniv = "eseqiv",
			},
		.cipher_mode = DRV_CIPHER_XTS,
		.flow_mode = S_DIN_to_AES,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-du512-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_XTS,
		.flow_mode = S_DIN_to_AES,
	},
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-du4096-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_XTS,
		.flow_mode = S_DIN_to_AES,
	},
#endif /*SSI_CC_HAS_AES_XTS*/
#if SSI_CC_HAS_AES_ESSIV
	/* ESSIV also takes a double-length key (cipher key + IV key). */
	{
		.name = "essiv(aes)",
		.driver_name = "essiv-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_ESSIV,
		.flow_mode = S_DIN_to_AES,
	},
	{
		.name = "essiv(aes)",
		.driver_name = "essiv-aes-du512-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_ESSIV,
		.flow_mode = S_DIN_to_AES,
	},
	{
		.name = "essiv(aes)",
		.driver_name = "essiv-aes-du4096-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_ESSIV,
		.flow_mode = S_DIN_to_AES,
	},
#endif /*SSI_CC_HAS_AES_ESSIV*/
#if SSI_CC_HAS_AES_BITLOCKER
	{
		.name = "bitlocker(aes)",
		.driver_name = "bitlocker-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_BITLOCKER,
		.flow_mode = S_DIN_to_AES,
	},
	{
		.name = "bitlocker(aes)",
		.driver_name = "bitlocker-aes-du512-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_BITLOCKER,
		.flow_mode = S_DIN_to_AES,
	},
	{
		.name = "bitlocker(aes)",
		.driver_name = "bitlocker-aes-du4096-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_BITLOCKER,
		.flow_mode = S_DIN_to_AES,
	},
#endif /*SSI_CC_HAS_AES_BITLOCKER*/
	{
		.name = "ecb(aes)",
		.driver_name = "ecb-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = 0,	/* ECB takes no IV */
			},
		.cipher_mode = DRV_CIPHER_ECB,
		.flow_mode = S_DIN_to_AES,
	},
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_AES,
	},
	{
		.name = "ofb(aes)",
		.driver_name = "ofb-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_OFB,
		.flow_mode = S_DIN_to_AES,
	},
#if SSI_CC_HAS_AES_CTS
	{
		.name = "cts1(cbc(aes))",
		.driver_name = "cts1-cbc-aes-dx",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_CBC_CTS,
		.flow_mode = S_DIN_to_AES,
	},
#endif
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-dx",
		.blocksize = 1,	/* stream mode: any request length allowed */
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-dx",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_DES,
	},
	{
		.name = "ecb(des3_ede)",
		.driver_name = "ecb-3des-dx",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = 0,	/* ECB takes no IV */
			},
		.cipher_mode = DRV_CIPHER_ECB,
		.flow_mode = S_DIN_to_DES,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-dx",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_DES,
	},
	{
		.name = "ecb(des)",
		.driver_name = "ecb-des-dx",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = 0,	/* ECB takes no IV */
			},
		.cipher_mode = DRV_CIPHER_ECB,
		.flow_mode = S_DIN_to_DES,
	},
#if SSI_CC_HAS_MULTI2
	{
		.name = "cbc(multi2)",
		.driver_name = "cbc-multi2-dx",
		.blocksize = CC_MULTI2_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			.decrypt = ssi_ablkcipher_decrypt,
			.min_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
			.max_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
			.ivsize = CC_MULTI2_IV_SIZE,
			},
		.cipher_mode = DRV_MULTI2_CBC,
		.flow_mode = S_DIN_to_MULTI2,
	},
	{
		.name = "ofb(multi2)",
		.driver_name = "ofb-multi2-dx",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ssi_ablkcipher_setkey,
			.encrypt = ssi_ablkcipher_encrypt,
			/* OFB is symmetric: decrypt is the same operation
			 * as encrypt, so both point at the encrypt handler.
			 */
			.decrypt = ssi_ablkcipher_encrypt,
			.min_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
			.max_keysize = CC_MULTI2_SYSTEM_N_DATA_KEY_SIZE + 1,
			.ivsize = CC_MULTI2_IV_SIZE,
			},
		.cipher_mode = DRV_MULTI2_OFB,
		.flow_mode = S_DIN_to_MULTI2,
	},
#endif /*SSI_CC_HAS_MULTI2*/
};
1272
c8f17865 1273static
302ef8eb
GBY
1274struct ssi_crypto_alg *ssi_ablkcipher_create_alg(struct ssi_alg_template *template)
1275{
1276 struct ssi_crypto_alg *t_alg;
1277 struct crypto_alg *alg;
1278
1279 t_alg = kzalloc(sizeof(struct ssi_crypto_alg), GFP_KERNEL);
1280 if (!t_alg) {
1281 SSI_LOG_ERR("failed to allocate t_alg\n");
1282 return ERR_PTR(-ENOMEM);
1283 }
1284
1285 alg = &t_alg->crypto_alg;
1286
1287 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
1288 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1289 template->driver_name);
1290 alg->cra_module = THIS_MODULE;
1291 alg->cra_priority = SSI_CRA_PRIO;
1292 alg->cra_blocksize = template->blocksize;
1293 alg->cra_alignmask = 0;
1294 alg->cra_ctxsize = sizeof(struct ssi_ablkcipher_ctx);
c8f17865 1295
da38a83b
GBY
1296 alg->cra_init = ssi_ablkcipher_init;
1297 alg->cra_exit = ssi_blkcipher_exit;
1298 alg->cra_type = &crypto_ablkcipher_type;
1299 alg->cra_ablkcipher = template->template_ablkcipher;
1300 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
302ef8eb 1301 template->type;
302ef8eb
GBY
1302
1303 t_alg->cipher_mode = template->cipher_mode;
1304 t_alg->flow_mode = template->flow_mode;
1305
1306 return t_alg;
1307}
1308
1309int ssi_ablkcipher_free(struct ssi_drvdata *drvdata)
1310{
1311 struct ssi_crypto_alg *t_alg, *n;
c8f17865 1312 struct ssi_blkcipher_handle *blkcipher_handle =
302ef8eb
GBY
1313 drvdata->blkcipher_handle;
1314 struct device *dev;
492ddcbb 1315
302ef8eb
GBY
1316 dev = &drvdata->plat_dev->dev;
1317
6191eb1d 1318 if (blkcipher_handle) {
302ef8eb
GBY
1319 /* Remove registered algs */
1320 list_for_each_entry_safe(t_alg, n,
1321 &blkcipher_handle->blkcipher_alg_list,
1322 entry) {
1323 crypto_unregister_alg(&t_alg->crypto_alg);
1324 list_del(&t_alg->entry);
1325 kfree(t_alg);
1326 }
1327 kfree(blkcipher_handle);
1328 drvdata->blkcipher_handle = NULL;
1329 }
1330 return 0;
1331}
1332
302ef8eb
GBY
1333int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
1334{
1335 struct ssi_blkcipher_handle *ablkcipher_handle;
1336 struct ssi_crypto_alg *t_alg;
1337 int rc = -ENOMEM;
1338 int alg;
1339
1340 ablkcipher_handle = kmalloc(sizeof(struct ssi_blkcipher_handle),
1341 GFP_KERNEL);
6191eb1d 1342 if (!ablkcipher_handle)
302ef8eb
GBY
1343 return -ENOMEM;
1344
1345 drvdata->blkcipher_handle = ablkcipher_handle;
1346
1347 INIT_LIST_HEAD(&ablkcipher_handle->blkcipher_alg_list);
1348
1349 /* Linux crypto */
1350 SSI_LOG_DEBUG("Number of algorithms = %zu\n", ARRAY_SIZE(blkcipher_algs));
1351 for (alg = 0; alg < ARRAY_SIZE(blkcipher_algs); alg++) {
1352 SSI_LOG_DEBUG("creating %s\n", blkcipher_algs[alg].driver_name);
1353 t_alg = ssi_ablkcipher_create_alg(&blkcipher_algs[alg]);
1354 if (IS_ERR(t_alg)) {
1355 rc = PTR_ERR(t_alg);
1356 SSI_LOG_ERR("%s alg allocation failed\n",
1357 blkcipher_algs[alg].driver_name);
1358 goto fail0;
1359 }
1360 t_alg->drvdata = drvdata;
1361
1362 SSI_LOG_DEBUG("registering %s\n", blkcipher_algs[alg].driver_name);
1363 rc = crypto_register_alg(&t_alg->crypto_alg);
1364 SSI_LOG_DEBUG("%s alg registration rc = %x\n",
1365 t_alg->crypto_alg.cra_driver_name, rc);
1366 if (unlikely(rc != 0)) {
1367 SSI_LOG_ERR("%s alg registration failed\n",
1368 t_alg->crypto_alg.cra_driver_name);
1369 kfree(t_alg);
1370 goto fail0;
1371 } else {
c8f17865 1372 list_add_tail(&t_alg->entry,
302ef8eb 1373 &ablkcipher_handle->blkcipher_alg_list);
c8f17865 1374 SSI_LOG_DEBUG("Registered %s\n",
302ef8eb
GBY
1375 t_alg->crypto_alg.cra_driver_name);
1376 }
1377 }
1378 return 0;
1379
1380fail0:
1381 ssi_ablkcipher_free(drvdata);
1382 return rc;
1383}