// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128-, 192- and 256-bit
 * keys in CBC and ECB mode, as well as for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 */
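
/*
 * Usage sketch (illustrative only): consumers never call this file directly,
 * they go through the generic skcipher API, which can select this driver for
 * the algorithms it registers. The "cbc(aes)" name, buffer names and the
 * 16-byte key below are assumptions for illustration; error handling and the
 * async -EINPROGRESS case are elided.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, 16);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, buflen);
 *	skcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
 *	crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */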
#include "sun4i-ss.h"

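/*
 * Optimized polling path: feeds the SS FIFOs straight from the request
 * scatterlists. Only usable when every SG entry has a length that is a
 * multiple of 4, since the FIFOs are accessed as 32-bit words.
 */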
static int sun4i_ss_opti_poll(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
        u32 mode = ctx->mode;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 spaces;
        u32 v;
        int err = 0;
        unsigned int i;
        unsigned int ileft = areq->cryptlen;
        unsigned int oleft = areq->cryptlen;
        unsigned int todo;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo; /* offsets for in and out */
        unsigned long flags;

        if (!areq->cryptlen)
                return 0;

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&ss->slock, flags);

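        /* load the key into the SS key registers, one 32-bit word at a time */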
        for (i = 0; i < op->keylen; i += 4)
                writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->iv + i * 4);
                        writel(v, ss->base + SS_IV0 + i * 4);
                }
        }
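        /* program the requested algorithm/mode bits and start the engine */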
        writel(mode, ss->base + SS_CTL);

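        /* iterate over src and dst atomically; this path uses PIO, not DMA */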
        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);
        sg_miter_next(&mi);
        sg_miter_next(&mo);
        if (!mi.addr || !mo.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
                err = -EINVAL;
                goto release_ss;
        }

        ileft = areq->cryptlen / 4;
        oleft = areq->cryptlen / 4;
        oi = 0;
        oo = 0;
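        /*
         * Pump the FIFOs: push up to rx_cnt words into the RX FIFO, re-read
         * the free/filled counts from the FCSR, then drain up to tx_cnt
         * words from the TX FIFO, until all output has been retrieved.
         */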
        do {
                todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
                if (todo) {
                        ileft -= todo;
                        writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
                        oi += todo * 4;
                }
                if (oi == mi.length) {
                        sg_miter_next(&mi);
                        oi = 0;
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);

                todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
                if (todo) {
                        oleft -= todo;
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oo += todo * 4;
                }
                if (oo == mo.length) {
                        sg_miter_next(&mo);
                        oo = 0;
                }
        } while (oleft);

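        /* read back the updated IV so chaining across requests keeps working */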
        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = readl(ss->base + SS_IV0 + i * 4);
                        *(u32 *)(areq->iv + i * 4) = v;
                }
        }

release_ss:
        sg_miter_stop(&mi);
        sg_miter_stop(&mo);
        writel(0, ss->base + SS_CTL);
        spin_unlock_irqrestore(&ss->slock, flags);
        return err;
}

/* Generic function that supports SGs whose sizes are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        int no_chunk = 1;
        struct scatterlist *in_sg = areq->src;
        struct scatterlist *out_sg = areq->dst;
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct sun4i_ss_alg_template *algt;
        u32 mode = ctx->mode;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 v;
        u32 spaces;
        int err = 0;
        unsigned int i;
        unsigned int ileft = areq->cryptlen;
        unsigned int oleft = areq->cryptlen;
        unsigned int todo;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo; /* offsets for in and out */
        char buf[4 * SS_RX_MAX];  /* buffer for linearizing the source SG */
        char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the destination SG */
        unsigned int ob = 0;  /* offset in buf */
        unsigned int obo = 0; /* offset in bufo */
        unsigned int obl = 0; /* length of data in bufo */
        unsigned long flags;
        bool need_fallback = false;

        if (!areq->cryptlen)
                return 0;

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
        if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
                need_fallback = true;

        /*
         * If all the SG lengths are a multiple of 4, we can use the
         * optimized SS function.
         */
        while (in_sg && no_chunk == 1) {
                if (in_sg->length % 4)
                        no_chunk = 0;
                in_sg = sg_next(in_sg);
        }
        while (out_sg && no_chunk == 1) {
                if (out_sg->length % 4)
                        no_chunk = 0;
                out_sg = sg_next(out_sg);
        }

        if (no_chunk == 1 && !need_fallback)
                return sun4i_ss_opti_poll(areq);

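        /*
         * Request sizes that are not a multiple of the block size cannot be
         * handled by the hardware, so delegate them to the software fallback.
         */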
        if (need_fallback) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);

                skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
                skcipher_request_set_callback(subreq, areq->base.flags, NULL,
                                              NULL);
                skcipher_request_set_crypt(subreq, areq->src, areq->dst,
                                           areq->cryptlen, areq->iv);
                if (ctx->mode & SS_DECRYPTION)
                        err = crypto_skcipher_decrypt(subreq);
                else
                        err = crypto_skcipher_encrypt(subreq);
                skcipher_request_zero(subreq);
                return err;
        }

        spin_lock_irqsave(&ss->slock, flags);

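        /* key, IV and mode are programmed exactly as in the optimized path */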
        for (i = 0; i < op->keylen; i += 4)
                writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->iv + i * 4);
                        writel(v, ss->base + SS_IV0 + i * 4);
                }
        }
        writel(mode, ss->base + SS_CTL);

        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                       SG_MITER_TO_SG | SG_MITER_ATOMIC);
        sg_miter_next(&mi);
        sg_miter_next(&mo);
        if (!mi.addr || !mo.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
                err = -EINVAL;
                goto release_ss;
        }
        ileft = areq->cryptlen;
        oleft = areq->cryptlen;
        oi = 0;
        oo = 0;

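        /*
         * Main loop of the generic case: runs of whole 4-byte words in the
         * current SG entry go straight to/from the FIFOs, while leftover
         * bytes are staged through the bounce buffers buf (input) and bufo
         * (output) until a full 32-bit word is available.
         */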
        while (oleft) {
                if (ileft) {
                        /*
                         * todo is the number of consecutive 4-byte words that
                         * we can read from the current SG.
                         */
                        todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
                        if (todo && !ob) {
                                writesl(ss->base + SS_RXFIFO, mi.addr + oi,
                                        todo);
                                ileft -= todo * 4;
                                oi += todo * 4;
                        } else {
                                /*
                                 * Not enough consecutive bytes, so linearize
                                 * them in buf; here todo is in bytes. After
                                 * the copy, once we hold a multiple of 4 we
                                 * must be able to write all of buf in one
                                 * pass, which is why we min() against rx_cnt.
                                 */
                                todo = min3(rx_cnt * 4 - ob, ileft,
                                            mi.length - oi);
                                memcpy(buf + ob, mi.addr + oi, todo);
                                ileft -= todo;
                                oi += todo;
                                ob += todo;
                                if (!(ob % 4)) {
                                        writesl(ss->base + SS_RXFIFO, buf,
                                                ob / 4);
                                        ob = 0;
                                }
                        }
                        if (oi == mi.length) {
                                sg_miter_next(&mi);
                                oi = 0;
                        }
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);
                dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
                        mode,
                        oi, mi.length, ileft, areq->cryptlen, rx_cnt,
                        oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

                if (!tx_cnt)
                        continue;
                /* todo is in 4-byte words */
                todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
                if (todo) {
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oleft -= todo * 4;
                        oo += todo * 4;
                        if (oo == mo.length) {
                                sg_miter_next(&mo);
                                oo = 0;
                        }
                } else {
                        /*
                         * Read obl bytes into bufo; read as much as possible
                         * in order to empty the device FIFO.
                         */
                        readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
                        obl = tx_cnt * 4;
                        obo = 0;
                        do {
                                /*
                                 * How many bytes can we copy? No more than
                                 * the remaining SG size and no more than the
                                 * remaining buffer; no need to test against
                                 * oleft.
                                 */
                                todo = min(mo.length - oo, obl - obo);
                                memcpy(mo.addr + oo, bufo + obo, todo);
                                oleft -= todo;
                                obo += todo;
                                oo += todo;
                                if (oo == mo.length) {
                                        sg_miter_next(&mo);
                                        oo = 0;
                                }
                        } while (obo < obl);
                        /* bufo must be fully consumed here */
                }
        }
        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = readl(ss->base + SS_IV0 + i * 4);
                        *(u32 *)(areq->iv + i * 4) = v;
                }
        }

release_ss:
        sg_miter_stop(&mi);
        sg_miter_stop(&mo);
        writel(0, ss->base + SS_CTL);
        spin_unlock_irqrestore(&ss->slock, flags);

        return err;
}

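/*
 * The per-algorithm entry points below only select the SS_CTL mode bits
 * (algorithm, chaining mode, direction and key size); all data movement
 * is handled by sun4i_ss_cipher_poll().
 */
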
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

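/*
 * Set up the transform context: stash the SS device pointer, size the
 * request context and allocate the software fallback used for request
 * sizes the hardware cannot handle.
 */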
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
        struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
        struct sun4i_ss_alg_template *algt;
        const char *name = crypto_tfm_alg_name(tfm);

        memset(op, 0, sizeof(struct sun4i_tfm_ctx));

        algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
                            alg.crypto.base);
        op->ss = algt->ss;

        crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
                                    sizeof(struct sun4i_cipher_req_ctx));

        op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(op->fallback_tfm)) {
                dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
                        name, PTR_ERR(op->fallback_tfm));
                return PTR_ERR(op->fallback_tfm);
        }

        return 0;
}

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
        struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

        crypto_free_sync_skcipher(op->fallback_tfm);
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;

        switch (keylen) {
        case 128 / 8:
                op->keymode = SS_AES_128BITS;
                break;
        case 192 / 8:
                op->keymode = SS_AES_192BITS;
                break;
        case 256 / 8:
                op->keymode = SS_AES_256BITS;
                break;
        default:
                dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        u32 flags;
        u32 tmp[DES_EXPKEY_WORDS];
        int ret;

        if (unlikely(keylen != DES_KEY_SIZE)) {
                dev_err(ss->dev, "Invalid keylen %u\n", keylen);
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        flags = crypto_skcipher_get_flags(tfm);

        ret = des_ekey(tmp, key);
        if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
                dev_dbg(ss->dev, "Weak key %u\n", keylen);
                return -EINVAL;
        }

        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
                         unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        int err;

        err = des3_verify_key(tfm, key);
        if (unlikely(err))
                return err;

        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}