/*
 * RSA padding templates.
 *
 * Copyright (c) 2015  Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/algapi.h>
#include <crypto/akcipher.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

/*
 * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
 */
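/*
 * Each array below is the DER-encoded prefix of a DigestInfo structure:
 *
 *	DigestInfo ::= SEQUENCE {
 *		digestAlgorithm	AlgorithmIdentifier,	-- OID + NULL params
 *		digest		OCTET STRING		-- header only; the
 *	}						-- hash follows at runtime
 *
 * For example, rsa_digest_info_sha256 decodes as a SEQUENCE of 0x31 bytes
 * containing the AlgorithmIdentifier for OID 2.16.840.1.101.3.4.2.1
 * (SHA-256) with NULL parameters, followed by an OCTET STRING header
 * announcing a 0x20-byte digest.
 */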
static const u8 rsa_digest_info_md5[] = {
	0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
	0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
	0x05, 0x00, 0x04, 0x10
};

static const u8 rsa_digest_info_sha1[] = {
	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
	0x2b, 0x0e, 0x03, 0x02, 0x1a,
	0x05, 0x00, 0x04, 0x14
};

static const u8 rsa_digest_info_rmd160[] = {
	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
	0x2b, 0x24, 0x03, 0x02, 0x01,
	0x05, 0x00, 0x04, 0x14
};

static const u8 rsa_digest_info_sha224[] = {
	0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
	0x05, 0x00, 0x04, 0x1c
};

static const u8 rsa_digest_info_sha256[] = {
	0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
	0x05, 0x00, 0x04, 0x20
};

static const u8 rsa_digest_info_sha384[] = {
	0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
	0x05, 0x00, 0x04, 0x30
};

static const u8 rsa_digest_info_sha512[] = {
	0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
	0x05, 0x00, 0x04, 0x40
};

static const struct rsa_asn1_template {
	const char	*name;
	const u8	*data;
	size_t		size;
} rsa_asn1_templates[] = {
#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
	_(md5),
	_(sha1),
	_(rmd160),
	_(sha256),
	_(sha384),
	_(sha512),
	_(sha224),
	{ NULL }
#undef _
};

static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
{
	const struct rsa_asn1_template *p;

	for (p = rsa_asn1_templates; p->name; p++)
		if (strcmp(name, p->name) == 0)
			return p;
	return NULL;
}
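
/*
 * rsa_lookup_asn1() maps the optional hash name supplied when the template
 * is instantiated (e.g. "pkcs1pad(rsa,sha256)") to the DigestInfo prefix
 * above; pkcs1pad_create() rejects unknown hash names with -EINVAL.
 */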

struct pkcs1pad_ctx {
	struct crypto_akcipher *child;
	unsigned int key_size;
};

struct pkcs1pad_inst_ctx {
	struct crypto_akcipher_spawn spawn;
	const struct rsa_asn1_template *digest_info;
};

struct pkcs1pad_request {
	struct scatterlist in_sg[2], out_sg[1];
	uint8_t *in_buf, *out_buf;
	struct akcipher_request child_req;
};
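
/*
 * Per-request context: in_sg has two entries so the locally allocated
 * padding prefix in in_buf can be chained in front of the caller's source
 * scatterlist, and out_buf holds the raw RSA output until it has been
 * unpadded (decrypt/verify) or re-aligned (encrypt/sign).
 */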

static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
		unsigned int keylen)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	int err;

	ctx->key_size = 0;

	err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
	if (err)
		return err;

	/* Find out new modulus size from rsa implementation */
	err = crypto_akcipher_maxsize(ctx->child);
	if (err > PAGE_SIZE)
		return -ENOTSUPP;

	ctx->key_size = err;
	return 0;
}

static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
		unsigned int keylen)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	int err;

	ctx->key_size = 0;

	err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
	if (err)
		return err;

	/* Find out new modulus size from rsa implementation */
	err = crypto_akcipher_maxsize(ctx->child);
	if (err > PAGE_SIZE)
		return -ENOTSUPP;

	ctx->key_size = err;
	return 0;
}
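
/*
 * Both key setters cache the modulus size reported by the child RSA
 * transform in ctx->key_size; a modulus larger than PAGE_SIZE is rejected
 * with -ENOTSUPP.
 */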

static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);

	/*
	 * The maximum destination buffer size for the encrypt/sign operations
	 * will be the same as for RSA, even though it's smaller for
	 * decrypt/verify.
	 */

	return ctx->key_size;
}

static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
		struct scatterlist *next)
{
	int nsegs = next ? 2 : 1;

	sg_init_table(sg, nsegs);
	sg_set_buf(sg, buf, len);

	if (next)
		sg_chain(sg, nsegs, next);
}

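/*
 * The child RSA transform may return fewer than key_size bytes when the
 * result has leading zero octets.  This completion handler re-inserts the
 * missing zeroes so encrypt/sign always produce exactly key_size bytes,
 * then frees the padding buffer.
 */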
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int pad_len;
	unsigned int len;
	u8 *out_buf;

	if (err)
		goto out;

	len = req_ctx->child_req.dst_len;
	pad_len = ctx->key_size - len;

	/* Four billion to one */
	if (likely(!pad_len))
		goto out;

	out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
	err = -ENOMEM;
	if (!out_buf)
		goto out;

	sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
			  out_buf + pad_len, len);
	sg_copy_from_buffer(req->dst,
			    sg_nents_for_len(req->dst, ctx->key_size),
			    out_buf, ctx->key_size);
	kzfree(out_buf);

out:
	req->dst_len = ctx->key_size;

	kfree(req_ctx->in_buf);

	return err;
}

static void pkcs1pad_encrypt_sign_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req,
			   pkcs1pad_encrypt_sign_complete(req, err));
}

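/*
 * Encryption builds the EME-PKCS1-v1_5 block
 *
 *	EM = 0x00 || 0x02 || PS || 0x00 || M
 *
 * where PS is at least eight nonzero random octets.  The leading 0x00 is
 * implicit: only key_size - 1 bytes are handed to the child transform,
 * which treats its input as a big-endian integer.
 */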
static int pkcs1pad_encrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;
	unsigned int i, ps_end;

	if (!ctx->key_size)
		return -EINVAL;

	if (req->src_len > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
				  GFP_KERNEL);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x02;
	for (i = 1; i < ps_end; i++)
		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
	req_ctx->in_buf[ps_end] = 0x00;

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			    ctx->key_size - 1 - req->src_len, req->src);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
				      pkcs1pad_encrypt_sign_complete_cb, req);

	/* Reuse output buffer */
	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
				   req->dst, ctx->key_size - 1, req->dst_len);

	err = crypto_akcipher_encrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS && err != -EBUSY)
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}

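/*
 * Decryption completion: strip the optional leading 0x00, check the 0x02
 * block type, skip the nonzero padding string up to its 0x00 terminator
 * (at least eight padding octets are required), and copy whatever follows
 * into the caller's destination.
 */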
static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	unsigned int dst_len;
	unsigned int pos;
	u8 *out_buf;

	if (err)
		goto done;

	err = -EINVAL;
	dst_len = req_ctx->child_req.dst_len;
	if (dst_len < ctx->key_size - 1)
		goto done;

	out_buf = req_ctx->out_buf;
	if (dst_len == ctx->key_size) {
		if (out_buf[0] != 0x00)
			/* Decrypted value had no leading 0 byte */
			goto done;

		dst_len--;
		out_buf++;
	}

	if (out_buf[0] != 0x02)
		goto done;

	for (pos = 1; pos < dst_len; pos++)
		if (out_buf[pos] == 0x00)
			break;
	if (pos < 9 || pos == dst_len)
		goto done;
	pos++;

	err = 0;

	if (req->dst_len < dst_len - pos)
		err = -EOVERFLOW;
	req->dst_len = dst_len - pos;

	if (!err)
		sg_copy_from_buffer(req->dst,
				    sg_nents_for_len(req->dst, req->dst_len),
				    out_buf + pos, req->dst_len);

done:
	kzfree(req_ctx->out_buf);

	return err;
}

static void pkcs1pad_decrypt_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
}

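/*
 * Decrypt passes the ciphertext unchanged to the child RSA transform and
 * defers all PKCS#1 v1.5 unpadding to pkcs1pad_decrypt_complete(), which
 * also runs from the callback for asynchronous children.
 */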
static int pkcs1pad_decrypt(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;

	if (!ctx->key_size || req->src_len != ctx->key_size)
		return -EINVAL;

	req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
	if (!req_ctx->out_buf)
		return -ENOMEM;

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			    ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
				      pkcs1pad_decrypt_complete_cb, req);

	/* Reuse input buffer, output to a new buffer */
	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
				   req_ctx->out_sg, req->src_len,
				   ctx->key_size);

	err = crypto_akcipher_decrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS && err != -EBUSY)
		return pkcs1pad_decrypt_complete(req, err);

	return err;
}

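/*
 * Signing builds the EMSA-PKCS1-v1_5 block
 *
 *	EM = 0x00 || 0x01 || PS || 0x00 || DigestInfo || H
 *
 * with PS a run of 0xff octets, then applies the RSA private-key
 * operation, which in the akcipher API is the child's decrypt op.  When
 * no hash was named in the template, no DigestInfo prefix is added and
 * req->src is used as-is.
 */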
static int pkcs1pad_sign(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	const struct rsa_asn1_template *digest_info = ictx->digest_info;
	int err;
	unsigned int ps_end, digest_size = 0;

	if (!ctx->key_size)
		return -EINVAL;

	if (digest_info)
		digest_size = digest_info->size;

	if (req->src_len + digest_size > ctx->key_size - 11)
		return -EOVERFLOW;

	if (req->dst_len < ctx->key_size) {
		req->dst_len = ctx->key_size;
		return -EOVERFLOW;
	}

	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
				  GFP_KERNEL);
	if (!req_ctx->in_buf)
		return -ENOMEM;

	ps_end = ctx->key_size - digest_size - req->src_len - 2;
	req_ctx->in_buf[0] = 0x01;
	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
	req_ctx->in_buf[ps_end] = 0x00;

	if (digest_info)
		memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
		       digest_info->size);

	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
			    ctx->key_size - 1 - req->src_len, req->src);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
				      pkcs1pad_encrypt_sign_complete_cb, req);

	/* Reuse output buffer */
	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
				   req->dst, ctx->key_size - 1, req->dst_len);

	err = crypto_akcipher_decrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS && err != -EBUSY)
		return pkcs1pad_encrypt_sign_complete(req, err);

	return err;
}

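/*
 * Verify completion: undo the framing recovered by the RSA public-key
 * operation (optional leading 0x00, 0x01 block type, 0xff padding, 0x00
 * separator), compare the embedded DigestInfo prefix if one is configured,
 * and finally memcmp() the recovered digest against the digest appended
 * to req->src.
 */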
static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	const struct rsa_asn1_template *digest_info = ictx->digest_info;
	unsigned int dst_len;
	unsigned int pos;
	u8 *out_buf;

	if (err)
		goto done;

	err = -EINVAL;
	dst_len = req_ctx->child_req.dst_len;
	if (dst_len < ctx->key_size - 1)
		goto done;

	out_buf = req_ctx->out_buf;
	if (dst_len == ctx->key_size) {
		if (out_buf[0] != 0x00)
			/* Decrypted value had no leading 0 byte */
			goto done;

		dst_len--;
		out_buf++;
	}

	err = -EBADMSG;
	if (out_buf[0] != 0x01)
		goto done;

	for (pos = 1; pos < dst_len; pos++)
		if (out_buf[pos] != 0xff)
			break;

	if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
		goto done;
	pos++;

	if (digest_info) {
		if (crypto_memneq(out_buf + pos, digest_info->data,
				  digest_info->size))
			goto done;

		pos += digest_info->size;
	}

	err = 0;

	if (req->dst_len != dst_len - pos) {
		err = -EKEYREJECTED;
		req->dst_len = dst_len - pos;
		goto done;
	}
	/* Extract appended digest. */
	sg_pcopy_to_buffer(req->src,
			   sg_nents_for_len(req->src,
					    req->src_len + req->dst_len),
			   req_ctx->out_buf + ctx->key_size,
			   req->dst_len, ctx->key_size);
	/* Do the actual verification step. */
	if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
		   req->dst_len) != 0)
		err = -EKEYREJECTED;
done:
	kzfree(req_ctx->out_buf);

	return err;
}

static void pkcs1pad_verify_complete_cb(
		struct crypto_async_request *child_async_req, int err)
{
	struct akcipher_request *req = child_async_req->data;
	struct crypto_async_request async_req;

	if (err == -EINPROGRESS)
		return;

	async_req.data = req->base.data;
	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
	async_req.flags = child_async_req->flags;
	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
}

/*
 * The verify operation is here for completeness similar to the verification
 * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
 * as in RFC2437.  RFC2437 section 9.2 doesn't define any operation to
 * retrieve the DigestInfo from a signature, instead the user is expected
 * to call the sign operation to generate the expected signature and compare
 * signatures instead of the message-digests.
 */
static int pkcs1pad_verify(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
	int err;

	if (WARN_ON(req->dst) ||
	    WARN_ON(!req->dst_len) ||
	    !ctx->key_size || req->src_len < ctx->key_size)
		return -EINVAL;

	req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL);
	if (!req_ctx->out_buf)
		return -ENOMEM;

	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
			    ctx->key_size, NULL);

	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
				      pkcs1pad_verify_complete_cb, req);

	/* Reuse input buffer, output to a new buffer */
	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
				   req_ctx->out_sg, req->src_len,
				   ctx->key_size);

	err = crypto_akcipher_encrypt(&req_ctx->child_req);
	if (err != -EINPROGRESS && err != -EBUSY)
		return pkcs1pad_verify_complete(req, err);

	return err;
}

static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
{
	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct crypto_akcipher *child_tfm;

	child_tfm = crypto_spawn_akcipher(&ictx->spawn);
	if (IS_ERR(child_tfm))
		return PTR_ERR(child_tfm);

	ctx->child = child_tfm;
	return 0;
}

static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
{
	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);

	crypto_free_akcipher(ctx->child);
}

static void pkcs1pad_free(struct akcipher_instance *inst)
{
	struct pkcs1pad_inst_ctx *ctx = akcipher_instance_ctx(inst);
	struct crypto_akcipher_spawn *spawn = &ctx->spawn;

	crypto_drop_akcipher(spawn);
	kfree(inst);
}

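/*
 * Template constructor: parses "pkcs1pad(<rsa-alg>[,<hash>])", grabs the
 * underlying akcipher, records the optional DigestInfo template and
 * registers an instance whose cra_name/cra_driver_name mirror the child's.
 */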
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	const struct rsa_asn1_template *digest_info;
	struct crypto_attr_type *algt;
	struct akcipher_instance *inst;
	struct pkcs1pad_inst_ctx *ctx;
	struct crypto_akcipher_spawn *spawn;
	struct akcipher_alg *rsa_alg;
	const char *rsa_alg_name;
	const char *hash_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
		return -EINVAL;

	rsa_alg_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(rsa_alg_name))
		return PTR_ERR(rsa_alg_name);

	hash_name = crypto_attr_alg_name(tb[2]);
	if (IS_ERR(hash_name))
		hash_name = NULL;

	if (hash_name) {
		digest_info = rsa_lookup_asn1(hash_name);
		if (!digest_info)
			return -EINVAL;
	} else
		digest_info = NULL;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = akcipher_instance_ctx(inst);
	spawn = &ctx->spawn;
	ctx->digest_info = digest_info;

	crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
	err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
				   crypto_requires_sync(algt->type, algt->mask));
	if (err)
		goto out_free_inst;

	rsa_alg = crypto_spawn_akcipher_alg(spawn);

	err = -ENAMETOOLONG;

	if (!hash_name) {
		if (snprintf(inst->alg.base.cra_name,
			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
			     rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
			goto out_drop_alg;

		if (snprintf(inst->alg.base.cra_driver_name,
			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
			     rsa_alg->base.cra_driver_name) >=
			     CRYPTO_MAX_ALG_NAME)
			goto out_drop_alg;
	} else {
		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "pkcs1pad(%s,%s)", rsa_alg->base.cra_name,
			     hash_name) >= CRYPTO_MAX_ALG_NAME)
			goto out_drop_alg;

		if (snprintf(inst->alg.base.cra_driver_name,
			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
			     rsa_alg->base.cra_driver_name,
			     hash_name) >= CRYPTO_MAX_ALG_NAME)
			goto out_drop_alg;
	}

	inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
	inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);

	inst->alg.init = pkcs1pad_init_tfm;
	inst->alg.exit = pkcs1pad_exit_tfm;

	inst->alg.encrypt = pkcs1pad_encrypt;
	inst->alg.decrypt = pkcs1pad_decrypt;
	inst->alg.sign = pkcs1pad_sign;
	inst->alg.verify = pkcs1pad_verify;
	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
	inst->alg.max_size = pkcs1pad_get_max_size;
	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;

	inst->free = pkcs1pad_free;

	err = akcipher_register_instance(tmpl, inst);
	if (err)
		goto out_drop_alg;

	return 0;

out_drop_alg:
	crypto_drop_akcipher(spawn);
out_free_inst:
	kfree(inst);
	return err;
}

struct crypto_template rsa_pkcs1pad_tmpl = {
	.name = "pkcs1pad",
	.create = pkcs1pad_create,
	.module = THIS_MODULE,
};
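
/*
 * Usage sketch (illustrative only, not built as part of this file): a
 * kernel-side user would typically instantiate the template by name and
 * drive it through the generic akcipher API, roughly as below.  Error
 * handling is omitted and the der_key/msg/out buffer names are
 * hypothetical.
 *
 *	struct crypto_akcipher *tfm;
 *	struct akcipher_request *req;
 *	struct scatterlist src, dst;
 *
 *	tfm = crypto_alloc_akcipher("pkcs1pad(rsa,sha256)", 0, 0);
 *	crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
 *	req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&src, msg, msg_len);
 *	sg_init_one(&dst, out, crypto_akcipher_maxsize(tfm));
 *	akcipher_request_set_crypt(req, &src, &dst, msg_len,
 *				   crypto_akcipher_maxsize(tfm));
 *	err = crypto_akcipher_encrypt(req);
 */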