/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))
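
/*
 * Each DESC_RSA_*_LEN above accounts for a descriptor header and an
 * OPERATION command (2 * CAAM_CMD_SZ) wrapped around the protocol
 * data block.
 */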

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

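/*
 * tmp1 and tmp2 are scratch buffers that the accelerator both reads and
 * writes, hence the bidirectional DMA mappings below.
 */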
static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

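/*
 * Count the leading zero bytes of the request source, walking the
 * scatterlist with an sg_mapping_iter so chunk boundaries are handled.
 * The caller drops these bytes so that the integer handed to the
 * accelerator carries no redundant leading zeros.
 */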
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		while (len && !*buff) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

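/*
 * Allocate the extended descriptor: strip leading zeros from the source,
 * DMA-map the source and destination scatterlists and, when either one
 * has more than one entry, build and map a sec4 S/G table for it.
 */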
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int lzeros;

	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
	if (lzeros < 0)
		return ERR_PTR(lzeros);

	req->src_len -= lzeros;
	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}

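/*
 * Fill the RSA_PUB protocol data block: map the modulus n and the public
 * exponent e, then point f (input) and g (output) either directly at a
 * single segment or at the corresponding part of the sec4 S/G table.
 */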
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

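/*
 * Form 3 works on the CRT members: the primes p and q, the exponents
 * dp and dq, and the coefficient qinv, plus the two scratch buffers.
 */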
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

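/*
 * Dispatch decryption to the descriptor matching the private key form
 * chosen at setkey time.
 */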
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

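/*
 * Zeroize every secret key member before freeing it; only the public
 * values e and n go through plain kfree().
 */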
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	/* Check the remaining length before dereferencing, so an all-zero
	 * buffer does not trigger a one-byte read past its end.
	 */
	while (*nbytes && !**ptr) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to fewer bytes than the corresponding p or q
 * length, since BER encoding requires the minimum number of bytes to encode
 * an integer. The decoded dP, dQ and qInv values therefore have to be
 * zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
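/*
 * For example (hypothetical values), with dstlen = 4 a three-byte member
 * { 0x12, 0x34, 0x56 } is returned as { 0x00, 0x12, 0x34, 0x56 }.
 */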
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

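/* Reject RSA moduli larger than 4096 bits; callers pass the size in bits. */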
static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

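/*
 * Pick the richest private key form the supplied key allows: FORM1 uses
 * only n and d, FORM2 adds the primes p and q, and FORM3 adds the CRT
 * members dp, dq and qinv. An allocation failure falls back to the
 * simpler form that has already been fully set up.
 */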
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		return;		/* fall back to FORM2 */

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

	/*
	 * A failed CRT member allocation falls back to FORM2: free only the
	 * CRT members gathered so far and clear the pointers, so that the
	 * FORM2 buffers stay valid and caam_rsa_free_key() does not free
	 * anything twice.
	 */
free_dq:
	kzfree(rsa_key->dq);
	rsa_key->dq = NULL;
free_dp:
	kzfree(rsa_key->dp);
	rsa_key->dp = NULL;
	return;

	/* FORM2 setup failed: release its buffers and stay on FORM1. */
free_tmp1:
	kzfree(rsa_key->tmp1);
	rsa_key->tmp1 = NULL;
free_q:
	kzfree(rsa_key->q);
	rsa_key->q = NULL;
free_p:
	kzfree(rsa_key->p);
	rsa_key->p = NULL;
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session PKC driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}

/* Per-session PKC driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.sign = caam_rsa_dec,
	.verify = caam_rsa_enc,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.reqsize = sizeof(struct caam_rsa_req_ctx),
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};

/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 cha_inst, pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/* Determine public key hardware accelerator presence. */
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return -ENODEV;

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

	return err;
}

static void __exit caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}

module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");