drivers/crypto/caam/caampkc.c
1/*
2 * caam - Freescale FSL CAAM support for Public Key Cryptography
3 *
4 * Copyright 2016 Freescale Semiconductor, Inc.
5 *
6 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
7 * all the key parameters and the input and output pointers.
8 */
9#include "compat.h"
10#include "regs.h"
11#include "intern.h"
12#include "jr.h"
13#include "error.h"
14#include "desc_constr.h"
15#include "sg_sw_sec4.h"
16#include "caampkc.h"
17
18#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
19#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
20 sizeof(struct rsa_priv_f1_pdb))
21#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
22 sizeof(struct rsa_priv_f2_pdb))
23#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
24 sizeof(struct rsa_priv_f3_pdb))
25
26static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
27 struct akcipher_request *req)
28{
29 dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
30 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
31
32 if (edesc->sec4_sg_bytes)
33 dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
34 DMA_TO_DEVICE);
35}
36
37static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
38 struct akcipher_request *req)
39{
40 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
41 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
42 struct caam_rsa_key *key = &ctx->key;
43 struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
44
45 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
46 dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
47}
48
49static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
50 struct akcipher_request *req)
51{
52 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
53 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
54 struct caam_rsa_key *key = &ctx->key;
55 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
56
57 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
58 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
59}
60
61static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
62 struct akcipher_request *req)
63{
64 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
65 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
66 struct caam_rsa_key *key = &ctx->key;
67 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
68 size_t p_sz = key->p_sz;
69 size_t q_sz = key->q_sz;
70
71 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
72 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
73 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
74 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
75 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
76}
77
78static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
79 struct akcipher_request *req)
80{
81 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
82 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
83 struct caam_rsa_key *key = &ctx->key;
84 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
85 size_t p_sz = key->p_sz;
86 size_t q_sz = key->q_sz;
87
88 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
89 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
90 dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
91 dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
92 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
93 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
94 dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
95}
96
97/* RSA Job Completion handler */
98static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
99{
100 struct akcipher_request *req = context;
101 struct rsa_edesc *edesc;
102
103 if (err)
104 caam_jr_strstatus(dev, err);
105
106 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
107
108 rsa_pub_unmap(dev, edesc, req);
109 rsa_io_unmap(dev, edesc, req);
110 kfree(edesc);
111
112 akcipher_request_complete(req, err);
113}
114
115static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
116 void *context)
117{
118 struct akcipher_request *req = context;
119 struct rsa_edesc *edesc;
120
121 if (err)
122 caam_jr_strstatus(dev, err);
123
124 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
125
126 rsa_priv_f1_unmap(dev, edesc, req);
127 rsa_io_unmap(dev, edesc, req);
128 kfree(edesc);
129
130 akcipher_request_complete(req, err);
131}
132
133static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
134 void *context)
135{
136 struct akcipher_request *req = context;
137 struct rsa_edesc *edesc;
138
139 if (err)
140 caam_jr_strstatus(dev, err);
141
142 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
143
144 rsa_priv_f2_unmap(dev, edesc, req);
145 rsa_io_unmap(dev, edesc, req);
146 kfree(edesc);
147
148 akcipher_request_complete(req, err);
149}
150
151static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
152 void *context)
153{
154 struct akcipher_request *req = context;
155 struct rsa_edesc *edesc;
156
157 if (err)
158 caam_jr_strstatus(dev, err);
159
160 edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
161
162 rsa_priv_f3_unmap(dev, edesc, req);
163 rsa_io_unmap(dev, edesc, req);
164 kfree(edesc);
165
166 akcipher_request_complete(req, err);
167}
168
169static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
170 size_t desclen)
171{
172 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
173 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
174 struct device *dev = ctx->dev;
175 struct rsa_edesc *edesc;
176 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
177 GFP_KERNEL : GFP_ATOMIC;
178 int sgc;
179 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
180 int src_nents, dst_nents;
181
182 src_nents = sg_nents_for_len(req->src, req->src_len);
183 dst_nents = sg_nents_for_len(req->dst, req->dst_len);
184
185 if (src_nents > 1)
186 sec4_sg_len = src_nents;
187 if (dst_nents > 1)
188 sec4_sg_len += dst_nents;
189
190 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
191
192 /* allocate space for base edesc, hw desc commands and link tables */
193 edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
194 GFP_DMA | flags);
195 if (!edesc)
196 return ERR_PTR(-ENOMEM);
197
198 sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
199 if (unlikely(!sgc)) {
200 dev_err(dev, "unable to map source\n");
201 goto src_fail;
202 }
203
204 sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
205 if (unlikely(!sgc)) {
206 dev_err(dev, "unable to map destination\n");
207 goto dst_fail;
208 }
209
210 edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
211
212 sec4_sg_index = 0;
213 if (src_nents > 1) {
214 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
215 sec4_sg_index += src_nents;
216 }
217 if (dst_nents > 1)
218 sg_to_sec4_sg_last(req->dst, dst_nents,
219 edesc->sec4_sg + sec4_sg_index, 0);
220
221 /* Save nents for later use in Job Descriptor */
222 edesc->src_nents = src_nents;
223 edesc->dst_nents = dst_nents;
224
225 if (!sec4_sg_bytes)
226 return edesc;
227
228 edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
229 sec4_sg_bytes, DMA_TO_DEVICE);
230 if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
231 dev_err(dev, "unable to map S/G table\n");
232 goto sec4_sg_fail;
233 }
234
235 edesc->sec4_sg_bytes = sec4_sg_bytes;
236
237 return edesc;
238
239sec4_sg_fail:
240 dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
241dst_fail:
242 dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
243src_fail:
244 kfree(edesc);
245 return ERR_PTR(-ENOMEM);
246}
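/*
 * Editorial note, not part of the original driver: rsa_edesc_alloc() packs
 * everything into the single allocation made above, and the pointer
 * arithmetic on edesc->sec4_sg assumes this layout:
 *
 *   [ struct rsa_edesc | hw job descriptor (desclen bytes) | sec4 S/G table ]
 *
 * so freeing edesc also releases the job descriptor and the link table.
 */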
247
248static int set_rsa_pub_pdb(struct akcipher_request *req,
249 struct rsa_edesc *edesc)
250{
251 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
252 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
253 struct caam_rsa_key *key = &ctx->key;
254 struct device *dev = ctx->dev;
255 struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
256 int sec4_sg_index = 0;
257
258 pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
259 if (dma_mapping_error(dev, pdb->n_dma)) {
260 dev_err(dev, "Unable to map RSA modulus memory\n");
261 return -ENOMEM;
262 }
263
264 pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
265 if (dma_mapping_error(dev, pdb->e_dma)) {
266 dev_err(dev, "Unable to map RSA public exponent memory\n");
267 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
268 return -ENOMEM;
269 }
270
271 if (edesc->src_nents > 1) {
272 pdb->sgf |= RSA_PDB_SGF_F;
273 pdb->f_dma = edesc->sec4_sg_dma;
274 sec4_sg_index += edesc->src_nents;
275 } else {
276 pdb->f_dma = sg_dma_address(req->src);
277 }
278
279 if (edesc->dst_nents > 1) {
280 pdb->sgf |= RSA_PDB_SGF_G;
281 pdb->g_dma = edesc->sec4_sg_dma +
282 sec4_sg_index * sizeof(struct sec4_sg_entry);
283 } else {
284 pdb->g_dma = sg_dma_address(req->dst);
285 }
286
287 pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
288 pdb->f_len = req->src_len;
289
290 return 0;
291}
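/*
 * Worked example (illustrative, hypothetical sizes): for a 2048-bit key,
 * key->n_sz is 256 and a typical public exponent occupies 3 bytes, so
 * set_rsa_pub_pdb() above ends up with
 *
 *	pdb->sgf  |= (3 << RSA_PDB_E_SHIFT) | 256;
 *	pdb->f_len = req->src_len;
 *
 * with the RSA_PDB_SGF_F/RSA_PDB_SGF_G bits set only when src/dst need the
 * sec4 scatter/gather table.
 */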
292
293static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
294 struct rsa_edesc *edesc)
295{
296 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
297 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
298 struct caam_rsa_key *key = &ctx->key;
299 struct device *dev = ctx->dev;
300 struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
301 int sec4_sg_index = 0;
302
303 pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
304 if (dma_mapping_error(dev, pdb->n_dma)) {
305 dev_err(dev, "Unable to map modulus memory\n");
306 return -ENOMEM;
307 }
308
309 pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
310 if (dma_mapping_error(dev, pdb->d_dma)) {
311 dev_err(dev, "Unable to map RSA private exponent memory\n");
312 dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
313 return -ENOMEM;
314 }
315
316 if (edesc->src_nents > 1) {
317 pdb->sgf |= RSA_PRIV_PDB_SGF_G;
318 pdb->g_dma = edesc->sec4_sg_dma;
319 sec4_sg_index += edesc->src_nents;
320 } else {
321 pdb->g_dma = sg_dma_address(req->src);
322 }
323
324 if (edesc->dst_nents > 1) {
325 pdb->sgf |= RSA_PRIV_PDB_SGF_F;
326 pdb->f_dma = edesc->sec4_sg_dma +
327 sec4_sg_index * sizeof(struct sec4_sg_entry);
328 } else {
329 pdb->f_dma = sg_dma_address(req->dst);
330 }
331
332 pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
333
334 return 0;
335}
336
337static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
338 struct rsa_edesc *edesc)
339{
340 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
341 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
342 struct caam_rsa_key *key = &ctx->key;
343 struct device *dev = ctx->dev;
344 struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
345 int sec4_sg_index = 0;
346 size_t p_sz = key->p_sz;
347 size_t q_sz = key->q_sz;
348
349 pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
350 if (dma_mapping_error(dev, pdb->d_dma)) {
351 dev_err(dev, "Unable to map RSA private exponent memory\n");
352 return -ENOMEM;
353 }
354
355 pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
356 if (dma_mapping_error(dev, pdb->p_dma)) {
357 dev_err(dev, "Unable to map RSA prime factor p memory\n");
358 goto unmap_d;
359 }
360
361 pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
362 if (dma_mapping_error(dev, pdb->q_dma)) {
363 dev_err(dev, "Unable to map RSA prime factor q memory\n");
364 goto unmap_p;
365 }
366
367 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
368 if (dma_mapping_error(dev, pdb->tmp1_dma)) {
369 dev_err(dev, "Unable to map RSA tmp1 memory\n");
370 goto unmap_q;
371 }
372
373 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
374 if (dma_mapping_error(dev, pdb->tmp2_dma)) {
375 dev_err(dev, "Unable to map RSA tmp2 memory\n");
376 goto unmap_tmp1;
377 }
378
379 if (edesc->src_nents > 1) {
380 pdb->sgf |= RSA_PRIV_PDB_SGF_G;
381 pdb->g_dma = edesc->sec4_sg_dma;
382 sec4_sg_index += edesc->src_nents;
383 } else {
384 pdb->g_dma = sg_dma_address(req->src);
385 }
386
387 if (edesc->dst_nents > 1) {
388 pdb->sgf |= RSA_PRIV_PDB_SGF_F;
389 pdb->f_dma = edesc->sec4_sg_dma +
390 sec4_sg_index * sizeof(struct sec4_sg_entry);
391 } else {
392 pdb->f_dma = sg_dma_address(req->dst);
393 }
394
395 pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
396 pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
397
398 return 0;
399
400unmap_tmp1:
401 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
402unmap_q:
403 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
404unmap_p:
405 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
406unmap_d:
407 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
408
409 return -ENOMEM;
410}
411
412static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
413 struct rsa_edesc *edesc)
414{
415 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
416 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
417 struct caam_rsa_key *key = &ctx->key;
418 struct device *dev = ctx->dev;
419 struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
420 int sec4_sg_index = 0;
421 size_t p_sz = key->p_sz;
422 size_t q_sz = key->q_sz;
423
424 pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
425 if (dma_mapping_error(dev, pdb->p_dma)) {
426 dev_err(dev, "Unable to map RSA prime factor p memory\n");
427 return -ENOMEM;
428 }
429
430 pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
431 if (dma_mapping_error(dev, pdb->q_dma)) {
432 dev_err(dev, "Unable to map RSA prime factor q memory\n");
433 goto unmap_p;
434 }
435
436 pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
437 if (dma_mapping_error(dev, pdb->dp_dma)) {
438 dev_err(dev, "Unable to map RSA exponent dp memory\n");
439 goto unmap_q;
440 }
441
442 pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
443 if (dma_mapping_error(dev, pdb->dq_dma)) {
444 dev_err(dev, "Unable to map RSA exponent dq memory\n");
445 goto unmap_dp;
446 }
447
448 pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
449 if (dma_mapping_error(dev, pdb->c_dma)) {
450 dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
451 goto unmap_dq;
452 }
453
454 pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
455 if (dma_mapping_error(dev, pdb->tmp1_dma)) {
456 dev_err(dev, "Unable to map RSA tmp1 memory\n");
457 goto unmap_qinv;
458 }
459
460 pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
461 if (dma_mapping_error(dev, pdb->tmp2_dma)) {
462 dev_err(dev, "Unable to map RSA tmp2 memory\n");
463 goto unmap_tmp1;
464 }
465
466 if (edesc->src_nents > 1) {
467 pdb->sgf |= RSA_PRIV_PDB_SGF_G;
468 pdb->g_dma = edesc->sec4_sg_dma;
469 sec4_sg_index += edesc->src_nents;
470 } else {
471 pdb->g_dma = sg_dma_address(req->src);
472 }
473
474 if (edesc->dst_nents > 1) {
475 pdb->sgf |= RSA_PRIV_PDB_SGF_F;
476 pdb->f_dma = edesc->sec4_sg_dma +
477 sec4_sg_index * sizeof(struct sec4_sg_entry);
478 } else {
479 pdb->f_dma = sg_dma_address(req->dst);
480 }
481
482 pdb->sgf |= key->n_sz;
483 pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
484
485 return 0;
486
487unmap_tmp1:
488 dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
489unmap_qinv:
490 dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
491unmap_dq:
492 dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
493unmap_dp:
494 dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
495unmap_q:
496 dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
497unmap_p:
498 dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
499
500 return -ENOMEM;
501}
502
503static int caam_rsa_enc(struct akcipher_request *req)
504{
505 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
506 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
507 struct caam_rsa_key *key = &ctx->key;
508 struct device *jrdev = ctx->dev;
509 struct rsa_edesc *edesc;
510 int ret;
511
512 if (unlikely(!key->n || !key->e))
513 return -EINVAL;
514
515 if (req->dst_len < key->n_sz) {
516 req->dst_len = key->n_sz;
517 dev_err(jrdev, "Output buffer length less than parameter n\n");
518 return -EOVERFLOW;
519 }
520
521 /* Allocate extended descriptor */
522 edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
523 if (IS_ERR(edesc))
524 return PTR_ERR(edesc);
525
526 /* Set RSA Encrypt Protocol Data Block */
527 ret = set_rsa_pub_pdb(req, edesc);
528 if (ret)
529 goto init_fail;
530
531 /* Initialize Job Descriptor */
532 init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);
533
534 ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
535 if (!ret)
536 return -EINPROGRESS;
537
538 rsa_pub_unmap(jrdev, edesc, req);
539
540init_fail:
541 rsa_io_unmap(jrdev, edesc, req);
542 kfree(edesc);
543 return ret;
544}
545
546static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
547{
548 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
549 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
550 struct device *jrdev = ctx->dev;
551 struct rsa_edesc *edesc;
552 int ret;
553
554 /* Allocate extended descriptor */
555 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
556 if (IS_ERR(edesc))
557 return PTR_ERR(edesc);
558
559 /* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
560 ret = set_rsa_priv_f1_pdb(req, edesc);
561 if (ret)
562 goto init_fail;
563
564 /* Initialize Job Descriptor */
565 init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);
566
567 ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
568 if (!ret)
569 return -EINPROGRESS;
570
571 rsa_priv_f1_unmap(jrdev, edesc, req);
572
573init_fail:
574 rsa_io_unmap(jrdev, edesc, req);
575 kfree(edesc);
576 return ret;
577}
578
579static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
580{
581 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
582 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
583 struct device *jrdev = ctx->dev;
584 struct rsa_edesc *edesc;
585 int ret;
586
587 /* Allocate extended descriptor */
588 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
589 if (IS_ERR(edesc))
590 return PTR_ERR(edesc);
591
592 /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
593 ret = set_rsa_priv_f2_pdb(req, edesc);
594 if (ret)
595 goto init_fail;
596
597 /* Initialize Job Descriptor */
598 init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
599
600 ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
601 if (!ret)
602 return -EINPROGRESS;
603
604 rsa_priv_f2_unmap(jrdev, edesc, req);
605
606init_fail:
607 rsa_io_unmap(jrdev, edesc, req);
608 kfree(edesc);
609 return ret;
610}
611
612static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
613{
614 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
615 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
616 struct device *jrdev = ctx->dev;
617 struct rsa_edesc *edesc;
618 int ret;
619
620 /* Allocate extended descriptor */
621 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
622 if (IS_ERR(edesc))
623 return PTR_ERR(edesc);
624
625 /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
626 ret = set_rsa_priv_f3_pdb(req, edesc);
627 if (ret)
628 goto init_fail;
629
630 /* Initialize Job Descriptor */
631 init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
632
633 ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
634 if (!ret)
635 return -EINPROGRESS;
636
637 rsa_priv_f3_unmap(jrdev, edesc, req);
638
639init_fail:
640 rsa_io_unmap(jrdev, edesc, req);
641 kfree(edesc);
642 return ret;
643}
644
645static int caam_rsa_dec(struct akcipher_request *req)
646{
647 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
648 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
649 struct caam_rsa_key *key = &ctx->key;
650 int ret;
651
652 if (unlikely(!key->n || !key->d))
653 return -EINVAL;
654
655 if (req->dst_len < key->n_sz) {
656 req->dst_len = key->n_sz;
657 dev_err(ctx->dev, "Output buffer length less than parameter n\n");
658 return -EOVERFLOW;
659 }
660
661 if (key->priv_form == FORM3)
662 ret = caam_rsa_dec_priv_f3(req);
663 else if (key->priv_form == FORM2)
664 ret = caam_rsa_dec_priv_f2(req);
665 else
666 ret = caam_rsa_dec_priv_f1(req);
667
668 return ret;
669}
670
671static void caam_rsa_free_key(struct caam_rsa_key *key)
672{
673 kzfree(key->d);
674 kzfree(key->p);
675 kzfree(key->q);
676 kzfree(key->dp);
677 kzfree(key->dq);
678 kzfree(key->qinv);
679 kzfree(key->tmp1);
680 kzfree(key->tmp2);
681 kfree(key->e);
682 kfree(key->n);
683 memset(key, 0, sizeof(*key));
684}
685
686static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
687{
688 while (!**ptr && *nbytes) {
689 (*ptr)++;
690 (*nbytes)--;
691 }
692}
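/*
 * Example (illustrative): for the buffer {0x00, 0x00, 0x01, 0x02} with
 * *nbytes == 4, caam_rsa_drop_leading_zeros() leaves *ptr pointing at the
 * 0x01 byte and *nbytes == 2, keeping only the significant bytes.
 */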
693
694/**
695 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
696 * dP, dQ and qInv may decode to fewer bytes than the corresponding p or q,
697 * since BER encoding uses the minimum number of bytes needed to represent an
698 * integer. The decoded dP, dQ and qInv values therefore have to be zero-padded
699 * to the appropriate length.
700 *
701 * @ptr : pointer to {dP, dQ, qInv} CRT member
702 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
703 * @dstlen: length in bytes of corresponding p or q prime factor
704 */
705static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
706{
707 u8 *dst;
708
709 caam_rsa_drop_leading_zeros(&ptr, &nbytes);
710 if (!nbytes)
711 return NULL;
712
713 dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
714 if (!dst)
715 return NULL;
716
717 memcpy(dst + (dstlen - nbytes), ptr, nbytes);
718
719 return dst;
720}
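/*
 * Example (illustrative, hypothetical sizes): if p is 128 bytes long but dP
 * BER-decodes to 127 significant bytes, caam_read_rsa_crt() returns a
 * 128-byte buffer containing one leading zero byte followed by the 127
 * bytes of dP, i.e. the zero-padded form described in the comment above.
 */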
721
722/**
723 * caam_read_raw_data - Read a raw byte stream as a positive integer.
724 * The function skips the buffer's leading zeros, copies the remaining data
725 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
726 * the address of the new buffer.
727 *
728 * @buf : The data to read
729 * @nbytes: The amount of data to read
730 */
731static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
732{
733 u8 *val;
734
735 caam_rsa_drop_leading_zeros(&buf, nbytes);
736 if (!*nbytes)
737 return NULL;
738
739 val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
740 if (!val)
741 return NULL;
742
743 memcpy(val, buf, *nbytes);
744
745 return val;
746}
747
748static int caam_rsa_check_key_length(unsigned int len)
749{
750 if (len > 4096)
751 return -EINVAL;
752 return 0;
753}
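/*
 * Editorial note: callers pass the modulus size in bits, e.g.
 * caam_rsa_check_key_length(raw_key.n_sz << 3), so the 4096 limit above
 * corresponds to a 512-byte modulus at most.
 */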
754
755static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
756 unsigned int keylen)
757{
758 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
759 struct rsa_key raw_key = {NULL};
760 struct caam_rsa_key *rsa_key = &ctx->key;
761 int ret;
762
763 /* Free the old RSA key if any */
764 caam_rsa_free_key(rsa_key);
765
766 ret = rsa_parse_pub_key(&raw_key, key, keylen);
767 if (ret)
768 return ret;
769
770 /* Copy key in DMA zone */
771 rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
772 if (!rsa_key->e)
773 goto err;
774
775 /*
776 * Skip leading zeros and copy the positive integer to a buffer
777 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
778 * expects a positive integer for the RSA modulus and uses its length as
779 * decryption output length.
780 */
781 rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
782 if (!rsa_key->n)
783 goto err;
784
785 if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
786 caam_rsa_free_key(rsa_key);
787 return -EINVAL;
788 }
789
790 rsa_key->e_sz = raw_key.e_sz;
791 rsa_key->n_sz = raw_key.n_sz;
792
793 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
794
795 return 0;
796err:
797 caam_rsa_free_key(rsa_key);
798 return -ENOMEM;
799}
800
801static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
802 struct rsa_key *raw_key)
803{
804 struct caam_rsa_key *rsa_key = &ctx->key;
805 size_t p_sz = raw_key->p_sz;
806 size_t q_sz = raw_key->q_sz;
807
808 rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
809 if (!rsa_key->p)
810 return;
811 rsa_key->p_sz = p_sz;
812
813 rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
814 if (!rsa_key->q)
815 goto free_p;
816 rsa_key->q_sz = q_sz;
817
818 rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
819 if (!rsa_key->tmp1)
820 goto free_q;
821
822 rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
823 if (!rsa_key->tmp2)
824 goto free_tmp1;
825
826 rsa_key->priv_form = FORM2;
827
828 rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
829 if (!rsa_key->dp)
830 goto free_tmp2;
831
832 rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
833 if (!rsa_key->dq)
834 goto free_dp;
835
836 rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
837 q_sz);
838 if (!rsa_key->qinv)
839 goto free_dq;
840
841 rsa_key->priv_form = FORM3;
842
843 return;
844
845free_dq:
846 kzfree(rsa_key->dq);
847free_dp:
848 kzfree(rsa_key->dp);
849free_tmp2:
850 kzfree(rsa_key->tmp2);
851free_tmp1:
852 kzfree(rsa_key->tmp1);
853free_q:
854 kzfree(rsa_key->q);
855free_p:
856 kzfree(rsa_key->p);
857}
858
859static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
860 unsigned int keylen)
861{
862 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
863 struct rsa_key raw_key = {NULL};
864 struct caam_rsa_key *rsa_key = &ctx->key;
865 int ret;
866
867 /* Free the old RSA key if any */
868 caam_rsa_free_key(rsa_key);
869
870 ret = rsa_parse_priv_key(&raw_key, key, keylen);
871 if (ret)
872 return ret;
873
874 /* Copy key in DMA zone */
875 rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
876 if (!rsa_key->d)
877 goto err;
878
879 rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
880 if (!rsa_key->e)
881 goto err;
882
883 /*
884 * Skip leading zeros and copy the positive integer to a buffer
885 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
886 * expects a positive integer for the RSA modulus and uses its length as
887 * decryption output length.
888 */
889 rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
890 if (!rsa_key->n)
891 goto err;
892
893 if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
894 caam_rsa_free_key(rsa_key);
895 return -EINVAL;
896 }
897
898 rsa_key->d_sz = raw_key.d_sz;
899 rsa_key->e_sz = raw_key.e_sz;
900 rsa_key->n_sz = raw_key.n_sz;
901
902 memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
903 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
904
905 caam_rsa_set_priv_key_form(ctx, &raw_key);
906
907 return 0;
908
909err:
910 caam_rsa_free_key(rsa_key);
911 return -ENOMEM;
912}
913
914static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
915{
916 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
917
918 return ctx->key.n_sz;
919}
920
921/* Per session pkc's driver context creation function */
922static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
923{
924 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
925
926 ctx->dev = caam_jr_alloc();
927
928 if (IS_ERR(ctx->dev)) {
929 pr_err("Job Ring Device allocation for transform failed\n");
930 return PTR_ERR(ctx->dev);
931 }
932
933 return 0;
934}
935
936/* Per session pkc's driver context cleanup function */
937static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
938{
939 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
940 struct caam_rsa_key *key = &ctx->key;
941
942 caam_rsa_free_key(key);
943 caam_jr_free(ctx->dev);
944}
945
946static struct akcipher_alg caam_rsa = {
947 .encrypt = caam_rsa_enc,
948 .decrypt = caam_rsa_dec,
949 .sign = caam_rsa_dec,
950 .verify = caam_rsa_enc,
951 .set_pub_key = caam_rsa_set_pub_key,
952 .set_priv_key = caam_rsa_set_priv_key,
953 .max_size = caam_rsa_max_size,
954 .init = caam_rsa_init_tfm,
955 .exit = caam_rsa_exit_tfm,
956 .base = {
957 .cra_name = "rsa",
958 .cra_driver_name = "rsa-caam",
959 .cra_priority = 3000,
960 .cra_module = THIS_MODULE,
961 .cra_ctxsize = sizeof(struct caam_rsa_ctx),
962 },
963};
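/*
 * Illustrative usage sketch, not part of this driver: a kernel caller would
 * reach "rsa-caam" through the generic akcipher API. Error handling is
 * omitted, and ber_key, src_sg, dst_sg, my_complete, my_ctx and the "done"
 * completion are hypothetical names assumed to be set up elsewhere:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_akcipher_set_pub_key(tfm, ber_key, ber_key_len);
 *	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      my_complete, my_ctx);
 *	akcipher_request_set_crypt(req, src_sg, dst_sg, src_len, dst_len);
 *
 *	if (crypto_akcipher_encrypt(req) == -EINPROGRESS)
 *		wait_for_completion(&done);	// signalled from my_complete(),
 *						// after rsa_pub_done() has run
 *
 *	akcipher_request_free(req);
 *	crypto_free_akcipher(tfm);
 *
 * crypto_alloc_akcipher("rsa", 0, 0) resolves to the highest-priority "rsa"
 * implementation, which is this driver (cra_priority 3000) when PKHA-capable
 * CAAM hardware is present.
 */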
964
965/* Public Key Cryptography module initialization handler */
966static int __init caam_pkc_init(void)
967{
968 struct device_node *dev_node;
969 struct platform_device *pdev;
970 struct device *ctrldev;
971 struct caam_drv_private *priv;
972 u32 cha_inst, pk_inst;
973 int err;
974
975 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
976 if (!dev_node) {
977 dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
978 if (!dev_node)
979 return -ENODEV;
980 }
981
982 pdev = of_find_device_by_node(dev_node);
983 if (!pdev) {
984 of_node_put(dev_node);
985 return -ENODEV;
986 }
987
988 ctrldev = &pdev->dev;
989 priv = dev_get_drvdata(ctrldev);
990 of_node_put(dev_node);
991
992 /*
993 * If priv is NULL, it's probably because the caam driver wasn't
994 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
995 */
996 if (!priv)
997 return -ENODEV;
998
999 /* Determine public key hardware accelerator presence. */
1000 cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
1001 pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
1002
1003 /* Do not register algorithms if PKHA is not present. */
1004 if (!pk_inst)
1005 return -ENODEV;
1006
1007 err = crypto_register_akcipher(&caam_rsa);
1008 if (err)
1009 dev_warn(ctrldev, "%s alg registration failed\n",
1010 caam_rsa.base.cra_driver_name);
1011 else
1012 dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
1013
1014 return err;
1015}
1016
1017static void __exit caam_pkc_exit(void)
1018{
1019 crypto_unregister_akcipher(&caam_rsa);
1020}
1021
1022module_init(caam_pkc_init);
1023module_exit(caam_pkc_exit);
1024
1025MODULE_LICENSE("Dual BSD/GPL");
1026MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
1027MODULE_AUTHOR("Freescale Semiconductor");