]>
Commit | Line | Data |
---|---|---|
a9905320 TS |
1 | /* |
2 | This file is provided under a dual BSD/GPLv2 license. When using or | |
3 | redistributing this file, you may do so under either license. | |
4 | ||
5 | GPL LICENSE SUMMARY | |
6 | Copyright(c) 2014 Intel Corporation. | |
7 | This program is free software; you can redistribute it and/or modify | |
8 | it under the terms of version 2 of the GNU General Public License as | |
9 | published by the Free Software Foundation. | |
10 | ||
11 | This program is distributed in the hope that it will be useful, but | |
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | General Public License for more details. | |
15 | ||
16 | Contact Information: | |
17 | qat-linux@intel.com | |
18 | ||
19 | BSD LICENSE | |
20 | Copyright(c) 2014 Intel Corporation. | |
21 | Redistribution and use in source and binary forms, with or without | |
22 | modification, are permitted provided that the following conditions | |
23 | are met: | |
24 | ||
25 | * Redistributions of source code must retain the above copyright | |
26 | notice, this list of conditions and the following disclaimer. | |
27 | * Redistributions in binary form must reproduce the above copyright | |
28 | notice, this list of conditions and the following disclaimer in | |
29 | the documentation and/or other materials provided with the | |
30 | distribution. | |
31 | * Neither the name of Intel Corporation nor the names of its | |
32 | contributors may be used to endorse or promote products derived | |
33 | from this software without specific prior written permission. | |
34 | ||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
46 | */ | |
47 | ||
48 | #include <linux/module.h> | |
49 | #include <crypto/internal/rsa.h> | |
50 | #include <crypto/internal/akcipher.h> | |
51 | #include <crypto/akcipher.h> | |
c9839143 SB |
52 | #include <crypto/kpp.h> |
53 | #include <crypto/internal/kpp.h> | |
54 | #include <crypto/dh.h> | |
a9905320 TS |
55 | #include <linux/dma-mapping.h> |
56 | #include <linux/fips.h> | |
22287b0b | 57 | #include <crypto/scatterwalk.h> |
a9905320 TS |
58 | #include "icp_qat_fw_pke.h" |
59 | #include "adf_accel_devices.h" | |
60 | #include "adf_transport.h" | |
61 | #include "adf_common_drv.h" | |
62 | #include "qat_crypto.h" | |
63 | ||
/* Protects active_devs; taken by code outside this chunk -- TODO confirm. */
static DEFINE_MUTEX(algs_lock);
/*
 * NOTE(review): presumably counts accelerator devices that have these
 * algorithms registered (register on first, unregister on last); the
 * registration code is not visible in this chunk -- verify against it.
 */
static unsigned int active_devs;
a9905320 TS |
/*
 * Table of input-parameter DMA addresses passed to the QAT PKE firmware
 * for an RSA operation.  The named structs are views of the same table
 * for each operation type; in_tab[] is the raw array that request code
 * zero-terminates before mapping it for the device.
 */
struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;	/* message */
			dma_addr_t e;	/* public exponent */
			dma_addr_t n;	/* modulus */
		} enc;
		struct {
			dma_addr_t c;	/* ciphertext */
			dma_addr_t d;	/* private exponent */
			dma_addr_t n;	/* modulus */
		} dec;
		struct {
			dma_addr_t c;	/* ciphertext */
			dma_addr_t p;	/* first prime factor */
			dma_addr_t q;	/* second prime factor */
			dma_addr_t dp;	/* CRT exponent for p */
			dma_addr_t dq;	/* CRT exponent for q */
			dma_addr_t qinv; /* CRT coefficient */
		} dec_crt;
		u64 in_tab[8];
	};
} __packed __aligned(64);
90 | ||
/*
 * Table of output-parameter DMA addresses for an RSA operation; mirrors
 * struct qat_rsa_input_params.  out_tab[1] is zeroed by request code to
 * terminate the single-entry table.
 */
struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;	/* ciphertext produced by encrypt */
		} enc;
		struct {
			dma_addr_t m;	/* message produced by decrypt */
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);
102 | ||
/*
 * Per-tfm RSA context.  Each key component is kept in a DMA-coherent
 * buffer (char * kernel address paired with its dma_addr_t bus address)
 * so the firmware can read it directly.
 */
struct qat_rsa_ctx {
	char *n;		/* modulus */
	char *e;		/* public exponent */
	char *d;		/* private exponent */
	char *p;		/* CRT: first prime */
	char *q;		/* CRT: second prime */
	char *dp;		/* CRT: exponent for p */
	char *dq;		/* CRT: exponent for q */
	char *qinv;		/* CRT: coefficient */
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	dma_addr_t dma_p;
	dma_addr_t dma_q;
	dma_addr_t dma_dp;
	dma_addr_t dma_dq;
	dma_addr_t dma_qinv;
	unsigned int key_sz;	/* key size in bytes */
	bool crt_mode;		/* decrypt via CRT (DP2) firmware service */
	struct qat_crypto_instance *inst;	/* owning crypto instance */
} __packed __aligned(64);
124 | ||
c9839143 SB |
/*
 * Input-parameter DMA address table for a DH operation.  "in" is the
 * general base^xa mod p form; "in_g2" omits the base for the optimized
 * generator-2 service.  in_tab[] is the raw, zero-terminated view.
 */
struct qat_dh_input_params {
	union {
		struct {
			dma_addr_t b;	/* base */
			dma_addr_t xa;	/* private value */
			dma_addr_t p;	/* modulus */
		} in;
		struct {
			dma_addr_t xa;	/* private value */
			dma_addr_t p;	/* modulus */
		} in_g2;
		u64 in_tab[8];
	};
} __packed __aligned(64);
139 | ||
/* Output-parameter DMA address table for a DH operation (single result r). */
struct qat_dh_output_params {
	union {
		dma_addr_t r;	/* computed public key / shared secret */
		u64 out_tab[8];
	};
} __packed __aligned(64);
146 | ||
/*
 * Per-tfm DH context.  g, xa and p live in DMA-coherent buffers paired
 * with their bus addresses; when g2 is set the generator equals 2 and
 * no g buffer is allocated.
 */
struct qat_dh_ctx {
	char *g;		/* generator (NULL when g2 is true) */
	char *xa;		/* private value */
	char *p;		/* prime modulus */
	dma_addr_t dma_g;
	dma_addr_t dma_xa;
	dma_addr_t dma_p;
	unsigned int p_size;	/* modulus size in bytes */
	bool g2;		/* generator == 2, use G2 firmware service */
	struct qat_crypto_instance *inst;	/* owning crypto instance */
} __packed __aligned(64);
158 | ||
/*
 * Per-request state shared by the RSA and DH paths.  Lives in the
 * crypto request context (PTR_ALIGNed to 64 by the request handlers).
 */
struct qat_asym_request {
	union {
		struct qat_rsa_input_params rsa;
		struct qat_dh_input_params dh;
	} in;
	union {
		struct qat_rsa_output_params rsa;
		struct qat_dh_output_params dh;
	} out;
	dma_addr_t phy_in;	/* mapped address of the input param table */
	dma_addr_t phy_out;	/* mapped address of the output param table */
	char *src_align;	/* bounce buffer when src != key size, else NULL */
	char *dst_align;	/* bounce buffer when dst != key size, else NULL */
	struct icp_qat_fw_pke_request req;	/* firmware request descriptor */
	union {
		struct qat_rsa_ctx *rsa;
		struct qat_dh_ctx *dh;
	} ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;			/* originating crypto-API request */
	int err;
	void (*cb)(struct icp_qat_fw_pke_resp *resp);	/* completion handler */
} __aligned(64);
184 | ||
c9839143 SB |
/*
 * Completion handler for a DH request: translate the firmware status,
 * undo every DMA mapping/bounce buffer set up by qat_dh_compute_value()
 * and complete the kpp request.
 */
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct kpp_request *areq = req->areq.dh;
	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	/* src was only mapped when the caller supplied a base (b). */
	if (areq->src) {
		if (req->src_align)
			dma_free_coherent(dev, req->ctx.dh->p_size,
					  req->src_align, req->in.dh.in.b);
		else
			dma_unmap_single(dev, req->in.dh.in.b,
					 req->ctx.dh->p_size, DMA_TO_DEVICE);
	}

	areq->dst_len = req->ctx.dh->p_size;
	if (req->dst_align) {
		/* Copy result out of the bounce buffer into the user SG. */
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
				  req->out.dh.r);
	} else {
		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
				 DMA_FROM_DEVICE);
	}

	/* Unmap the parameter tables themselves. */
	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_dh_output_params),
			 DMA_TO_DEVICE);

	kpp_request_complete(areq, err);
}
224 | ||
#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83

/*
 * Map a DH modulus size in bytes to the PKE firmware function id.
 * The G2 ids select the optimized service used when the generator is 2.
 * Returns 0 for any unsupported size.
 */
static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
	switch (len << 3) {
	case 1536:
		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
	case 2048:
		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
	case 3072:
		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
	case 4096:
		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
	}
	return 0;
}
251 | ||
/* Fetch the DH context stored in the kpp transform. */
static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx;
}
256 | ||
/*
 * kpp .compute_value handler: build a PKE firmware request computing
 * b^xa mod p (public key when req->src is NULL, shared secret otherwise),
 * map all buffers for DMA and post it to the device ring.
 *
 * Returns -EINPROGRESS on success (completion runs qat_dh_cb()),
 * -EOVERFLOW when dst is too small (dst_len updated to the needed size),
 * -EINVAL for missing key/unsupported size, -ENOMEM/-EBUSY on mapping or
 * ring failures.  On any error path all mappings made so far are undone.
 */
static int qat_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	/* Request state lives in the req ctx; hardware wants 64B alignment. */
	struct qat_asym_request *qat_req =
			PTR_ALIGN(kpp_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;
	int n_input_params = 0;

	if (unlikely(!ctx->xa))
		return -EINVAL;

	if (req->dst_len < ctx->p_size) {
		req->dst_len = ctx->p_size;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	/* G2 service is only usable when computing from our own generator. */
	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
						    !req->src && ctx->g2);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_dh_cb;
	qat_req->ctx.dh = ctx;
	qat_req->areq.dh = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	/*
	 * If no source is provided use g as base
	 */
	if (req->src) {
		qat_req->in.dh.in.xa = ctx->dma_xa;
		qat_req->in.dh.in.p = ctx->dma_p;
		n_input_params = 3;
	} else {
		if (ctx->g2) {
			/* Generator is implicit for the G2 service. */
			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
			qat_req->in.dh.in_g2.p = ctx->dma_p;
			n_input_params = 2;
		} else {
			qat_req->in.dh.in.b = ctx->dma_g;
			qat_req->in.dh.in.xa = ctx->dma_xa;
			qat_req->in.dh.in.p = ctx->dma_p;
			n_input_params = 3;
		}
	}

	ret = -ENOMEM;
	if (req->src) {
		/*
		 * src can be of any size in valid range, but HW expects it to
		 * be the same as modulo p so in case it is different we need
		 * to allocate a new buf and copy src data.
		 * In other case we just need to map the user provided buffer.
		 * Also need to make sure that it is in contiguous buffer.
		 */
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			qat_req->in.dh.in.b = dma_map_single(dev,
							     sg_virt(req->src),
							     req->src_len,
							     DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(dev,
						       qat_req->in.dh.in.b)))
				return ret;

		} else {
			/* Right-align the short value in a zeroed bounce buf. */
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = dma_zalloc_coherent(dev,
							ctx->p_size,
							&qat_req->in.dh.in.b,
							GFP_KERNEL);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);
		}
	}
	/*
	 * dst can be of any size in valid range, but HW expects it to be the
	 * same as modulo m so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
						   req->dst_len,
						   DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
							 &qat_req->out.dh.r,
							 GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
	}

	/* Zero-terminate the parameter tables before mapping them. */
	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;
	/* Mapping in.in.b or in.in_g2.xa is the same */
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
					 sizeof(struct qat_dh_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
					  sizeof(struct qat_dh_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	msg->input_param_count = n_input_params;
	msg->output_param_count = 1;

	/* Retry a busy ring a bounded number of times before giving up. */
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	/* Send failed: unwind every mapping in reverse order. */
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_dh_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
				  qat_req->out.dh.r);
	else
		if (!dma_mapping_error(dev, qat_req->out.dh.r))
			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
					 DMA_FROM_DEVICE);
unmap_src:
	if (req->src) {
		if (qat_req->src_align)
			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
					  qat_req->in.dh.in.b);
		else
			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
				dma_unmap_single(dev, qat_req->in.dh.in.b,
						 ctx->p_size,
						 DMA_TO_DEVICE);
	}
	return ret;
}
428 | ||
/*
 * Validate a DH prime length in bits against the sizes the firmware
 * supports.  Returns 0 when supported, -EINVAL otherwise.
 */
static int qat_dh_check_params_length(unsigned int p_len)
{
	if (p_len == 1536 || p_len == 2048 || p_len == 3072 || p_len == 4096)
		return 0;

	return -EINVAL;
}
440 | ||
/*
 * Copy the DH domain parameters (p and g) from the decoded key into
 * DMA-coherent buffers.  If g == 2 only a flag is recorded and no g
 * buffer is allocated (the firmware has a dedicated G2 service).
 * g is right-aligned within a p-sized, zero-filled buffer.
 *
 * Returns 0 on success, -EINVAL on missing/unsupported params,
 * -ENOMEM on allocation failure (with any partial allocation freed).
 */
static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);

	if (unlikely(!params->p || !params->g))
		return -EINVAL;

	/* p_size is in bytes; the length check wants bits. */
	if (qat_dh_check_params_length(params->p_size << 3))
		return -EINVAL;

	ctx->p_size = params->p_size;
	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		return -ENOMEM;
	memcpy(ctx->p, params->p, ctx->p_size);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == 0x02) {
		ctx->g2 = true;
		return 0;
	}

	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
	if (!ctx->g) {
		/* Roll back the p allocation so the ctx stays consistent. */
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
		return -ENOMEM;
	}
	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
	       params->g_size);

	return 0;
}
475 | ||
/*
 * Free all DMA-coherent key material held by a DH context and reset it
 * to its empty state.  Safe to call on a partially initialized context.
 */
static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
	if (ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
		ctx->g = NULL;
	}
	if (ctx->xa) {
		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
		ctx->xa = NULL;
	}
	if (ctx->p) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
	}
	ctx->p_size = 0;
	ctx->g2 = false;
}
493 | ||
/*
 * kpp .set_secret handler: decode the packed DH key blob, install the
 * domain parameters and copy the private value xa into a DMA-coherent,
 * p-sized buffer (right-aligned, upper bytes zero).
 *
 * Any previously installed secret is released first; on failure the
 * context is left cleared.  Returns 0 or a negative errno.
 */
static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			     unsigned int len)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	qat_dh_clear_ctx(dev, ctx);

	ret = qat_dh_set_params(ctx, &params);
	if (ret < 0)
		return ret;

	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
				      GFP_KERNEL);
	if (!ctx->xa) {
		qat_dh_clear_ctx(dev, ctx);
		return -ENOMEM;
	}
	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
	       params.key_size);

	return 0;
}
523 | ||
85ac98cb | 524 | static unsigned int qat_dh_max_size(struct crypto_kpp *tfm) |
c9839143 SB |
525 | { |
526 | struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); | |
527 | ||
85ac98cb | 528 | return ctx->p_size; |
c9839143 SB |
529 | } |
530 | ||
/*
 * kpp .init handler: grab a crypto instance on the current NUMA node and
 * start with an empty context.  Returns -EINVAL if no instance is
 * available.
 */
static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(get_current_node());

	if (!inst)
		return -EINVAL;

	ctx->p_size = 0;
	ctx->g2 = false;
	ctx->inst = inst;
	return 0;
}
545 | ||
/*
 * kpp .exit handler: release all key material and drop the reference on
 * the crypto instance taken in qat_dh_init_tfm().
 */
static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_dh_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}
554 | ||
a9905320 TS |
/*
 * Completion handler for an RSA request: translate the firmware status,
 * undo the DMA mappings/bounce buffers set up by qat_rsa_enc()/dec()
 * and complete the akcipher request.
 *
 * The enc and dec union views alias the same first slot, so using the
 * enc names here covers both operations.
 */
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct akcipher_request *areq = req->areq.rsa;
	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (req->src_align)
		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
				  req->in.rsa.enc.m);
	else
		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
				 DMA_TO_DEVICE);

	areq->dst_len = req->ctx.rsa->key_sz;
	if (req->dst_align) {
		/* Copy the result out of the bounce buffer into the user SG. */
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);

		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
				  req->out.rsa.enc.c);
	} else {
		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
				 DMA_FROM_DEVICE);
	}

	/* Unmap the parameter tables themselves. */
	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}
592 | ||
593 | void qat_alg_asym_callback(void *_resp) | |
594 | { | |
595 | struct icp_qat_fw_pke_resp *resp = _resp; | |
c9839143 | 596 | struct qat_asym_request *areq = (void *)(__force long)resp->opaque; |
a9905320 | 597 | |
c9839143 | 598 | areq->cb(resp); |
a9905320 TS |
599 | } |
600 | ||
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e

/*
 * Map an RSA key size in bytes to the encrypt (public-key) firmware
 * function id.  Returns 0 for any unsupported size.
 */
static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bits = len << 3;

	if (bits == 512)
		return PKE_RSA_EP_512;
	if (bits == 1024)
		return PKE_RSA_EP_1024;
	if (bits == 1536)
		return PKE_RSA_EP_1536;
	if (bits == 2048)
		return PKE_RSA_EP_2048;
	if (bits == 3072)
		return PKE_RSA_EP_3072;
	if (bits == 4096)
		return PKE_RSA_EP_4096;
	return 0;
}
629 | ||
#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98

/*
 * Map an RSA key size in bytes to the non-CRT decrypt (DP1) firmware
 * function id.  Returns 0 for any unsupported size.
 */
static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bits = len << 3;

	if (bits == 512)
		return PKE_RSA_DP1_512;
	if (bits == 1024)
		return PKE_RSA_DP1_1024;
	if (bits == 1536)
		return PKE_RSA_DP1_1536;
	if (bits == 2048)
		return PKE_RSA_DP1_2048;
	if (bits == 3072)
		return PKE_RSA_DP1_3072;
	if (bits == 4096)
		return PKE_RSA_DP1_4096;
	return 0;
}
658 | ||
879f77e9 SB |
#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2

/*
 * Map an RSA key size in bytes to the CRT decrypt (DP2) firmware
 * function id.  Returns 0 for any unsupported size.
 */
static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
	unsigned int bits = len << 3;

	if (bits == 512)
		return PKE_RSA_DP2_512;
	if (bits == 1024)
		return PKE_RSA_DP2_1024;
	if (bits == 1536)
		return PKE_RSA_DP2_1536;
	if (bits == 2048)
		return PKE_RSA_DP2_2048;
	if (bits == 3072)
		return PKE_RSA_DP2_3072;
	if (bits == 4096)
		return PKE_RSA_DP2_4096;
	return 0;
}
687 | ||
a9905320 TS |
/*
 * akcipher .encrypt handler: build a PKE firmware request computing
 * c = m^e mod n, map the message/result buffers for DMA and post the
 * request to the device ring.
 *
 * Returns -EINPROGRESS on success (completion runs qat_rsa_cb()),
 * -EOVERFLOW when dst is too small (dst_len updated to key_sz),
 * -EINVAL for a missing key/unsupported size, -ENOMEM/-EBUSY on mapping
 * or ring failures.  Every error path unwinds all mappings made so far.
 */
static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	/* Request state lives in the req ctx; hardware wants 64B alignment. */
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	int ret, ctr = 0;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}
	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.rsa.enc.e = ctx->dma_e;
	qat_req->in.rsa.enc.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * src can be of any size in valid range, but HW expects it to be the
	 * same as modulo n so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
						   req->src_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
			return ret;

	} else {
		/* Right-align the short message in a zeroed bounce buffer. */
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
							 &qat_req->in.rsa.enc.m,
							 GFP_KERNEL);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
	}
	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
							req->dst_len,
							DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
			goto unmap_src;

	} else {
		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
							&qat_req->out.rsa.enc.c,
							GFP_KERNEL);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

	}
	/* Zero-terminate the parameter tables before mapping them. */
	qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;
	/* Retry a busy ring a bounded number of times before giving up. */
	do {
		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
	} while (ret == -EBUSY && ctr++ < 100);

	if (!ret)
		return -EINPROGRESS;

	/* Send failed: unwind every mapping in reverse order. */
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (qat_req->dst_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
				  qat_req->out.rsa.enc.c);
	else
		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
					 ctx->key_sz, DMA_FROM_DEVICE);
unmap_src:
	if (qat_req->src_align)
		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
				  qat_req->in.rsa.enc.m);
	else
		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
					 ctx->key_sz, DMA_TO_DEVICE);
	return ret;
}
821 | ||
822 | static int qat_rsa_dec(struct akcipher_request *req) | |
823 | { | |
824 | struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); | |
825 | struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | |
826 | struct qat_crypto_instance *inst = ctx->inst; | |
827 | struct device *dev = &GET_DEV(inst->accel_dev); | |
c9839143 | 828 | struct qat_asym_request *qat_req = |
a9905320 TS |
829 | PTR_ALIGN(akcipher_request_ctx(req), 64); |
830 | struct icp_qat_fw_pke_request *msg = &qat_req->req; | |
831 | int ret, ctr = 0; | |
832 | ||
833 | if (unlikely(!ctx->n || !ctx->d)) | |
834 | return -EINVAL; | |
835 | ||
836 | if (req->dst_len < ctx->key_sz) { | |
837 | req->dst_len = ctx->key_sz; | |
838 | return -EOVERFLOW; | |
839 | } | |
840 | memset(msg, '\0', sizeof(*msg)); | |
841 | ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, | |
842 | ICP_QAT_FW_COMN_REQ_FLAG_SET); | |
879f77e9 SB |
843 | msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ? |
844 | qat_rsa_dec_fn_id_crt(ctx->key_sz) : | |
845 | qat_rsa_dec_fn_id(ctx->key_sz); | |
a9905320 TS |
846 | if (unlikely(!msg->pke_hdr.cd_pars.func_id)) |
847 | return -EINVAL; | |
848 | ||
c9839143 SB |
849 | qat_req->cb = qat_rsa_cb; |
850 | qat_req->ctx.rsa = ctx; | |
851 | qat_req->areq.rsa = req; | |
a9905320 TS |
852 | msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; |
853 | msg->pke_hdr.comn_req_flags = | |
854 | ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT, | |
855 | QAT_COMN_CD_FLD_TYPE_64BIT_ADR); | |
856 | ||
879f77e9 | 857 | if (ctx->crt_mode) { |
c9839143 SB |
858 | qat_req->in.rsa.dec_crt.p = ctx->dma_p; |
859 | qat_req->in.rsa.dec_crt.q = ctx->dma_q; | |
860 | qat_req->in.rsa.dec_crt.dp = ctx->dma_dp; | |
861 | qat_req->in.rsa.dec_crt.dq = ctx->dma_dq; | |
862 | qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv; | |
879f77e9 | 863 | } else { |
c9839143 SB |
864 | qat_req->in.rsa.dec.d = ctx->dma_d; |
865 | qat_req->in.rsa.dec.n = ctx->dma_n; | |
879f77e9 | 866 | } |
a9905320 TS |
867 | ret = -ENOMEM; |
868 | ||
869 | /* | |
870 | * src can be of any size in valid range, but HW expects it to be the | |
871 | * same as modulo n so in case it is different we need to allocate a | |
872 | * new buf and copy src data. | |
873 | * In other case we just need to map the user provided buffer. | |
22287b0b | 874 | * Also need to make sure that it is in contiguous buffer. |
a9905320 | 875 | */ |
22287b0b TS |
876 | if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { |
877 | qat_req->src_align = NULL; | |
c9839143 | 878 | qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src), |
22287b0b | 879 | req->dst_len, DMA_TO_DEVICE); |
c9839143 | 880 | if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c))) |
22287b0b TS |
881 | return ret; |
882 | ||
883 | } else { | |
a9905320 TS |
884 | int shift = ctx->key_sz - req->src_len; |
885 | ||
886 | qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, | |
c9839143 | 887 | &qat_req->in.rsa.dec.c, |
a9905320 TS |
888 | GFP_KERNEL); |
889 | if (unlikely(!qat_req->src_align)) | |
890 | return ret; | |
891 | ||
22287b0b TS |
892 | scatterwalk_map_and_copy(qat_req->src_align + shift, req->src, |
893 | 0, req->src_len, 0); | |
894 | } | |
895 | if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { | |
896 | qat_req->dst_align = NULL; | |
c9839143 | 897 | qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst), |
22287b0b TS |
898 | req->dst_len, |
899 | DMA_FROM_DEVICE); | |
900 | ||
c9839143 | 901 | if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m))) |
22287b0b TS |
902 | goto unmap_src; |
903 | ||
a9905320 | 904 | } else { |
22287b0b | 905 | qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, |
c9839143 | 906 | &qat_req->out.rsa.dec.m, |
22287b0b TS |
907 | GFP_KERNEL); |
908 | if (unlikely(!qat_req->dst_align)) | |
909 | goto unmap_src; | |
910 | ||
a9905320 | 911 | } |
22287b0b | 912 | |
879f77e9 | 913 | if (ctx->crt_mode) |
c9839143 | 914 | qat_req->in.rsa.in_tab[6] = 0; |
879f77e9 | 915 | else |
c9839143 SB |
916 | qat_req->in.rsa.in_tab[3] = 0; |
917 | qat_req->out.rsa.out_tab[1] = 0; | |
918 | qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c, | |
a9905320 TS |
919 | sizeof(struct qat_rsa_input_params), |
920 | DMA_TO_DEVICE); | |
22287b0b TS |
921 | if (unlikely(dma_mapping_error(dev, qat_req->phy_in))) |
922 | goto unmap_dst; | |
923 | ||
c9839143 | 924 | qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m, |
a9905320 | 925 | sizeof(struct qat_rsa_output_params), |
22287b0b TS |
926 | DMA_TO_DEVICE); |
927 | if (unlikely(dma_mapping_error(dev, qat_req->phy_out))) | |
928 | goto unmap_in_params; | |
a9905320 TS |
929 | |
930 | msg->pke_mid.src_data_addr = qat_req->phy_in; | |
931 | msg->pke_mid.dest_data_addr = qat_req->phy_out; | |
c9839143 | 932 | msg->pke_mid.opaque = (uint64_t)(__force long)qat_req; |
879f77e9 SB |
933 | if (ctx->crt_mode) |
934 | msg->input_param_count = 6; | |
935 | else | |
936 | msg->input_param_count = 3; | |
937 | ||
a9905320 TS |
938 | msg->output_param_count = 1; |
939 | do { | |
940 | ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg); | |
941 | } while (ret == -EBUSY && ctr++ < 100); | |
942 | ||
943 | if (!ret) | |
944 | return -EINPROGRESS; | |
26d52ea3 TS |
945 | |
946 | if (!dma_mapping_error(dev, qat_req->phy_out)) | |
947 | dma_unmap_single(dev, qat_req->phy_out, | |
948 | sizeof(struct qat_rsa_output_params), | |
949 | DMA_TO_DEVICE); | |
950 | unmap_in_params: | |
951 | if (!dma_mapping_error(dev, qat_req->phy_in)) | |
952 | dma_unmap_single(dev, qat_req->phy_in, | |
953 | sizeof(struct qat_rsa_input_params), | |
954 | DMA_TO_DEVICE); | |
22287b0b TS |
955 | unmap_dst: |
956 | if (qat_req->dst_align) | |
957 | dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, | |
c9839143 | 958 | qat_req->out.rsa.dec.m); |
22287b0b | 959 | else |
c9839143 SB |
960 | if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m)) |
961 | dma_unmap_single(dev, qat_req->out.rsa.dec.m, | |
962 | ctx->key_sz, DMA_FROM_DEVICE); | |
26d52ea3 TS |
963 | unmap_src: |
964 | if (qat_req->src_align) | |
965 | dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, | |
c9839143 | 966 | qat_req->in.rsa.dec.c); |
26d52ea3 | 967 | else |
c9839143 SB |
968 | if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c)) |
969 | dma_unmap_single(dev, qat_req->in.rsa.dec.c, | |
970 | ctx->key_sz, DMA_TO_DEVICE); | |
a9905320 TS |
971 | return ret; |
972 | } | |
973 | ||
/**
 * qat_rsa_set_n() - install the RSA modulus n into DMA-coherent memory.
 * @ctx:   QAT RSA context; ctx->inst must already be set.
 * @value: big-endian integer, possibly with leading zero bytes.
 * @vlen:  length of @value in bytes.
 *
 * The stripped length of n defines ctx->key_sz for all later key parts.
 * Return: 0 on success, -EINVAL for an unsupported key size, -ENOMEM if
 * the coherent allocation fails. On error key_sz is reset to 0 so the
 * other setters reject their input too.
 */
int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	/*
	 * Skip leading zero bytes of the big-endian value.
	 * NOTE(review): *ptr is dereferenced before vlen is tested, so even a
	 * zero-length input must point at readable memory - confirm callers
	 * (the ASN.1 parser) guarantee this.
	 */
	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	/* Zeroed DMA buffer; the firmware reads exactly key_sz bytes. */
	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}
1004 | ||
/**
 * qat_rsa_set_e() - install the RSA public exponent e.
 * @ctx:   QAT RSA context; qat_rsa_set_n() must have run first (key_sz set).
 * @value: big-endian integer, possibly with leading zero bytes.
 * @vlen:  length of @value in bytes.
 *
 * Return: 0 on success, -EINVAL if key_sz is unset or e does not fit,
 * -ENOMEM if the coherent allocation fails.
 */
int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	/* Skip leading zero bytes (same caveat as qat_rsa_set_n: *ptr is
	 * read before vlen is checked).
	 */
	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	/* e must fit into key_sz bytes and the modulus must be known. */
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	/* Buffer is zeroed, so e ends up right-aligned (zero-padded). */
	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}
1028 | ||
/**
 * qat_rsa_set_d() - install the RSA private exponent d.
 * @ctx:   QAT RSA context; qat_rsa_set_n() must have run first (key_sz set).
 * @value: big-endian integer, possibly with leading zero bytes.
 * @vlen:  length of @value in bytes.
 *
 * d is secret material; qat_rsa_clear_ctx() zeroes it before freeing.
 * Return: 0 on success, -EINVAL if key_sz is unset or d does not fit,
 * -ENOMEM if the coherent allocation fails.
 */
int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	/* Skip leading zero bytes of the big-endian value. */
	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	ret = -ENOMEM;
	/* Zeroed buffer => d is right-aligned (zero-padded) for the HW. */
	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}
1056 | ||
/*
 * Advance *ptr past the leading zero bytes of a big-endian integer and
 * shrink *len accordingly. Both arguments are updated in place.
 */
static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
	const char *p = *ptr;
	unsigned int n = *len;

	while (n && !*p) {
		p++;
		n--;
	}

	*ptr = p;
	*len = n;
}
1064 | ||
1065 | static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key) | |
1066 | { | |
1067 | struct qat_crypto_instance *inst = ctx->inst; | |
1068 | struct device *dev = &GET_DEV(inst->accel_dev); | |
1069 | const char *ptr; | |
1070 | unsigned int len; | |
1071 | unsigned int half_key_sz = ctx->key_sz / 2; | |
1072 | ||
1073 | /* p */ | |
1074 | ptr = rsa_key->p; | |
1075 | len = rsa_key->p_sz; | |
1076 | qat_rsa_drop_leading_zeros(&ptr, &len); | |
1077 | if (!len) | |
1078 | goto err; | |
1079 | ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); | |
1080 | if (!ctx->p) | |
1081 | goto err; | |
1082 | memcpy(ctx->p + (half_key_sz - len), ptr, len); | |
1083 | ||
1084 | /* q */ | |
1085 | ptr = rsa_key->q; | |
1086 | len = rsa_key->q_sz; | |
1087 | qat_rsa_drop_leading_zeros(&ptr, &len); | |
1088 | if (!len) | |
1089 | goto free_p; | |
1090 | ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); | |
1091 | if (!ctx->q) | |
1092 | goto free_p; | |
1093 | memcpy(ctx->q + (half_key_sz - len), ptr, len); | |
1094 | ||
1095 | /* dp */ | |
1096 | ptr = rsa_key->dp; | |
1097 | len = rsa_key->dp_sz; | |
1098 | qat_rsa_drop_leading_zeros(&ptr, &len); | |
1099 | if (!len) | |
1100 | goto free_q; | |
1101 | ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp, | |
1102 | GFP_KERNEL); | |
1103 | if (!ctx->dp) | |
1104 | goto free_q; | |
1105 | memcpy(ctx->dp + (half_key_sz - len), ptr, len); | |
1106 | ||
1107 | /* dq */ | |
1108 | ptr = rsa_key->dq; | |
1109 | len = rsa_key->dq_sz; | |
1110 | qat_rsa_drop_leading_zeros(&ptr, &len); | |
1111 | if (!len) | |
1112 | goto free_dp; | |
1113 | ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq, | |
1114 | GFP_KERNEL); | |
1115 | if (!ctx->dq) | |
1116 | goto free_dp; | |
1117 | memcpy(ctx->dq + (half_key_sz - len), ptr, len); | |
1118 | ||
1119 | /* qinv */ | |
1120 | ptr = rsa_key->qinv; | |
1121 | len = rsa_key->qinv_sz; | |
1122 | qat_rsa_drop_leading_zeros(&ptr, &len); | |
1123 | if (!len) | |
1124 | goto free_dq; | |
1125 | ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv, | |
1126 | GFP_KERNEL); | |
1127 | if (!ctx->qinv) | |
1128 | goto free_dq; | |
1129 | memcpy(ctx->qinv + (half_key_sz - len), ptr, len); | |
1130 | ||
1131 | ctx->crt_mode = true; | |
1132 | return; | |
1133 | ||
1134 | free_dq: | |
1135 | memset(ctx->dq, '\0', half_key_sz); | |
1136 | dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq); | |
1137 | ctx->dq = NULL; | |
1138 | free_dp: | |
1139 | memset(ctx->dp, '\0', half_key_sz); | |
1140 | dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp); | |
1141 | ctx->dp = NULL; | |
1142 | free_q: | |
1143 | memset(ctx->q, '\0', half_key_sz); | |
1144 | dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q); | |
1145 | ctx->q = NULL; | |
1146 | free_p: | |
1147 | memset(ctx->p, '\0', half_key_sz); | |
1148 | dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p); | |
1149 | ctx->p = NULL; | |
1150 | err: | |
1151 | ctx->crt_mode = false; | |
1152 | } | |
1153 | ||
/**
 * qat_rsa_clear_ctx() - release every key buffer held by @ctx.
 * @dev: device the DMA buffers were allocated against.
 * @ctx: context to clear.
 *
 * Secret components (d and the CRT parts p, q, dp, dq, qinv) are zeroed
 * before being freed; the public components (n, e) are just freed. All
 * pointers, crt_mode and key_sz are reset so the ctx can take a new key.
 */
static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
	/* CRT components are allocated at half the modulus size. */
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	if (ctx->p) {
		memset(ctx->p, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	}
	if (ctx->q) {
		memset(ctx->q, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	}
	if (ctx->dp) {
		memset(ctx->dp, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	}
	if (ctx->dq) {
		memset(ctx->dq, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	}
	if (ctx->qinv) {
		memset(ctx->qinv, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ctx->p = NULL;
	ctx->q = NULL;
	ctx->dp = NULL;
	ctx->dq = NULL;
	ctx->qinv = NULL;
	ctx->crt_mode = false;
	ctx->key_sz = 0;
}
1199 | ||
/**
 * qat_rsa_setkey() - parse a DER-encoded key and load it into @tfm's ctx.
 * @tfm:     akcipher transform.
 * @key:     BER/DER encoded RSA key.
 * @keylen:  length of @key in bytes.
 * @private: true for a private key, false for a public key.
 *
 * Return: 0 on success or a negative errno. On any failure the ctx is
 * cleared so no partial key material remains.
 */
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct rsa_key rsa_key;
	int ret;

	/* Drop any previously installed key before parsing the new one. */
	qat_rsa_clear_ctx(dev, ctx);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		goto free;

	/* n first: it establishes ctx->key_sz for the other components. */
	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
	if (ret < 0)
		goto free;
	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;
	if (private) {
		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;
		/* Best effort (void return): on failure crt_mode stays false
		 * and decrypt falls back to the d-based path.
		 */
		qat_rsa_setkey_crt(ctx, &rsa_key);
	}

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
		/* invalid private key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	qat_rsa_clear_ctx(dev, ctx);
	return ret;
}
1246 | ||
22287b0b TS |
/* .set_pub_key hook: a public key carries no private parts, so delegate
 * to the common setkey path with private == false.
 */
static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, 0);
}
1252 | ||
/* .set_priv_key hook: delegate to the common setkey path with
 * private == true so d (and, when possible, the CRT parts) are loaded.
 */
static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, 1);
}
1258 | ||
515c4d27 | 1259 | static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm) |
22287b0b TS |
1260 | { |
1261 | struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | |
1262 | ||
515c4d27 | 1263 | return ctx->key_sz; |
22287b0b TS |
1264 | } |
1265 | ||
a9905320 TS |
1266 | static int qat_rsa_init_tfm(struct crypto_akcipher *tfm) |
1267 | { | |
1268 | struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | |
1269 | struct qat_crypto_instance *inst = | |
1270 | qat_crypto_get_instance_node(get_current_node()); | |
1271 | ||
1272 | if (!inst) | |
1273 | return -EINVAL; | |
1274 | ||
1275 | ctx->key_sz = 0; | |
1276 | ctx->inst = inst; | |
1277 | return 0; | |
1278 | } | |
1279 | ||
1280 | static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm) | |
1281 | { | |
1282 | struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); | |
1283 | struct device *dev = &GET_DEV(ctx->inst->accel_dev); | |
1284 | ||
1285 | if (ctx->n) | |
1286 | dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); | |
1287 | if (ctx->e) | |
1288 | dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); | |
1289 | if (ctx->d) { | |
1290 | memset(ctx->d, '\0', ctx->key_sz); | |
1291 | dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); | |
1292 | } | |
1293 | qat_crypto_put_instance(ctx->inst); | |
1294 | ctx->n = NULL; | |
738f9823 | 1295 | ctx->e = NULL; |
a9905320 TS |
1296 | ctx->d = NULL; |
1297 | } | |
1298 | ||
/*
 * Raw RSA exposed through the akcipher API. .sign reuses the private-key
 * operation (qat_rsa_dec) and .verify the public-key one (qat_rsa_enc).
 * reqsize reserves room for the per-request qat_asym_request plus 64
 * bytes of slack (presumably for alignment - confirm against
 * qat_asym_request's users).
 */
static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.sign = qat_rsa_dec,
	.verify = qat_rsa_enc,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};
1318 | ||
c9839143 SB |
/*
 * Diffie-Hellman exposed through the kpp API. Both the public-key
 * generation and the shared-secret computation map onto the same modular
 * exponentiation entry point, qat_dh_compute_value.
 */
static struct kpp_alg dh = {
	.set_secret = qat_dh_set_secret,
	.generate_public_key = qat_dh_compute_value,
	.compute_shared_secret = qat_dh_compute_value,
	.max_size = qat_dh_max_size,
	.init = qat_dh_init_tfm,
	.exit = qat_dh_exit_tfm,
	.reqsize = sizeof(struct qat_asym_request) + 64,
	.base = {
		.cra_name = "dh",
		.cra_driver_name = "qat-dh",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_dh_ctx),
	},
};
1335 | ||
a9905320 TS |
1336 | int qat_asym_algs_register(void) |
1337 | { | |
8f5ea2df TS |
1338 | int ret = 0; |
1339 | ||
1340 | mutex_lock(&algs_lock); | |
1341 | if (++active_devs == 1) { | |
1342 | rsa.base.cra_flags = 0; | |
1343 | ret = crypto_register_akcipher(&rsa); | |
c9839143 SB |
1344 | if (ret) |
1345 | goto unlock; | |
1346 | ret = crypto_register_kpp(&dh); | |
8f5ea2df | 1347 | } |
c9839143 | 1348 | unlock: |
8f5ea2df TS |
1349 | mutex_unlock(&algs_lock); |
1350 | return ret; | |
a9905320 TS |
1351 | } |
1352 | ||
1353 | void qat_asym_algs_unregister(void) | |
1354 | { | |
8f5ea2df | 1355 | mutex_lock(&algs_lock); |
c9839143 | 1356 | if (--active_devs == 0) { |
8f5ea2df | 1357 | crypto_unregister_akcipher(&rsa); |
c9839143 SB |
1358 | crypto_unregister_kpp(&dh); |
1359 | } | |
8f5ea2df | 1360 | mutex_unlock(&algs_lock); |
a9905320 | 1361 | } |