// SPDX-License-Identifier: GPL-2.0
/*
 * amlogic-cipher.c - hardware cryptographic offloader for Amlogic GXL SoC
 *
 * Copyright (C) 2018-2019 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * key sizes in CBC and ECB mode.
 */

#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <crypto/internal/skcipher.h>
#include "amlogic-gxl.h"

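/*
 * get_engine_number() - spread requests round-robin over the MAXFLOW
 * hardware flows via an atomically incremented counter.
 */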
static int get_engine_number(struct meson_dev *mc)
{
	return atomic_inc_return(&mc->flow) % MAXFLOW;
}

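/*
 * meson_cipher_need_fallback() - decide whether the hardware can process
 * this request. It requires matching src/dst scatterlist shapes, lengths
 * that are a multiple of the 16-byte AES block, 32-bit aligned offsets,
 * and at most MAXDESC - 3 entries (three descriptors are reserved for the
 * key and IV). Everything else goes to the software fallback.
 */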
static bool meson_cipher_need_fallback(struct skcipher_request *areq)
{
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;

	if (areq->cryptlen == 0)
		return true;

	if (sg_nents(src_sg) != sg_nents(dst_sg))
		return true;

	/* KEY/IV descriptors use 3 desc */
	if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
		return true;

	while (src_sg && dst_sg) {
		if ((src_sg->length % 16) != 0)
			return true;
		if ((dst_sg->length % 16) != 0)
			return true;
		if (src_sg->length != dst_sg->length)
			return true;
		if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
			return true;
		if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
			return true;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

	return false;
}

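/*
 * meson_cipher_do_fallback() - process the request with the software
 * fallback, using an on-stack sync request mirroring the original one
 * so the caller's request is left untouched.
 */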
static int meson_cipher_do_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;
#endif
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback_tfm);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif
	skcipher_request_set_sync_tfm(req, op->fallback_tfm);
	skcipher_request_set_callback(req, areq->base.flags, NULL, NULL);
	skcipher_request_set_crypt(req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir == MESON_DECRYPT)
		err = crypto_skcipher_decrypt(req);
	else
		err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	return err;
}

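/*
 * meson_cipher() - process one request on the hardware: build the key/IV
 * descriptors, map the scatterlists, append one cipher descriptor per
 * entry, start the flow and wait for completion.
 */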
static int meson_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct meson_dev *mc = op->mc;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;
	int flow = rctx->flow;
	unsigned int todo, eat, len;
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;
	struct meson_desc *desc;
	int nr_sgs, nr_sgd;
	int i, err = 0;
	unsigned int keyivlen, ivsize, offset, tloffset;
	dma_addr_t phykeyiv;
	void *backup_iv = NULL, *bkeyiv;
	__le32 v;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);

	dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, crypto_skcipher_ivsize(tfm),
		op->keylen, flow);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	algt->stat_req++;
	mc->chanlist[flow].stat_req++;
#endif

	/*
	 * The hardware expects a list of meson_desc structures.
	 * The first two structures store the key, the third stores the IV.
	 */
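	/* 32 bytes for the key (enough for AES-256) plus 16 bytes for the IV */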
	bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
	if (!bkeyiv)
		return -ENOMEM;

	memcpy(bkeyiv, op->key, op->keylen);
	keyivlen = op->keylen;

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		if (ivsize > areq->cryptlen) {
			dev_err(mc->dev, "invalid ivsize=%u vs len=%u\n",
				ivsize, areq->cryptlen);
			err = -EINVAL;
			goto theend;
		}
		memcpy(bkeyiv + 32, areq->iv, ivsize);
		keyivlen = 48;
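		/*
		 * For CBC decryption the next chaining IV is the last
		 * ciphertext block: save it now, before an in-place
		 * operation can overwrite it with plaintext.
		 */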
		if (rctx->op_dir == MESON_DECRYPT) {
			backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!backup_iv) {
				err = -ENOMEM;
				goto theend;
			}
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
						 ivsize, 0);
		}
	}
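	/*
	 * A 24-byte AES-192 key is not a multiple of the 16-byte chunks
	 * moved by the key descriptors, so pad the DMA length to 32;
	 * bkeyiv was zero-allocated, so the padding bytes are zero.
	 */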
	if (keyivlen == 24)
		keyivlen = 32;

	phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
				  DMA_TO_DEVICE);
	err = dma_mapping_error(mc->dev, phykeyiv);
	if (err) {
		dev_err(mc->dev, "Cannot DMA MAP KEY IV\n");
		goto theend;
	}

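	/*
	 * Build the key/IV descriptors. Each descriptor moves 16 bytes of
	 * key material with MODE_KEY set; t_dst here appears to be an
	 * offset into the engine's internal key buffer rather than a DMA
	 * address.
	 */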
	tloffset = 0;
	eat = 0;
	i = 0;
	while (keyivlen > eat) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));
		todo = min(keyivlen - eat, 16u);
		desc->t_src = cpu_to_le32(phykeyiv + i * 16);
		desc->t_dst = cpu_to_le32(i * 16);
		v = (MODE_KEY << 20) | DESC_OWN | 16;
		desc->t_status = cpu_to_le32(v);

		eat += todo;
		i++;
		tloffset++;
	}

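	/*
	 * Map the data scatterlists: a single bidirectional mapping for
	 * in-place requests, separate per-direction mappings otherwise.
	 */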
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
			err = -EINVAL;
			goto theend;
		}
	}

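	/*
	 * Add one cipher descriptor per scatterlist entry. t_status encodes
	 * the key mode, block mode, direction and length; DESC_OWN hands the
	 * descriptor to the hardware and DESC_LAST marks the end of the list.
	 */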
	src_sg = areq->src;
	dst_sg = areq->dst;
	len = areq->cryptlen;
	while (src_sg) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));

		desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
		desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
		todo = min(len, sg_dma_len(src_sg));
		v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
		if (rctx->op_dir)
			v |= DESC_ENCRYPTION;
		len -= todo;

		if (!sg_next(src_sg))
			v |= DESC_LAST;
		desc->t_status = cpu_to_le32(v);
		tloffset++;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

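	/*
	 * Kick the engine by writing the physical address of the descriptor
	 * list into the per-flow register (bit 1 appears to be the start
	 * bit), then wait for the interrupt handler to post the completion;
	 * status staying at 0 after the 500ms timeout is treated as an error.
	 */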
	reinit_completion(&mc->chanlist[flow].complete);
	mc->chanlist[flow].status = 0;
	writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
	wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
						  msecs_to_jiffies(500));
	if (mc->chanlist[flow].status == 0) {
		dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
		err = -EINVAL;
	}

	dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);

	if (areq->src == areq->dst) {
		dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

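	/*
	 * Give the next chaining IV back to the caller: the saved last
	 * ciphertext block for decryption, the last output block for
	 * encryption.
	 */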
	if (areq->iv && ivsize > 0) {
		if (rctx->op_dir == MESON_DECRYPT) {
			memcpy(areq->iv, backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst,
						 areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}
theend:
	kzfree(bkeyiv);
	kzfree(backup_iv);

	return err;
}

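/*
 * meson_handle_cipher_request() - crypto_engine callback: run one request
 * on the hardware and report its result back to the engine.
 */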
static int meson_handle_cipher_request(struct crypto_engine *engine,
				       void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = meson_cipher(breq);
	crypto_finalize_skcipher_request(engine, breq, err);

	return 0;
}

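/*
 * meson_skdecrypt()/meson_skencrypt() - skcipher entry points. Requests the
 * hardware cannot handle run synchronously on the software fallback; all
 * others are queued to the engine of a round-robin-selected flow.
 */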
int meson_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_DECRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int meson_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_ENCRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

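/*
 * meson_cipher_init() - tfm init: allocate the software fallback and wire
 * up the crypto_engine request handler.
 */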
int meson_cipher_init(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct meson_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);

	memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	op->mc = algt->mc;

	sktfm->reqsize = sizeof(struct meson_cipher_req_ctx);

	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	op->enginectx.op.do_one_request = meson_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	return 0;
}

void meson_cipher_exit(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	if (op->key) {
		memzero_explicit(op->key, op->keylen);
		kfree(op->key);
	}
	crypto_free_sync_skcipher(op->fallback_tfm);
}

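/*
 * meson_aes_setkey() - check the AES key size, keep a DMA-able copy of the
 * key (zeroizing any previous one) and mirror it into the fallback tfm.
 */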
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen)
{
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_dev *mc = op->mc;

	switch (keylen) {
	case 128 / 8:
		op->keymode = MODE_AES_128;
		break;
	case 192 / 8:
		op->keymode = MODE_AES_192;
		break;
	case 256 / 8:
		op->keymode = MODE_AES_256;
		break;
	default:
		dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	if (op->key) {
		memzero_explicit(op->key, op->keylen);
		kfree(op->key);
	}
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}
381 | } |