/*
 * Handle async block requests with a crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
14 | ||
15 | #include <linux/err.h> | |
16 | #include <linux/delay.h> | |
17 | #include <crypto/engine.h> | |
18 | #include <crypto/internal/hash.h> | |
19 | #include <uapi/linux/sched/types.h> | |
20 | #include "internal.h" | |
21 | ||
22 | #define CRYPTO_ENGINE_MAX_QLEN 10 | |
23 | ||
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ahash_request *hreq;
	struct ablkcipher_request *breq;
	unsigned long flags;
	bool was_busy = false;
	int ret, rtype;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	rtype = crypto_tfm_alg_type(engine->cur_req->tfm);
	/* By now we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		if (engine->prepare_hash_request) {
			ret = engine->prepare_hash_request(engine, hreq);
			if (ret) {
				dev_err(engine->dev, "failed to prepare request: %d\n",
					ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->hash_one_request(engine, hreq);
		if (ret) {
			dev_err(engine->dev, "failed to hash one request from queue\n");
			goto req_err;
		}
		return;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		if (engine->prepare_cipher_request) {
			ret = engine->prepare_cipher_request(engine, breq);
			if (ret) {
				dev_err(engine->dev, "failed to prepare request: %d\n",
					ret);
				goto req_err;
			}
			engine->cur_req_prepared = true;
		}
		ret = engine->cipher_one_request(engine, breq);
		if (ret) {
			dev_err(engine->dev, "failed to cipher one request from queue\n");
			goto req_err;
		}
		return;
	default:
		dev_err(engine->dev, "failed to prepare request of unknown type\n");
		return;
	}

req_err:
	switch (rtype) {
	case CRYPTO_ALG_TYPE_AHASH:
		hreq = ahash_request_cast(engine->cur_req);
		crypto_finalize_hash_request(engine, hreq, ret);
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		breq = ablkcipher_request_cast(engine->cur_req);
		crypto_finalize_cipher_request(engine, breq, ret);
		break;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}
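
/*
 * Illustrative sketch (not part of this file): the pump above relies on
 * driver-supplied ->hash_one_request()/->cipher_one_request() callbacks
 * that start the hardware and return immediately; the result is reported
 * later, typically from the driver's interrupt handler, through the
 * crypto_finalize_*_request() helpers. "foo_dev", "foo_start_hash_dma"
 * and the drvdata lookup are hypothetical driver details.
 *
 *	static int foo_hash_one_request(struct crypto_engine *engine,
 *					struct ahash_request *req)
 *	{
 *		struct foo_dev *fdev = dev_get_drvdata(engine->dev);
 *
 *		fdev->cur_hreq = req;
 *		foo_start_hash_dma(fdev, req->src, req->nbytes);
 *
 *		return 0;
 *	}
 */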

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_cipher_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: true to kick the request pump if the engine is not busy
 */
int crypto_transfer_cipher_request(struct crypto_engine *engine,
				   struct ablkcipher_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request);

/**
 * crypto_transfer_cipher_request_to_engine - transfer one request to the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_cipher_request_to_engine(struct crypto_engine *engine,
					     struct ablkcipher_request *req)
{
	return crypto_transfer_cipher_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_cipher_request_to_engine);
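
/*
 * Illustrative sketch (not part of this file): a driver's ablkcipher
 * .encrypt or .decrypt hook would typically just hand the request to its
 * engine and propagate the enqueue status (normally -EINPROGRESS).
 * "foo_ctx" and its engine pointer are hypothetical driver details.
 *
 *	static int foo_aes_encrypt(struct ablkcipher_request *req)
 *	{
 *		struct foo_ctx *ctx =
 *			crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
 *
 *		return crypto_transfer_cipher_request_to_engine(ctx->engine,
 *								req);
 *	}
 */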

/**
 * crypto_transfer_hash_request - transfer the new request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: true to kick the request pump if the engine is not busy
 */
int crypto_transfer_hash_request(struct crypto_engine *engine,
				 struct ahash_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ahash_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request);

/**
 * crypto_transfer_hash_request_to_engine - transfer one request to the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_hash_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_finalize_cipher_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_cipher_request(struct crypto_engine *engine,
				    struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_cipher_request) {
			ret = engine->unprepare_cipher_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
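
/*
 * Illustrative sketch (not part of this file): a driver typically calls
 * this finalize helper from its interrupt handler or completion tasklet
 * once the hardware is done; the helper runs the request's completion
 * callback and re-queues the pump so the next request gets processed.
 * "foo_dev", "foo_check_status" and the cur_breq field are hypothetical.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_dev *fdev = data;
 *		int err = foo_check_status(fdev) ? -EIO : 0;
 *
 *		crypto_finalize_cipher_request(fdev->engine,
 *					       fdev->cur_breq, err);
 *
 *		return IRQ_HANDLED;
 *	}
 */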

/**
 * crypto_finalize_hash_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == &req->base)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared &&
		    engine->unprepare_hash_request) {
			ret = engine->unprepare_hash_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * for a while to let the request pump drain the queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether the request pump should run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
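
/*
 * Illustrative sketch (not part of this file): typical probe-time setup
 * for a driver using this framework: allocate the engine, install the
 * driver callbacks and start the request pump. "fdev" and the foo_*
 * callbacks are hypothetical.
 *
 *	fdev->engine = crypto_engine_alloc_init(dev, true);
 *	if (!fdev->engine)
 *		return -ENOMEM;
 *
 *	fdev->engine->prepare_cipher_request = foo_prepare_cipher_request;
 *	fdev->engine->cipher_one_request = foo_cipher_one_request;
 *
 *	ret = crypto_engine_start(fdev->engine);
 *	if (ret) {
 *		crypto_engine_exit(fdev->engine);
 *		return ret;
 *	}
 */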

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
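
/*
 * Illustrative sketch (not part of this file): the matching teardown in a
 * driver's remove path stops the pump and destroys the worker; the engine
 * structure itself is devm-allocated, so it is freed with the device.
 * "foo_remove" and "foo_dev" are hypothetical.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct foo_dev *fdev = platform_get_drvdata(pdev);
 *
 *		return crypto_engine_exit(fdev->engine);
 *	}
 */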

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");