/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * ciphers and architectures.
 */
#define MAX_CIPHER_BLOCKSIZE	16
#define MAX_CIPHER_ALIGNMASK	15

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

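/*
 * A crypto_type describes a transform "frontend" (shash, ahash, aead and
 * friends): how much context a tfm of that type needs (ctxsize/extsize),
 * how to initialise it (init/init_tfm), and how to present the algorithm
 * via /proc/crypto (show) or the crypto_user netlink interface (report).
 * The type/maskclear/maskset/tfmsize fields steer algorithm lookup and
 * tfm allocation for that frontend.
 */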
struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;
	struct hlist_node list;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	struct crypto_instance *(*alloc)(struct rtattr **tb);
	void (*free)(struct crypto_instance *inst);
	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};
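
/*
 * Rough usage sketch (illustrative only: the "example" names are invented
 * and attribute parsing/error handling are elided).  A template module
 * fills in a crypto_template, registers it at module init time, and lets
 * its ->create() callback build and register an instance whenever the
 * template is instantiated:
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct crypto_instance *inst;
 *
 *		// allocate inst (typically via crypto_alloc_instance()),
 *		// fill in inst->alg from the algorithm named in tb,
 *		// then hand it to the core:
 *		return crypto_register_instance(tmpl, inst);
 *	}
 *
 *	static struct crypto_template example_tmpl = {
 *		.name	= "example",
 *		.create	= example_create,
 *		.module	= THIS_MODULE,
 *	};
 *
 *	// module_init:  crypto_register_template(&example_tmpl);
 *	// module_exit:  crypto_unregister_template(&example_tmpl);
 */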

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	struct crypto_instance *inst;
	const struct crypto_type *frontend;
	u32 mask;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct blkcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			u8 *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	void *page;
	u8 *buffer;
	u8 *iv;
	unsigned int ivsize;

	int flags;
	unsigned int walk_blocksize;
	unsigned int cipher_blocksize;
	unsigned int alignmask;
};

struct ablkcipher_walk {
	struct {
		struct page *page;
		unsigned int offset;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;
	struct scatter_walk out;
	unsigned int total;
	struct list_head buffers;
	u8 *iv_buffer;
	u8 *iv;
	int flags;
	unsigned int blocksize;
};

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
void crypto_unregister_template(struct crypto_template *tmpl);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
		      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
		       struct crypto_instance *inst,
		       const struct crypto_type *frontend);
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
		      u32 type, u32 mask);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
				    struct crypto_instance *inst)
{
	spawn->inst = inst;
}
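
/*
 * Rough lifecycle sketch (illustrative only; ctx/alg/inst are whatever the
 * instance keeps in its context).  A crypto_spawn is how a template
 * instance holds a reference on the algorithm it wraps:
 *
 *	// while building the instance, grab the underlying algorithm
 *	// (the mask shown is just an example constraint):
 *	err = crypto_init_spawn(&ctx->spawn, alg, inst, CRYPTO_ALG_TYPE_MASK);
 *
 *	// in the instance's cra_init, turn the spawn into a usable tfm,
 *	// e.g. via one of the crypto_spawn_*() helpers below:
 *	cipher = crypto_spawn_cipher(&ctx->spawn);
 *
 *	// when the instance is torn down:
 *	crypto_drop_spawn(&ctx->spawn);
 */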

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
				    const struct crypto_type *frontend,
				    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
						 u32 type, u32 mask)
{
	return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);
void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
			     unsigned int head);
struct crypto_instance *crypto_alloc_instance(const char *name,
					      struct crypto_alg *alg);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}
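
/*
 * Rough usage sketch (illustrative only; dev/req are driver-local names).
 * A driver typically keeps one crypto_queue per device, feeds it from its
 * request entry points and drains it from a worker:
 *
 *	crypto_init_queue(&dev->queue, 50);	// arbitrary backlog limit
 *
 *	// request entry point:
 *	err = crypto_enqueue_request(&dev->queue, &req->base);
 *
 *	// worker:
 *	backlog = crypto_get_backlog(&dev->queue);
 *	async_req = crypto_dequeue_request(&dev->queue);
 *	if (backlog)
 *		backlog->complete(backlog, -EINPROGRESS);
 *	// ... process async_req, then call its ->complete() ...
 */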

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}
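
/*
 * Typical use (illustrative): XOR a keystream or previous block into a
 * buffer in place, e.g. in a block cipher mode implementation:
 *
 *	crypto_xor(dst, keystream, bsize);	// dst ^= keystream
 *
 * The word-at-a-time fast path above is only taken when the architecture
 * handles unaligned accesses efficiently and size is a compile-time
 * constant multiple of sizeof(unsigned long); otherwise the generic
 * __crypto_xor() does the work.
 */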

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize);
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
				       struct scatterlist *dst,
				       struct scatterlist *src,
				       unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
}
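
/*
 * Rough usage sketch (illustrative only; desc, dst, src, nbytes and bsize
 * come from the surrounding ->encrypt()/->decrypt() implementation).  A
 * blkcipher implementation walks the scatterlists in block-sized chunks:
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *
 *	while ((nbytes = walk.nbytes) != 0) {
 *		// process the full blocks between walk.src.virt.addr and
 *		// walk.dst.virt.addr, then report the unprocessed remainder
 *		err = blkcipher_walk_done(desc, &walk, nbytes % bsize);
 *	}
 *	return err;
 */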

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
					struct scatterlist *dst,
					struct scatterlist *src,
					unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
	INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	if (unlikely(!list_empty(&walk->buffers)))
		__ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
					     struct ablkcipher_request *request)
{
	return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
	struct crypto_queue *queue)
{
	return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
	return req->__ctx;
}

static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
					  struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
						     u32 type, u32 mask)
{
	return crypto_attr_alg(tb[1], type, mask);
}

static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
	return (type ^ off) & mask & off;
}

/*
 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
 * Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
	return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
}

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
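
/*
 * Example (illustrative): authentication tag comparisons should use
 * crypto_memneq() rather than memcmp() so the time taken does not depend
 * on where the first mismatching byte is:
 *
 *	if (crypto_memneq(computed_tag, received_tag, authsize))
 *		return -EBADMSG;
 */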

static inline void crypto_yield(u32 flags)
{
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
#endif
}

#endif	/* _CRYPTO_ALGAPI_H */