/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The following memory management concept is used:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendpage/sendmsg. Filling
 * up the TX SGL does not cause a crypto operation -- the data will only be
 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
 * provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request are released. The extracted TX SGL parts are released together
 * with the RX SGL release.
 */
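
/*
 * Illustrative user-space usage (not part of the kernel build): a minimal
 * sketch of driving this interface through an AF_ALG socket for "cbc(aes)".
 * The constants and structures come from <linux/if_alg.h>; error handling
 * is omitted and the key, IV and plaintext values are placeholders.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_alg.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_alg sa = {
 *			.salg_family = AF_ALG,
 *			.salg_type   = "skcipher",
 *			.salg_name   = "cbc(aes)",
 *		};
 *		unsigned char key[16] = { 0 };		// placeholder key
 *		unsigned char iv[16] = { 0 };		// placeholder IV
 *		unsigned char pt[16] = "0123456789abcde";
 *		unsigned char ct[16];
 *		// 20 = sizeof(struct af_alg_iv) + 16-byte IV
 *		char cbuf[CMSG_SPACE(4) + CMSG_SPACE(20)] = { 0 };
 *		struct iovec iov = { .iov_base = pt, .iov_len = sizeof(pt) };
 *		struct msghdr msg = { 0 };
 *		struct cmsghdr *cmsg;
 *		struct af_alg_iv *alg_iv;
 *		int tfmfd, opfd;
 *
 *		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *		setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
 *		opfd = accept(tfmfd, NULL, 0);
 *
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *		msg.msg_iov = &iov;
 *		msg.msg_iovlen = 1;
 *
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_OP;
 *		cmsg->cmsg_len = CMSG_LEN(4);
 *		*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 *
 *		cmsg = CMSG_NXTHDR(&msg, cmsg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_IV;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(*alg_iv) + sizeof(iv));
 *		alg_iv = (void *)CMSG_DATA(cmsg);
 *		alg_iv->ivlen = sizeof(iv);
 *		memcpy(alg_iv->iv, iv, sizeof(iv));
 *
 *		sendmsg(opfd, &msg, 0);		// fills the TX SGL
 *		read(opfd, ct, sizeof(ct));	// recvmsg path runs the cipher
 *
 *		close(opfd);
 *		close(tfmfd);
 *		return 0;
 *	}
 */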

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct skcipher_tfm {
	struct crypto_skcipher *skcipher;
	bool has_key;
};

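/*
 * Pass user data on to the common AF_ALG sendmsg handler; the IV size of
 * the cipher is supplied so that an IV passed via the ALG_SET_IV control
 * message can be validated and copied.
 */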
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);

	return af_alg_sendmsg(sock, msg, size, ivsize);
}

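/*
 * Process a single recvmsg request: map the caller's buffers into the RX
 * SGL, carve the matching amount of data out of the TX SGL and run the
 * cipher over it, either synchronously or as an AIO operation.
 */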
static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned int bs = crypto_skcipher_blocksize(tfm);
	struct af_alg_async_req *areq;
	int err = 0;
	size_t len = 0;

	if (!ctx->used) {
		err = af_alg_wait_for_data(sk, flags);
		if (err)
			return err;
	}

	/* Allocate cipher request for current operation. */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_skcipher_reqsize(tfm));
	if (IS_ERR(areq))
		return PTR_ERR(areq);

	/* Convert the iovecs of the output buffers into an RX SGL. */
	err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len);
	if (err)
		goto free;

	/* Process only as many RX buffers as we have TX data for. */
	if (len > ctx->used)
		len = ctx->used;

	/*
	 * If more data is expected (either more data to be sent by user
	 * space or TX data left over for a later request), process only
	 * full block size buffers.
	 */
	if (ctx->more || len < ctx->used)
		len -= len % bs;

	/*
	 * Create a per-request TX SGL for this request which tracks the
	 * SG entries pulled from the global TX SGL.
	 */
	areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
	if (!areq->tsgl_entries)
		areq->tsgl_entries = 1;
	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
				  GFP_KERNEL);
	if (!areq->tsgl) {
		err = -ENOMEM;
		goto free;
	}
	sg_init_table(areq->tsgl, areq->tsgl_entries);
	af_alg_pull_tsgl(sk, len, areq->tsgl, 0);

	/* Initialize the crypto operation */
	skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
	skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
				   areq->first_rsgl.sgl.sg, len, ctx->iv);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		sock_hold(sk);
		areq->iocb = msg->msg_iocb;

		/* Remember the output size that will be generated. */
		areq->outlen = len;

		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      af_alg_async_cb, areq);
		err = ctx->enc ?
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);

		/* AIO operation in progress */
		if (err == -EINPROGRESS || err == -EBUSY)
			return -EIOCBQUEUED;

		sock_put(sk);
	} else {
		/* Synchronous operation */
		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      CRYPTO_TFM_REQ_MAY_SLEEP |
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &ctx->wait);
		err = crypto_wait_req(ctx->enc ?
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
				      &ctx->wait);
	}

free:
	af_alg_free_resources(areq);

	return err ? err : len;
}

static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _skcipher_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, it must issue separate
		 * AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	af_alg_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}

static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	af_alg_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	af_alg_poll,
};

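/*
 * Verify that a key has been set on the parent tfm socket before the first
 * operation on this request socket proceeds; once the check has passed it
 * is recorded in ask->refcnt and not repeated.
 */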
static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct skcipher_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}

static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
				       int offset, size_t size, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return af_alg_sendpage(sock, page, offset, size, flags);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg_nokey,
	.sendpage	=	skcipher_sendpage_nokey,
	.recvmsg	=	skcipher_recvmsg_nokey,
	.poll		=	af_alg_poll,
};

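/*
 * Allocate the tfm wrapper and the skcipher transform for the algorithm
 * requested at bind() time.
 */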
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	struct skcipher_tfm *tfm;
	struct crypto_skcipher *skcipher;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	skcipher = crypto_alloc_skcipher(name, type, mask);
	if (IS_ERR(skcipher)) {
		kfree(tfm);
		return ERR_CAST(skcipher);
	}

	tfm->skcipher = skcipher;

	return tfm;
}

static void skcipher_release(void *private)
{
	struct skcipher_tfm *tfm = private;

	crypto_free_skcipher(tfm->skcipher);
	kfree(tfm);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct skcipher_tfm *tfm = private;
	int err;

	err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
	tfm->has_key = !err;

	return err;
}

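/*
 * Tear down a request socket: drop any unprocessed TX data, zero and free
 * the IV, free the context and release the reference on the parent socket.
 */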
static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;

	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

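/*
 * Set up the per-request-socket context: allocate and zero the IV and
 * initialize the TX SGL list and the synchronous wait object.
 */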
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct af_alg_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_skcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	ctx->used = 0;
	atomic_set(&ctx->rcvused, 0);
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	crypto_init_wait(&ctx->wait);

	ask->private = ctx;

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

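/*
 * Refuse to create a regular request socket if the cipher requires a key
 * and none has been set yet.
 */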
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_tfm *tfm = private;

	if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.accept_nokey	=	skcipher_accept_parent_nokey,
	.ops		=	&algif_skcipher_ops,
	.ops_nokey	=	&algif_skcipher_ops_nokey,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);

	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");