crypto/algif_skcipher.c
/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
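
/*
 * Illustrative user-space usage (a minimal sketch, not part of this file):
 * the caller opens an AF_ALG socket, binds it to an skcipher algorithm,
 * sets the key on the bound socket, then accept()s an operation socket.
 * The operation and IV travel as SOL_ALG control messages on sendmsg(),
 * the plaintext/ciphertext as ordinary socket data.  The algorithm name
 * and key size below are example choices only; <linux/if_alg.h> and
 * <sys/socket.h> are assumed.
 *
 *      int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *      struct sockaddr_alg sa = {
 *              .salg_family = AF_ALG,
 *              .salg_type   = "skcipher",
 *              .salg_name   = "cbc(aes)",      <-- example algorithm
 *      };
 *      bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *      setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *      int opfd = accept(tfmfd, NULL, 0);
 *
 *      Per request: attach ALG_SET_OP (ALG_OP_ENCRYPT/ALG_OP_DECRYPT) and
 *      ALG_SET_IV control messages to a msghdr, put the input data in the
 *      iovec, then:
 *
 *      sendmsg(opfd, &msg, 0);
 *      read(opfd, out, out_len);
 */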

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

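/*
 * One transmit buffer bundle: a page-sized allocation holding up to
 * MAX_SGL_ENTS data-bearing scatterlist entries plus one spare entry used
 * to chain to the next bundle.  Bundles are linked on ctx->tsgl and
 * sgl->cur counts the entries currently in use.
 */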
struct skcipher_sg_list {
        struct list_head list;

        int cur;

        struct scatterlist sg[0];
};

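/*
 * Per-socket cipher state: the list of queued transmit bundles (tsgl),
 * the receive-side scatterlist (rsgl), the IV buffer, the number of
 * queued bytes (used), and the ablkcipher request embedded at the end of
 * the allocation (len covers the struct plus the request).
 */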
struct skcipher_ctx {
        struct list_head tsgl;
        struct af_alg_sgl rsgl;

        void *iv;

        struct af_alg_completion completion;

        unsigned used;

        unsigned int len;
        bool more;
        bool merge;
        bool enc;

        struct ablkcipher_request req;
};

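/*
 * Number of data-bearing scatterlist entries that fit in one page-sized
 * bundle; the final entry is reserved for chaining to the next bundle.
 */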
#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
                      sizeof(struct scatterlist) - 1)

static inline int skcipher_sndbuf(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;

        return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
                          ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
        return PAGE_SIZE <= skcipher_sndbuf(sk);
}

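/*
 * Ensure the tail transmit bundle has room for another entry, allocating
 * a fresh bundle and chaining it to the previous one when necessary.
 */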
static int skcipher_alloc_sgl(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg = NULL;

        sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
        if (!list_empty(&ctx->tsgl))
                sg = sgl->sg;

        if (!sg || sgl->cur >= MAX_SGL_ENTS) {
                sgl = sock_kmalloc(sk, sizeof(*sgl) +
                                       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
                                   GFP_KERNEL);
                if (!sgl)
                        return -ENOMEM;

                sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
                sgl->cur = 0;

                if (sg)
                        scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

                list_add_tail(&sgl->list, &ctx->tsgl);
        }

        return 0;
}

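/*
 * Release "used" bytes from the front of the transmit queue, dropping the
 * page references and freeing bundles that have been fully consumed.
 */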
static void skcipher_pull_sgl(struct sock *sk, int used)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        int i;

        while (!list_empty(&ctx->tsgl)) {
                sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
                                       list);
                sg = sgl->sg;

                for (i = 0; i < sgl->cur; i++) {
                        int plen = min_t(int, used, sg[i].length);

                        if (!sg_page(sg + i))
                                continue;

                        sg[i].length -= plen;
                        sg[i].offset += plen;

                        used -= plen;
                        ctx->used -= plen;

                        if (sg[i].length)
                                return;

                        put_page(sg_page(sg + i));
                        sg_assign_page(sg + i, NULL);
                }

                list_del(&sgl->list);
                sock_kfree_s(sk, sgl,
                             sizeof(*sgl) + sizeof(sgl->sg[0]) *
                                            (MAX_SGL_ENTS + 1));
        }

        if (!ctx->used)
                ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;

        skcipher_pull_sgl(sk, ctx->used);
}

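/*
 * Wait until the socket becomes writable again (a full page of send
 * buffer is available); returns -EAGAIN immediately for non-blocking
 * callers and -ERESTARTSYS if interrupted by a signal.
 */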
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
        long timeout;
        DEFINE_WAIT(wait);
        int err = -ERESTARTSYS;

        if (flags & MSG_DONTWAIT)
                return -EAGAIN;

        set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        for (;;) {
                if (signal_pending(current))
                        break;
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
                        err = 0;
                        break;
                }
        }
        finish_wait(sk_sleep(sk), &wait);

        return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
        struct socket_wq *wq;

        if (!skcipher_writable(sk))
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        rcu_read_unlock();
}

static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        long timeout;
        DEFINE_WAIT(wait);
        int err = -ERESTARTSYS;

        if (flags & MSG_DONTWAIT)
                return -EAGAIN;

        set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

        for (;;) {
                if (signal_pending(current))
                        break;
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                timeout = MAX_SCHEDULE_TIMEOUT;
                if (sk_wait_event(sk, &timeout, ctx->used)) {
                        err = 0;
                        break;
                }
        }
        finish_wait(sk_sleep(sk), &wait);

        clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);

        return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct socket_wq *wq;

        if (!ctx->used)
                return;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
                                                           POLLRDNORM |
                                                           POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        rcu_read_unlock();
}

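/*
 * Queue plaintext/ciphertext from user space.  The first chunk of a
 * request may carry ALG_SET_OP and ALG_SET_IV control messages; the data
 * itself is copied into page-backed transmit bundles, merging into the
 * tail page when the previous write left it partially filled.
 */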
static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
                            struct msghdr *msg, size_t size)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
        unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
        struct skcipher_sg_list *sgl;
        struct af_alg_control con = {};
        long copied = 0;
        bool enc = 0;
        int err;
        int i;

        if (msg->msg_controllen) {
                err = af_alg_cmsg_send(msg, &con);
                if (err)
                        return err;

                switch (con.op) {
                case ALG_OP_ENCRYPT:
                        enc = 1;
                        break;
                case ALG_OP_DECRYPT:
                        enc = 0;
                        break;
                default:
                        return -EINVAL;
                }

                if (con.iv && con.iv->ivlen != ivsize)
                        return -EINVAL;
        }

        err = -EINVAL;

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (!ctx->used) {
                ctx->enc = enc;
                if (con.iv)
                        memcpy(ctx->iv, con.iv->iv, ivsize);
        }

        while (size) {
                struct scatterlist *sg;
                unsigned long len = size;
                int plen;

                if (ctx->merge) {
                        sgl = list_entry(ctx->tsgl.prev,
                                         struct skcipher_sg_list, list);
                        sg = sgl->sg + sgl->cur - 1;
                        len = min_t(unsigned long, len,
                                    PAGE_SIZE - sg->offset - sg->length);

                        err = memcpy_fromiovec(page_address(sg_page(sg)) +
                                               sg->offset + sg->length,
                                               msg->msg_iov, len);
                        if (err)
                                goto unlock;

                        sg->length += len;
                        ctx->merge = (sg->offset + sg->length) &
                                     (PAGE_SIZE - 1);

                        ctx->used += len;
                        copied += len;
                        size -= len;
                        continue;
                }

                if (!skcipher_writable(sk)) {
                        err = skcipher_wait_for_wmem(sk, msg->msg_flags);
                        if (err)
                                goto unlock;
                }

                len = min_t(unsigned long, len, skcipher_sndbuf(sk));

                err = skcipher_alloc_sgl(sk);
                if (err)
                        goto unlock;

                sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
                sg = sgl->sg;
                do {
                        i = sgl->cur;
                        plen = min_t(int, len, PAGE_SIZE);

                        sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
                        err = -ENOMEM;
                        if (!sg_page(sg + i))
                                goto unlock;

                        err = memcpy_fromiovec(page_address(sg_page(sg + i)),
                                               msg->msg_iov, plen);
                        if (err) {
                                __free_page(sg_page(sg + i));
                                sg_assign_page(sg + i, NULL);
                                goto unlock;
                        }

                        sg[i].length = plen;
                        len -= plen;
                        ctx->used += plen;
                        copied += plen;
                        size -= plen;
                        sgl->cur++;
                } while (len && sgl->cur < MAX_SGL_ENTS);

                ctx->merge = plen & (PAGE_SIZE - 1);
        }

        err = 0;

        ctx->more = msg->msg_flags & MSG_MORE;
        if (!ctx->more && !list_empty(&ctx->tsgl))
                sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

unlock:
        skcipher_data_wakeup(sk);
        release_sock(sk);

        return copied ?: err;
}

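/*
 * Zero-copy variant of sendmsg: take a reference on the caller's page and
 * splice it directly into the transmit queue.
 */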
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
                                 int offset, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct skcipher_sg_list *sgl;
        int err = -EINVAL;

        lock_sock(sk);
        if (!ctx->more && ctx->used)
                goto unlock;

        if (!size)
                goto done;

        if (!skcipher_writable(sk)) {
                err = skcipher_wait_for_wmem(sk, flags);
                if (err)
                        goto unlock;
        }

        err = skcipher_alloc_sgl(sk);
        if (err)
                goto unlock;

        ctx->merge = 0;
        sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

        get_page(page);
        sg_set_page(sgl->sg + sgl->cur, page, size, offset);
        sgl->cur++;
        ctx->used += size;

done:
        ctx->more = flags & MSG_MORE;
        if (!ctx->more && !list_empty(&ctx->tsgl))
                sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

unlock:
        skcipher_data_wakeup(sk);
        release_sock(sk);

        return err ?: size;
}

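/*
 * Run the cipher and copy the result to user space.  For each iovec
 * segment the user pages are mapped via af_alg_make_sg(), the request is
 * fed from the head of the transmit queue, and the length is rounded
 * down to the block size unless this is the final chunk of the request.
 */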
static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
                            struct msghdr *msg, size_t ignored, int flags)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
                &ctx->req));
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        unsigned long iovlen;
        struct iovec *iov;
        int err = -EAGAIN;
        int used;
        long copied = 0;

        lock_sock(sk);
        for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
             iovlen--, iov++) {
                unsigned long seglen = iov->iov_len;
                char __user *from = iov->iov_base;

                while (seglen) {
                        sgl = list_first_entry(&ctx->tsgl,
                                               struct skcipher_sg_list, list);
                        sg = sgl->sg;

                        while (!sg->length)
                                sg++;

                        used = ctx->used;
                        if (!used) {
                                err = skcipher_wait_for_data(sk, flags);
                                if (err)
                                        goto unlock;
                        }

                        used = min_t(unsigned long, used, seglen);

                        used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
                        err = used;
                        if (err < 0)
                                goto unlock;

                        if (ctx->more || used < ctx->used)
                                used -= used % bs;

                        err = -EINVAL;
                        if (!used)
                                goto free;

                        ablkcipher_request_set_crypt(&ctx->req, sg,
                                                     ctx->rsgl.sg, used,
                                                     ctx->iv);

                        err = af_alg_wait_for_completion(
                                ctx->enc ?
                                        crypto_ablkcipher_encrypt(&ctx->req) :
                                        crypto_ablkcipher_decrypt(&ctx->req),
                                &ctx->completion);

free:
                        af_alg_free_sg(&ctx->rsgl);

                        if (err)
                                goto unlock;

                        copied += used;
                        from += used;
                        seglen -= used;
                        skcipher_pull_sgl(sk, used);
                }
        }

        err = 0;

unlock:
        skcipher_wmem_wakeup(sk);
        release_sock(sk);

        return copied ?: err;
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
                                  poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        unsigned int mask;

        sock_poll_wait(file, sk_sleep(sk), wait);
        mask = 0;

        if (ctx->used)
                mask |= POLLIN | POLLRDNORM;

        if (skcipher_writable(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

static struct proto_ops algif_skcipher_ops = {
        .family = PF_ALG,

        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .getname = sock_no_getname,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .getsockopt = sock_no_getsockopt,
        .mmap = sock_no_mmap,
        .bind = sock_no_bind,
        .accept = sock_no_accept,
        .setsockopt = sock_no_setsockopt,

        .release = af_alg_release,
        .sendmsg = skcipher_sendmsg,
        .sendpage = skcipher_sendpage,
        .recvmsg = skcipher_recvmsg,
        .poll = skcipher_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
        return crypto_alloc_ablkcipher(name, type, mask);
}

static void skcipher_release(void *private)
{
        crypto_free_ablkcipher(private);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
        return crypto_ablkcipher_setkey(private, key, keylen);
}

static void skcipher_sock_destruct(struct sock *sk)
{
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);

        skcipher_free_sgl(sk);
        sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
        sock_kfree_s(sk, ctx, ctx->len);
        af_alg_release_parent(sk);
}

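/*
 * Set up the per-connection context when an operation socket is
 * accept()ed: allocate the context plus embedded request, zero the IV,
 * and install skcipher_sock_destruct for cleanup.
 */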
static int skcipher_accept_parent(void *private, struct sock *sk)
{
        struct skcipher_ctx *ctx;
        struct alg_sock *ask = alg_sk(sk);
        unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);

        ctx = sock_kmalloc(sk, len, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
                               GFP_KERNEL);
        if (!ctx->iv) {
                sock_kfree_s(sk, ctx, len);
                return -ENOMEM;
        }

        memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));

        INIT_LIST_HEAD(&ctx->tsgl);
        ctx->len = len;
        ctx->used = 0;
        ctx->more = 0;
        ctx->merge = 0;
        ctx->enc = 0;
        af_alg_init_completion(&ctx->completion);

        ask->private = ctx;

        ablkcipher_request_set_tfm(&ctx->req, private);
        ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        af_alg_complete, &ctx->completion);

        sk->sk_destruct = skcipher_sock_destruct;

        return 0;
}

static const struct af_alg_type algif_type_skcipher = {
        .bind = skcipher_bind,
        .release = skcipher_release,
        .setkey = skcipher_setkey,
        .accept = skcipher_accept_parent,
        .ops = &algif_skcipher_ops,
        .name = "skcipher",
        .owner = THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
        return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
        int err = af_alg_unregister_type(&algif_type_skcipher);
        BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");