/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
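
/*
 * This file exposes synchronous compression (scomp) algorithms through
 * the asynchronous compression (acomp) API, so callers only ever deal
 * with acomp transforms and requests.
 *
 * Minimal usage sketch (assumptions for illustration only: an
 * scomp-backed algorithm such as "deflate" is registered, and error
 * handling is omitted):
 *
 *	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	struct acomp_req *req = acomp_request_alloc(tfm);
 *
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *	err = crypto_acomp_compress(req);
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 *
 * If dst_sg is NULL, this wrapper allocates a destination scatterlist
 * on the caller's behalf (see scomp_acomp_comp_decomp() below).
 */
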
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"

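/*
 * scomp algorithms operate on flat buffers, while acomp requests carry
 * scatterlists.  Source and destination data are therefore bounced
 * through per-CPU scratch buffers, which are shared by all scomp
 * algorithms and refcounted via scomp_scratch_users under scomp_lock.
 */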
static const struct crypto_type crypto_scomp_type;
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

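/* Report an scomp algorithm to userspace via the crypto_user netlink API. */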
#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	strncpy(rscomp.type, "scomp", sizeof(rscomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rscomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	return 0;
}

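/*
 * Allocate/free one SCOMP_SCRATCH_SIZE scratch buffer per possible CPU.
 * vmalloc_node() places each buffer on its CPU's NUMA node.
 */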
static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
	int i;

	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

static void * __percpu *crypto_scomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			goto error;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;

error:
	crypto_scomp_free_scratches(scratches);
	return NULL;
}

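/*
 * The scratch buffers are shared by all registered scomp algorithms;
 * scomp_scratch_users counts them.  Callers must hold scomp_lock.
 */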
static void crypto_scomp_free_all_scratches(void)
{
	if (!--scomp_scratch_users) {
		crypto_scomp_free_scratches(scomp_src_scratches);
		crypto_scomp_free_scratches(scomp_dst_scratches);
		scomp_src_scratches = NULL;
		scomp_dst_scratches = NULL;
	}
}

static int crypto_scomp_alloc_all_scratches(void)
{
	if (!scomp_scratch_users++) {
		scomp_src_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_src_scratches)
			return -ENOMEM;
		scomp_dst_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_dst_scratches)
			return -ENOMEM;
	}
	return 0;
}

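/*
 * Build/tear down a scatterlist of freshly allocated pages, used when
 * the caller leaves req->dst NULL.  crypto_scomp_sg_free() is also
 * exposed to acomp users as crt->dst_free.
 */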
static void crypto_scomp_sg_free(struct scatterlist *sgl)
{
	int i, n;
	struct page *page;

	if (!sgl)
		return;

	n = sg_nents(sgl);
	for_each_sg(sgl, sgl, n, i) {
		page = sg_page(sgl);
		if (page)
			__free_page(page);
	}

	kfree(sgl);
}

static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
{
	struct scatterlist *sgl;
	struct page *page;
	int i, n;

	n = ((size - 1) >> PAGE_SHIFT) + 1;

	sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, n);

	for (i = 0; i < n; i++) {
		page = alloc_page(gfp);
		if (!page)
			goto err;
		sg_set_page(sgl + i, page, PAGE_SIZE, 0);
	}

	return sgl;

err:
	sg_mark_end(sgl + i);
	crypto_scomp_sg_free(sgl);
	return NULL;
}

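/*
 * Common compress/decompress path: linearize req->src into the per-CPU
 * source scratch buffer, run the scomp operation into the destination
 * scratch buffer, then copy the result back into req->dst (allocating
 * it first if the caller passed NULL).  dir != 0 means compress,
 * dir == 0 means decompress.
 */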
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	const int cpu = get_cpu();
	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	if (req->dst && !req->dlen) {
		ret = -EINVAL;
		goto out;
	}

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
					    scratch_dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
					      scratch_dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = crypto_scomp_sg_alloc(req->dlen,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				   GFP_KERNEL : GFP_ATOMIC);
			if (!req->dst) {
				/* ret is still 0 here; report the failure */
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	put_cpu();
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

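/* Release the scomp backend when an scomp-backed acomp transform dies. */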
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);
}

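/*
 * Called by the acomp layer when an acomp transform is instantiated on
 * top of an scomp algorithm: create the underlying scomp transform and
 * install acomp entry points that bounce through it.
 */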
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = crypto_scomp_sg_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

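/*
 * Allocate/free the per-request scomp context kept in the acomp
 * request's private area (reqsize above is sizeof(void *)).
 */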
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize	= crypto_alg_extsize,
	.init_tfm	= crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show		= crypto_scomp_show,
#endif
	.report		= crypto_scomp_report,
	.maskclear	= ~CRYPTO_ALG_TYPE_MASK,
	.maskset	= CRYPTO_ALG_TYPE_MASK,
	.type		= CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize	= offsetof(struct crypto_scomp, base),
};

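/*
 * Registration allocates the shared scratch buffers for the first scomp
 * algorithm and frees them when the last one goes away; scomp_lock
 * serializes both paths.
 */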
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int ret = -ENOMEM;

	mutex_lock(&scomp_lock);
	if (crypto_scomp_alloc_all_scratches())
		goto error;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	ret = crypto_register_alg(base);
	if (ret)
		goto error;

	mutex_unlock(&scomp_lock);
	return ret;

error:
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

int crypto_unregister_scomp(struct scomp_alg *alg)
{
	int ret;

	mutex_lock(&scomp_lock);
	ret = crypto_unregister_alg(&alg->base);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");