/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"
static const struct crypto_type crypto_scomp_type;
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
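
/*
 * scomp algorithms implement synchronous compression over linear buffers.
 * To expose them through the asynchronous acomp API, requests are bounced
 * through per-CPU scratch buffers of SCOMP_SCRATCH_SIZE bytes. The
 * scratches are allocated when the first scomp algorithm is registered
 * and freed when the last one is unregistered.
 */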

#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	strncpy(rscomp.type, "scomp", sizeof(rscomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rscomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	return 0;
}

static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
	int i;

	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

static void * __percpu *crypto_scomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	void *scratch;
	int i;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	for_each_possible_cpu(i) {
		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			goto error;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;

error:
	crypto_scomp_free_scratches(scratches);
	return NULL;
}
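
/*
 * The scratch buffers are shared by every registered scomp algorithm and
 * reference-counted through scomp_scratch_users. Callers serialise on
 * scomp_lock, so the counter needs no further protection.
 */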

static void crypto_scomp_free_all_scratches(void)
{
	if (!--scomp_scratch_users) {
		crypto_scomp_free_scratches(scomp_src_scratches);
		crypto_scomp_free_scratches(scomp_dst_scratches);
		scomp_src_scratches = NULL;
		scomp_dst_scratches = NULL;
	}
}

static int crypto_scomp_alloc_all_scratches(void)
{
	if (!scomp_scratch_users++) {
		scomp_src_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_src_scratches)
			return -ENOMEM;
		scomp_dst_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_dst_scratches)
			return -ENOMEM;
	}
	return 0;
}
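
/*
 * Scatterlist helpers for the NULL-destination case: if an acomp request
 * arrives without a destination scatterlist, one is allocated on the fly
 * below and handed back to the caller, who releases it through the tfm's
 * dst_free() hook (wired to crypto_scomp_sg_free() in this file).
 */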

static void crypto_scomp_sg_free(struct scatterlist *sgl)
{
	int i, n;
	struct page *page;

	if (!sgl)
		return;

	n = sg_nents(sgl);
	for_each_sg(sgl, sgl, n, i) {
		page = sg_page(sgl);
		if (page)
			__free_page(page);
	}

	kfree(sgl);
}

static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
{
	struct scatterlist *sgl;
	struct page *page;
	int i, n;

	n = ((size - 1) >> PAGE_SHIFT) + 1;

	sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, n);

	for (i = 0; i < n; i++) {
		page = alloc_page(gfp);
		if (!page)
			goto err;
		sg_set_page(sgl + i, page, PAGE_SIZE, 0);
	}

	return sgl;

err:
	sg_mark_end(sgl + i);
	crypto_scomp_sg_free(sgl);
	return NULL;
}
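
/*
 * Bounce an acomp request through the per-CPU scratch buffers: linearise
 * req->src into scratch_src, run the synchronous (de)compression, then
 * copy scratch_dst back out to req->dst. Source and destination are
 * therefore both capped at SCOMP_SCRATCH_SIZE, and the operation runs
 * with preemption disabled between get_cpu() and put_cpu().
 */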
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	const int cpu = get_cpu();
	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	if (req->dst && !req->dlen) {
		ret = -EINVAL;
		goto out;
	}

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
					    scratch_dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
					      scratch_dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = crypto_scomp_sg_alloc(req->dlen,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				   GFP_KERNEL : GFP_ATOMIC);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	put_cpu();
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}
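
/*
 * Glue that lets a synchronous scomp algorithm be used behind a
 * crypto_acomp handle: the acomp tfm context holds a pointer to a scomp
 * tfm, and the entry points above do the scatterlist bouncing.
 */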
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = crypto_scomp_sg_free;
	crt->reqsize = sizeof(void *);

	return 0;
}
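
/*
 * For orientation, a minimal sketch of how an acomp user ends up in the
 * handlers above (assumes an scomp-backed algorithm such as "lzo" is
 * available; error handling and request completion are omitted):
 *
 *	struct crypto_acomp *tfm = crypto_alloc_acomp("lzo", 0, 0);
 *	struct acomp_req *req = acomp_request_alloc(tfm);
 *
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *	err = crypto_acomp_compress(req);
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 *
 * crypto_acomp_compress() dispatches to scomp_acomp_compress() above.
 */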
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int ret = -ENOMEM;

	mutex_lock(&scomp_lock);
	if (crypto_scomp_alloc_all_scratches())
		goto error;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	ret = crypto_register_alg(base);
	if (ret)
		goto error;

	mutex_unlock(&scomp_lock);
	return ret;

error:
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);
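
/*
 * A driver exposing a synchronous compressor registers a struct scomp_alg
 * through the function above. Hedged sketch, with every "my_*" name a
 * hypothetical placeholder rather than an API this file defines:
 *
 *	static struct scomp_alg my_scomp = {
 *		.alloc_ctx  = my_alloc_ctx,
 *		.free_ctx   = my_free_ctx,
 *		.compress   = my_compress,
 *		.decompress = my_decompress,
 *		.base       = {
 *			.cra_name        = "my_algo",
 *			.cra_driver_name = "my_algo-generic",
 *			.cra_module      = THIS_MODULE,
 *		},
 *	};
 *
 *	err = crypto_register_scomp(&my_scomp);
 */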

int crypto_unregister_scomp(struct scomp_alg *alg)
{
	int ret;

	mutex_lock(&scomp_lock);
	ret = crypto_unregister_alg(&alg->base);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");