git.proxmox.com — mirror_ubuntu-artful-kernel.git, blob f1ff39a3d1c12249b56c54174ca37ec0f0825033: drivers/block/zram/zcomp.c
/*
 * Copyright (C) 2014 Sergey Senozhatsky.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include "zcomp.h"
#include "zcomp_lzo.h"
#ifdef CONFIG_ZRAM_LZ4_COMPRESS
#include "zcomp_lz4.h"
#endif

/*
 * single zcomp_strm backend
 */
struct zcomp_strm_single {
	struct mutex strm_lock;
	struct zcomp_strm *zstrm;
};

/*
 * multi zcomp_strm backend
 */
struct zcomp_strm_multi {
	/* protect strm list */
	spinlock_t strm_lock;
	/* max possible number of zstrm streams */
	int max_strm;
	/* number of available zstrm streams */
	int avail_strm;
	/* list of available strms */
	struct list_head idle_strm;
	wait_queue_head_t strm_wait;
};

static struct zcomp_backend *backends[] = {
	&zcomp_lzo,
#ifdef CONFIG_ZRAM_LZ4_COMPRESS
	&zcomp_lz4,
#endif
	NULL
};

static struct zcomp_backend *find_backend(const char *compress)
{
	int i = 0;
	while (backends[i]) {
		if (sysfs_streq(compress, backends[i]->name))
			break;
		i++;
	}
	return backends[i];
}

static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	if (zstrm->private)
		comp->backend->destroy(zstrm->private);
	free_pages((unsigned long)zstrm->buffer, 1);
	kfree(zstrm);
}

/*
 * allocate new zcomp_strm structure with ->private initialized by
 * backend, return NULL on error
 */
static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
{
	struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
	if (!zstrm)
		return NULL;

	zstrm->private = comp->backend->create();
	/*
	 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
	 * case when compressed size is larger than the original one
	 */
	zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zstrm->private || !zstrm->buffer) {
		zcomp_strm_free(comp, zstrm);
		zstrm = NULL;
	}
	return zstrm;
}
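
/*
 * Illustrative note (not part of the original file): the order-1 (two page)
 * buffer comfortably covers the worst case for the lzo backend, whose maximum
 * output size is lzo1x_worst_compress(PAGE_SIZE), i.e. roughly
 * PAGE_SIZE + PAGE_SIZE/16 + 64 + 3. A hedged compile-time sketch of that
 * bound, assuming <linux/lzo.h> is available, could look like this:
 */
#if 0
#include <linux/lzo.h>

static void example_buffer_bound_check(void)
{
	/* two pages must fit the lzo worst-case expansion of one page */
	BUILD_BUG_ON(lzo1x_worst_compress(PAGE_SIZE) > 2 * PAGE_SIZE);
}
#endif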

/*
 * get idle zcomp_strm or wait until another process releases one
 * for us (via zcomp_strm_release())
 */
static struct zcomp_strm *zcomp_strm_multi_find(struct zcomp *comp)
{
	struct zcomp_strm_multi *zs = comp->stream;
	struct zcomp_strm *zstrm;

	while (1) {
		spin_lock(&zs->strm_lock);
		if (!list_empty(&zs->idle_strm)) {
			zstrm = list_entry(zs->idle_strm.next,
					struct zcomp_strm, list);
			list_del(&zstrm->list);
			spin_unlock(&zs->strm_lock);
			return zstrm;
		}
		/* zstrm streams limit reached, wait for idle stream */
		if (zs->avail_strm >= zs->max_strm) {
			spin_unlock(&zs->strm_lock);
			wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
			continue;
		}
		/* allocate new zstrm stream */
		zs->avail_strm++;
		spin_unlock(&zs->strm_lock);

		zstrm = zcomp_strm_alloc(comp);
		if (!zstrm) {
			spin_lock(&zs->strm_lock);
			zs->avail_strm--;
			spin_unlock(&zs->strm_lock);
			wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
			continue;
		}
		break;
	}
	return zstrm;
}

/* add stream back to idle list and wake up waiter or free the stream */
static void zcomp_strm_multi_release(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	struct zcomp_strm_multi *zs = comp->stream;

	spin_lock(&zs->strm_lock);
	if (zs->avail_strm <= zs->max_strm) {
		list_add(&zstrm->list, &zs->idle_strm);
		spin_unlock(&zs->strm_lock);
		wake_up(&zs->strm_wait);
		return;
	}

	zs->avail_strm--;
	spin_unlock(&zs->strm_lock);
	zcomp_strm_free(comp, zstrm);
}

/* change max_strm limit */
static bool zcomp_strm_multi_set_max_streams(struct zcomp *comp, int num_strm)
{
	struct zcomp_strm_multi *zs = comp->stream;
	struct zcomp_strm *zstrm;

	spin_lock(&zs->strm_lock);
	zs->max_strm = num_strm;
	/*
	 * if the user has lowered the limit and there are idle streams,
	 * immediately free as many streams (and as much memory) as we can.
	 */
	while (zs->avail_strm > num_strm && !list_empty(&zs->idle_strm)) {
		zstrm = list_entry(zs->idle_strm.next,
				struct zcomp_strm, list);
		list_del(&zstrm->list);
		zcomp_strm_free(comp, zstrm);
		zs->avail_strm--;
	}
	spin_unlock(&zs->strm_lock);
	return true;
}

static void zcomp_strm_multi_destroy(struct zcomp *comp)
{
	struct zcomp_strm_multi *zs = comp->stream;
	struct zcomp_strm *zstrm;

	while (!list_empty(&zs->idle_strm)) {
		zstrm = list_entry(zs->idle_strm.next,
				struct zcomp_strm, list);
		list_del(&zstrm->list);
		zcomp_strm_free(comp, zstrm);
	}
	kfree(zs);
}

static int zcomp_strm_multi_create(struct zcomp *comp, int max_strm)
{
	struct zcomp_strm *zstrm;
	struct zcomp_strm_multi *zs;

	comp->destroy = zcomp_strm_multi_destroy;
	comp->strm_find = zcomp_strm_multi_find;
	comp->strm_release = zcomp_strm_multi_release;
	comp->set_max_streams = zcomp_strm_multi_set_max_streams;
	zs = kmalloc(sizeof(struct zcomp_strm_multi), GFP_KERNEL);
	if (!zs)
		return -ENOMEM;

	comp->stream = zs;
	spin_lock_init(&zs->strm_lock);
	INIT_LIST_HEAD(&zs->idle_strm);
	init_waitqueue_head(&zs->strm_wait);
	zs->max_strm = max_strm;
	zs->avail_strm = 1;

	zstrm = zcomp_strm_alloc(comp);
	if (!zstrm) {
		kfree(zs);
		return -ENOMEM;
	}
	list_add(&zstrm->list, &zs->idle_strm);
	return 0;
}

static struct zcomp_strm *zcomp_strm_single_find(struct zcomp *comp)
{
	struct zcomp_strm_single *zs = comp->stream;
	mutex_lock(&zs->strm_lock);
	return zs->zstrm;
}

static void zcomp_strm_single_release(struct zcomp *comp,
		struct zcomp_strm *zstrm)
{
	struct zcomp_strm_single *zs = comp->stream;
	mutex_unlock(&zs->strm_lock);
}

static bool zcomp_strm_single_set_max_streams(struct zcomp *comp, int num_strm)
{
	/* zcomp_strm_single supports only max_comp_streams == 1 */
	return false;
}

static void zcomp_strm_single_destroy(struct zcomp *comp)
{
	struct zcomp_strm_single *zs = comp->stream;
	zcomp_strm_free(comp, zs->zstrm);
	kfree(zs);
}

static int zcomp_strm_single_create(struct zcomp *comp)
{
	struct zcomp_strm_single *zs;

	comp->destroy = zcomp_strm_single_destroy;
	comp->strm_find = zcomp_strm_single_find;
	comp->strm_release = zcomp_strm_single_release;
	comp->set_max_streams = zcomp_strm_single_set_max_streams;
	zs = kmalloc(sizeof(struct zcomp_strm_single), GFP_KERNEL);
	if (!zs)
		return -ENOMEM;

	comp->stream = zs;
	mutex_init(&zs->strm_lock);
	zs->zstrm = zcomp_strm_alloc(comp);
	if (!zs->zstrm) {
		kfree(zs);
		return -ENOMEM;
	}
	return 0;
}

/* show available compressors */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
	ssize_t sz = 0;
	int i = 0;

	while (backends[i]) {
		if (sysfs_streq(comp, backends[i]->name))
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"[%s] ", backends[i]->name);
		else
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"%s ", backends[i]->name);
		i++;
	}
	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
	return sz;
}
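
/*
 * Illustrative note (not part of the original file): the buffer produced
 * above lists every built-in backend and brackets the currently selected
 * one, e.g. "[lzo] lz4 " followed by a newline when lzo is active and the
 * lz4 backend is compiled in. A hedged sketch of a sysfs show callback
 * wired to this helper (all names below are hypothetical) might be:
 */
#if 0
static ssize_t comp_algorithm_example_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	/* "lzo" stands in for whatever algorithm the device currently uses */
	return zcomp_available_show("lzo", buf);
}
#endif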

bool zcomp_set_max_streams(struct zcomp *comp, int num_strm)
{
	return comp->set_max_streams(comp, num_strm);
}
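
/*
 * Illustrative note (not part of the original file): a caller that exposes
 * the stream limit through sysfs could parse the written value and forward
 * it here, treating a 'false' return (as from the single-stream backend)
 * as -EINVAL. A hedged sketch, with hypothetical names:
 */
#if 0
static int example_set_max_streams(struct zcomp *comp, const char *buf)
{
	int num;

	if (kstrtoint(buf, 0, &num) || num < 1)
		return -EINVAL;
	if (!zcomp_set_max_streams(comp, num))
		return -EINVAL;	/* backend cannot change its limit */
	return 0;
}
#endif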

struct zcomp_strm *zcomp_strm_find(struct zcomp *comp)
{
	return comp->strm_find(comp);
}

void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	comp->strm_release(comp, zstrm);
}

int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
		const unsigned char *src, size_t *dst_len)
{
	return comp->backend->compress(src, zstrm->buffer, dst_len,
			zstrm->private);
}

int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
		size_t src_len, unsigned char *dst)
{
	return comp->backend->decompress(src, src_len, dst);
}

void zcomp_destroy(struct zcomp *comp)
{
	comp->destroy(comp);
	kfree(comp);
}

/*
 * search available compressors for requested algorithm.
 * allocate new zcomp and initialize it. return pointer to the
 * newly allocated zcomp or ERR_PTR if things went bad.
 * ERR_PTR(-EINVAL) if requested algorithm is not supported,
 * ERR_PTR(-ENOMEM) in case of allocation error.
 */
struct zcomp *zcomp_create(const char *compress, int max_strm)
{
	struct zcomp *comp;
	struct zcomp_backend *backend;
	int error;

	backend = find_backend(compress);
	if (!backend)
		return ERR_PTR(-EINVAL);

	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->backend = backend;
	/*
	 * check the create helpers' return value directly: on failure they
	 * may leave comp->stream pointing at memory they already freed, so
	 * testing comp->stream here would not be reliable.
	 */
	if (max_strm > 1)
		error = zcomp_strm_multi_create(comp, max_strm);
	else
		error = zcomp_strm_single_create(comp);
	if (error) {
		kfree(comp);
		return ERR_PTR(error);
	}
	return comp;
}
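
/*
 * Illustrative usage sketch (not part of the original file): the expected
 * pairing of the public helpers above, roughly as a caller such as the zram
 * block driver would use them. The function names, arguments and error
 * handling below are hypothetical; only the zcomp_* calls come from this
 * file.
 */
#if 0
static int example_compress_page(struct zcomp *comp, const unsigned char *src,
		unsigned char *dst_storage, size_t *dst_len)
{
	struct zcomp_strm *zstrm;
	int ret;

	/* may sleep until another holder releases a stream */
	zstrm = zcomp_strm_find(comp);
	/* compressed data lands in zstrm->buffer, its length in *dst_len */
	ret = zcomp_compress(comp, zstrm, src, dst_len);
	if (!ret)
		memcpy(dst_storage, zstrm->buffer, *dst_len);
	zcomp_strm_release(comp, zstrm);
	return ret;
}

static int example_lifecycle(void)
{
	/* up to four concurrent streams of the lzo backend */
	struct zcomp *comp = zcomp_create("lzo", 4);

	if (IS_ERR(comp))
		return PTR_ERR(comp);
	/* ... zcomp_strm_find()/zcomp_compress()/zcomp_strm_release() ... */
	zcomp_destroy(comp);
	return 0;
}
#endif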