/*
 * mm/zpool.c - zpool memory storage api
 *
 * Copyright (C) 2014 Dan Streetman
 *
 * This is a common frontend for memory storage pool implementations.
 * Typically, this is used to store compressed memory.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/zpool.h>
/* A zpool ties a pool user to a specific backend driver instance. */
struct zpool {
	char *type;

	struct zpool_driver *driver;
	void *pool;
	struct zpool_ops *ops;

	struct list_head list;
};
static LIST_HEAD(drivers_head);
static DEFINE_SPINLOCK(drivers_lock);

static LIST_HEAD(pools_head);
static DEFINE_SPINLOCK(pools_lock);
/**
 * zpool_register_driver() - register a zpool implementation.
 * @driver: driver to register
 */
void zpool_register_driver(struct zpool_driver *driver)
{
	spin_lock(&drivers_lock);
	atomic_set(&driver->refcount, 0);
	list_add(&driver->list, &drivers_head);
	spin_unlock(&drivers_lock);
}
EXPORT_SYMBOL(zpool_register_driver);
/**
 * zpool_unregister_driver() - unregister a zpool implementation.
 * @driver: driver to unregister.
 *
 * Module usage counting is used to prevent using a driver
 * while/after unloading, so if this is called from module
 * exit function, this should never fail; if called from
 * other than the module exit function, and this returns
 * failure, the driver is in use and must remain available.
 */
int zpool_unregister_driver(struct zpool_driver *driver)
{
	int ret = 0, refcount;

	spin_lock(&drivers_lock);
	refcount = atomic_read(&driver->refcount);
	WARN_ON(refcount < 0);
	if (refcount > 0)
		ret = -EBUSY;
	else
		list_del(&driver->list);
	spin_unlock(&drivers_lock);

	return ret;
}
EXPORT_SYMBOL(zpool_unregister_driver);
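/*
 * Illustrative sketch (not part of the original file): a backend module
 * such as zbud or zsmalloc registers its driver from module init and
 * unregisters it on exit.  The driver name and callbacks below are
 * hypothetical placeholders.
 *
 *	static struct zpool_driver example_zpool_driver = {
 *		.type =		"example",
 *		.owner =	THIS_MODULE,
 *		.create =	example_create,
 *		.destroy =	example_destroy,
 *		.malloc =	example_malloc,
 *		.free =		example_free,
 *		.shrink =	example_shrink,
 *		.map =		example_map,
 *		.unmap =	example_unmap,
 *		.total_size =	example_total_size,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		zpool_register_driver(&example_zpool_driver);
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		zpool_unregister_driver(&example_zpool_driver);
 *	}
 */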
/**
 * zpool_evict() - evict callback from a zpool implementation.
 * @pool: pool to evict from.
 * @handle: handle to evict.
 *
 * This can be used by zpool implementations to call the
 * user's evict zpool_ops struct evict callback.
 */
int zpool_evict(void *pool, unsigned long handle)
{
	struct zpool *zpool;

	spin_lock(&pools_lock);
	list_for_each_entry(zpool, &pools_head, list) {
		if (zpool->pool == pool) {
			spin_unlock(&pools_lock);
			if (!zpool->ops || !zpool->ops->evict)
				return -EINVAL;
			return zpool->ops->evict(zpool, handle);
		}
	}
	spin_unlock(&pools_lock);

	return -ENOENT;
}
EXPORT_SYMBOL(zpool_evict);
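/*
 * Illustrative sketch (not in the original file): a backend's own reclaim
 * path can hand a handle back to the pool user through zpool_evict().  The
 * helper below is hypothetical; it passes the opaque pool pointer the
 * backend was created with, and the user's zpool_ops->evict callback is
 * expected to write the object back and free the handle.
 *
 *	static int example_reclaim_one(void *pool, unsigned long handle)
 *	{
 *		int ret = zpool_evict(pool, handle);
 *
 *		if (ret)
 *			pr_debug("eviction of handle %lu failed: %d\n",
 *				 handle, ret);
 *		return ret;
 *	}
 */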
static struct zpool_driver *zpool_get_driver(char *type)
{
	struct zpool_driver *driver;

	spin_lock(&drivers_lock);
	list_for_each_entry(driver, &drivers_head, list) {
		if (!strcmp(driver->type, type)) {
			bool got = try_module_get(driver->owner);

			if (got)
				atomic_inc(&driver->refcount);
			spin_unlock(&drivers_lock);
			return got ? driver : NULL;
		}
	}

	spin_unlock(&drivers_lock);
	return NULL;
}
static void zpool_put_driver(struct zpool_driver *driver)
{
	atomic_dec(&driver->refcount);
	module_put(driver->owner);
}
/**
 * zpool_create_pool() - Create a new zpool
 * @type	The type of the zpool to create (e.g. zbud, zsmalloc)
 * @name	The name of the zpool (e.g. zram0, zswap)
 * @gfp		The GFP flags to use when allocating the pool.
 * @ops		The optional ops callback.
 *
 * This creates a new zpool of the specified type.  The gfp flags will be
 * used when allocating memory, if the implementation supports it.  If the
 * ops param is NULL, then the created zpool will not be shrinkable.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: New zpool on success, NULL on failure.
 */
struct zpool *zpool_create_pool(char *type, char *name, gfp_t gfp,
		struct zpool_ops *ops)
{
	struct zpool_driver *driver;
	struct zpool *zpool;

	pr_info("creating pool type %s\n", type);

	driver = zpool_get_driver(type);

	if (!driver) {
		request_module("zpool-%s", type);
		driver = zpool_get_driver(type);
	}

	if (!driver) {
		pr_err("no driver for type %s\n", type);
		return NULL;
	}

	zpool = kmalloc(sizeof(*zpool), gfp);
	if (!zpool) {
		pr_err("couldn't create zpool - out of memory\n");
		zpool_put_driver(driver);
		return NULL;
	}

	zpool->type = driver->type;
	zpool->driver = driver;
	zpool->pool = driver->create(name, gfp, ops);
	zpool->ops = ops;

	if (!zpool->pool) {
		pr_err("couldn't create %s pool\n", type);
		zpool_put_driver(driver);
		kfree(zpool);
		return NULL;
	}

	pr_info("created %s pool\n", type);

	spin_lock(&pools_lock);
	list_add(&zpool->list, &pools_head);
	spin_unlock(&pools_lock);

	return zpool;
}
/**
 * zpool_destroy_pool() - Destroy a zpool
 * @pool	The zpool to destroy.
 *
 * Implementations must guarantee this to be thread-safe,
 * however only when destroying different pools.  The same
 * pool should only be destroyed once, and should not be used
 * after it is destroyed.
 *
 * This destroys an existing zpool.  The zpool should not be in use.
 */
void zpool_destroy_pool(struct zpool *zpool)
{
	pr_info("destroying pool type %s\n", zpool->type);

	spin_lock(&pools_lock);
	list_del(&zpool->list);
	spin_unlock(&pools_lock);
	zpool->driver->destroy(zpool->pool);
	zpool_put_driver(zpool->driver);
	kfree(zpool);
}
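/*
 * Illustrative usage sketch (not in the original file): a pool user such as
 * zswap creates a pool by type name and destroys it when done.  The pool
 * name string and the evict callback below are hypothetical.
 *
 *	static int example_evict(struct zpool *pool, unsigned long handle)
 *	{
 *		// write the object back, then zpool_free(pool, handle)
 *		return 0;
 *	}
 *
 *	static struct zpool_ops example_ops = {
 *		.evict = example_evict,
 *	};
 *
 *	struct zpool *pool = zpool_create_pool("zbud", "example", GFP_KERNEL,
 *					       &example_ops);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	zpool_destroy_pool(pool);
 */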
/**
 * zpool_get_type() - Get the type of the zpool
 * @pool	The zpool to check
 *
 * This returns the type of the pool.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: The type of zpool.
 */
char *zpool_get_type(struct zpool *zpool)
{
	return zpool->type;
}
/**
 * zpool_malloc() - Allocate memory
 * @pool	The zpool to allocate from.
 * @size	The amount of memory to allocate.
 * @gfp		The GFP flags to use when allocating memory.
 * @handle	Pointer to the handle to set
 *
 * This allocates the requested amount of memory from the pool.
 * The gfp flags will be used when allocating memory, if the
 * implementation supports it.  The provided @handle will be
 * set to the allocated object handle.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: 0 on success, negative value on error.
 */
int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
		unsigned long *handle)
{
	return zpool->driver->malloc(zpool->pool, size, gfp, handle);
}
/**
 * zpool_free() - Free previously allocated memory
 * @pool	The zpool that allocated the memory.
 * @handle	The handle to the memory to free.
 *
 * This frees previously allocated memory.  This does not guarantee
 * that the pool will actually free memory, only that the memory
 * in the pool will become available for use by the pool.
 *
 * Implementations must guarantee this to be thread-safe,
 * however only when freeing different handles.  The same
 * handle should only be freed once, and should not be used
 * after it is freed.
 */
void zpool_free(struct zpool *zpool, unsigned long handle)
{
	zpool->driver->free(zpool->pool, handle);
}
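/*
 * Illustrative sketch (not in the original file): allocating an object
 * handle and freeing it again.  "pool" and "len" are assumed to exist in
 * the caller.
 *
 *	unsigned long handle;
 *	int ret = zpool_malloc(pool, len, GFP_KERNEL, &handle);
 *
 *	if (ret)
 *		return ret;
 *	...
 *	zpool_free(pool, handle);
 */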
/**
 * zpool_shrink() - Shrink the pool size
 * @pool	The zpool to shrink.
 * @pages	The number of pages to shrink the pool.
 * @reclaimed	The number of pages successfully evicted.
 *
 * This attempts to shrink the actual memory size of the pool
 * by evicting currently used handle(s).  If the pool was
 * created with no zpool_ops, or the evict call fails for any
 * of the handles, this will fail.  If non-NULL, the @reclaimed
 * parameter will be set to the number of pages reclaimed,
 * which may be more than the number of pages requested.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: 0 on success, negative value on error/failure.
 */
int zpool_shrink(struct zpool *zpool, unsigned int pages,
		unsigned int *reclaimed)
{
	return zpool->driver->shrink(zpool->pool, pages, reclaimed);
}
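/*
 * Illustrative sketch (not in the original file): a pool user under memory
 * pressure asks the pool to reclaim one page and checks how much was
 * actually evicted.  "pool" is assumed to exist in the caller.
 *
 *	unsigned int reclaimed;
 *
 *	if (zpool_shrink(pool, 1, &reclaimed))
 *		pr_debug("pool shrink failed\n");
 *	else
 *		pr_debug("reclaimed %u pages\n", reclaimed);
 */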
/**
 * zpool_map_handle() - Map a previously allocated handle into memory
 * @pool	The zpool that the handle was allocated from
 * @handle	The handle to map
 * @mm		How the memory should be mapped
 *
 * This maps a previously allocated handle into memory.  The @mm
 * param indicates to the implementation how the memory will be
 * used, i.e. read-only, write-only, read-write.  If the
 * implementation does not support it, the memory will be treated
 * as read-write.
 *
 * This may hold locks, disable interrupts, and/or preemption,
 * and the zpool_unmap_handle() must be called to undo those
 * actions.  The code that uses the mapped handle should complete
 * its operations on the mapped handle memory quickly and unmap
 * as soon as possible.  As the implementation may use per-cpu
 * data, multiple handles should not be mapped concurrently on
 * any cpu.
 *
 * Returns: A pointer to the handle's mapped memory area.
 */
void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
		enum zpool_mapmode mapmode)
{
	return zpool->driver->map(zpool->pool, handle, mapmode);
}
/**
 * zpool_unmap_handle() - Unmap a previously mapped handle
 * @pool	The zpool that the handle was allocated from
 * @handle	The handle to unmap
 *
 * This unmaps a previously mapped handle.  Any locks or other
 * actions that the implementation took in zpool_map_handle()
 * will be undone here.  The memory area returned from
 * zpool_map_handle() should no longer be used after this.
 */
void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
{
	zpool->driver->unmap(zpool->pool, handle);
}
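/*
 * Illustrative sketch (not in the original file): copying data into a
 * freshly allocated handle.  The mapping is held only long enough for the
 * memcpy, as required above.  "pool", "handle", "src" and "len" are assumed
 * to exist in the caller.
 *
 *	void *dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
 *
 *	memcpy(dst, src, len);
 *	zpool_unmap_handle(pool, handle);
 */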
/**
 * zpool_get_total_size() - The total size of the pool
 * @pool	The zpool to check
 *
 * This returns the total size in bytes of the pool.
 *
 * Returns: Total size of the zpool in bytes.
 */
u64 zpool_get_total_size(struct zpool *zpool)
{
	return zpool->driver->total_size(zpool->pool);
}
static int __init init_zpool(void)
{
	pr_info("loaded\n");
	return 0;
}

static void __exit exit_zpool(void)
{
	pr_info("unloaded\n");
}

module_init(init_zpool);
module_exit(exit_zpool);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");