/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
	unsigned long long ll;
	unsigned target_request_nr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
			  union map_info *map_context);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);
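
/*
 * Illustrative sketch only (not part of the API): a minimal bio-based map
 * function for a hypothetical linear-style target.  "struct example_c" and
 * its "dev"/"start" members are made up for this example; the return values
 * are the DM_MAPIO_* constants defined later in this header.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio,
 *			       union map_info *map_context)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		bio->bi_bdev = ec->dev->bdev;
 *		bio->bi_sector = ec->start + dm_target_offset(ti, bio->bi_sector);
 *
 *		return DM_MAPIO_REMAPPED;	(simple remap complete)
 *	}
 */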

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error,
			    union map_info *map_context);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);
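
/*
 * Illustrative sketch only: an end_io hook that asks dm core to requeue
 * failed I/O instead of completing it with an error, roughly what a
 * multipath-style target might do.  The helper
 * example_error_is_retryable() is a made-up placeholder.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  int error, union map_info *map_context)
 *	{
 *		if (error && example_error_is_retryable(error))
 *			return DM_ENDIO_REQUEUE;	(push back the io)
 *
 *		return 0;				(ended successfully)
 *	}
 */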

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			     char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
			    unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 * 0: The target can handle the next I/O immediately.
 * 1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

/*
 * Combine device limits.
 */
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
			 sector_t start, sector_t len, void *data);

struct dm_dev {
	struct block_device *bdev;
	fmode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
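
/*
 * Illustrative sketch only: the usual open/close pattern in a target
 * constructor.  "struct example_c" and its "dev" member are made-up example
 * context, and argv[0] is assumed to hold the device path.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct example_c *ec;
 *		int r;
 *
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev);
 *		if (r) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return r;
 *		}
 *
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 * The matching destructor drops the reference with dm_put_device(ti, ec->dev)
 * and frees the context.
 */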

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_map_request_fn map_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_ioctl_fn ioctl;
	dm_merge_fn merge;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier requests that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The request number will be placed in union map_info->target_request_nr.
	 * It is the responsibility of the target driver to remap these requests
	 * to the real underlying devices.
	 */
	unsigned num_flush_requests;

	/*
	 * The number of discard requests that will be submitted to the
	 * target. map_info->target_request_nr is used just like num_flush_requests.
	 */
	unsigned num_discard_requests;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	unsigned discards_supported:1;

	/*
	 * Set if the target requires discard requests to be split
	 * on a max_io_len boundary.
	 */
	unsigned split_discard_requests:1;

	/*
	 * Set if this target does not return zeroes on discarded blocks.
	 */
	unsigned discard_zeroes_data_unsupported:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
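
/*
 * Illustrative sketch only: how a target module typically registers its
 * target_type.  The "example" name, version and hook names are made up for
 * this example, and DM_MSG_PREFIX must be defined by the source file before
 * the DM* logging macros (see below) are used.
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		int r = dm_register_target(&example_target);
 *
 *		if (r < 0)
 *			DMERR("register failed %d", r);
 *		return r;
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */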

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
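
/*
 * Illustrative sketch only: reading a bounded numeric argument from a
 * constructor's argv with the helpers above.  The bounds, error string and
 * "num_stripes" variable are made-up example values.
 *
 *	static int example_parse_args(struct dm_target *ti,
 *				      unsigned argc, char **argv)
 *	{
 *		static struct dm_arg _args[] = {
 *			{1, 32, "Invalid number of stripes"},
 *		};
 *		struct dm_arg_set as = { .argc = argc, .argv = argv };
 *		unsigned num_stripes;
 *		int r;
 *
 *		r = dm_read_arg(_args, &as, &num_stripes, &ti->error);
 *		if (r)
 *			return r;	(ti->error already points at the message)
 *
 *		... consume the remaining as.argc arguments with dm_shift_arg() ...
 *		return 0;
 *	}
 */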

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);


/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
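
/*
 * Illustrative sketch only: capping per-target I/O size from a constructor.
 * The "chunk_size" variable (in sectors) is a made-up example parameter; the
 * return value must be checked because the helper rejects invalid lengths.
 *
 *	r = dm_set_target_max_io_len(ti, chunk_size);
 *	if (r) {
 *		ti->error = "Invalid chunk size";
 *		return r;
 *	}
 *
 * With split_discard_requests set, discard requests are also split on this
 * boundary.
 */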

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()	__ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()	0
#endif

#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
#  define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
#  define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
#  define DMDEBUG(f, arg...) do {} while (0)
#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif
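
/*
 * Illustrative note: DM_MSG_PREFIX must be defined by the including source
 * file before any of the DM* logging macros above are used; dm targets
 * conventionally define it just before the include.  The "example" prefix
 * below is a made-up name for this sketch.
 *
 *	#define DM_MSG_PREFIX "example"
 *	#include <linux/device-mapper.h>
 *
 *	DMERR("device %s too small",
 *	      dm_device_name(dm_table_get_md(ti->table)));
 *
 * prints "device-mapper: example: device ... too small" at KERN_ERR.
 */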

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
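
/*
 * Illustrative sketch only: DMEMIT expects local variables named "sz",
 * "result" and "maxlen", which is why status functions conventionally start
 * with "unsigned sz = 0;".  The context struct and fields emitted here are
 * made-up examples.
 *
 *	static int example_status(struct dm_target *ti, status_type_t type,
 *				  char *result, unsigned maxlen)
 *	{
 *		struct example_c *ec = ti->private;
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long) ec->start);
 *			break;
 *		}
 *		return 0;
 *	}
 */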

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}
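
/*
 * Illustrative examples of the arithmetic helpers above (values made up):
 *
 *	to_sector(4096)		== 8	(bytes -> 512-byte sectors)
 *	to_bytes(8)		== 4096	(sectors -> bytes)
 *	dm_div_up(1000, 512)	== 2	(ceiling division)
 *	dm_round_up(1000, 512)	== 1024	(round up to a multiple of sz)
 */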

/*-----------------------------------------------------------------
 * Helper for block layer and dm core operations
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif	/* _LINUX_DEVICE_MAPPER_H */