1 /*
2 * Copyright (C) 2009-2011 Red Hat, Inc.
3 *
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
5 *
6 * This file is released under the GPL.
7 */
8
9 #include "dm-bufio.h"
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/slab.h>
14 #include <linux/sched/mm.h>
15 #include <linux/jiffies.h>
16 #include <linux/vmalloc.h>
17 #include <linux/shrinker.h>
18 #include <linux/module.h>
19 #include <linux/rbtree.h>
20 #include <linux/stacktrace.h>
21
22 #define DM_MSG_PREFIX "bufio"
23
24 /*
25 * Memory management policy:
26 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
27 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
28 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
29 * Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
30 * dirty buffers.
31 */
32 #define DM_BUFIO_MIN_BUFFERS 8
33
34 #define DM_BUFIO_MEMORY_PERCENT 2
35 #define DM_BUFIO_VMALLOC_PERCENT 25
36 #define DM_BUFIO_WRITEBACK_PERCENT 75
37
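/*
 * Illustrative sketch (not part of the driver): with the defaults above,
 * a machine with 8 GiB of low memory and 512 MiB of vmalloc space would
 * get roughly
 *
 *	min(8 GiB * 2%, 512 MiB * 25%) = min(164 MiB, 128 MiB) = 128 MiB
 *
 * of cache by default, and background writeback would start once dirty
 * buffers exceed 75% of a client's share of that cache.  The figures are
 * assumed for the example only; the real limit is computed in
 * dm_bufio_init() and __cache_size_refresh() below.
 */
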
38 /*
39 * Check buffer ages in this interval (seconds)
40 */
41 #define DM_BUFIO_WORK_TIMER_SECS 30
42
43 /*
44 * Free buffers when they are older than this (seconds)
45 */
46 #define DM_BUFIO_DEFAULT_AGE_SECS 300
47
48 /*
49 * The number of bytes of cached data to keep around.
50 */
51 #define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
52
53 /*
54 * The number of bvec entries that are embedded directly in the buffer.
56 * If the chunk size is larger, dm-io is used to do the I/O.
56 */
57 #define DM_BUFIO_INLINE_VECS 16
58
59 /*
60 * Don't try to use kmem_cache_alloc for blocks larger than this.
61 * For explanation, see alloc_buffer_data below.
62 */
63 #define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
64 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))
65
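/*
 * Illustrative sketch (assuming 4 KiB pages and MAX_ORDER == 11): a
 * 512 B, 1 KiB or 2 KiB block (<= PAGE_SIZE / 2) comes from a
 * kmem_cache, a 64 KiB block (<= PAGE_SIZE << (MAX_ORDER - 1) == 4 MiB)
 * comes from __get_free_pages when the allocation is allowed to fail,
 * and anything bigger, or any allocation that must not fail, falls back
 * to __vmalloc.  See alloc_buffer_data() below for the authoritative
 * logic.
 */
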
66 /*
67 * dm_buffer->list_mode
68 */
69 #define LIST_CLEAN 0
70 #define LIST_DIRTY 1
71 #define LIST_SIZE 2
72
73 /*
74 * Linking of buffers:
75 * All buffers are linked to the buffer_tree with their node field.
76 *
77 * Clean buffers that are not being written (B_WRITING not set)
78 * are linked to lru[LIST_CLEAN] with their lru_list field.
79 *
80 * Dirty and clean buffers that are being written are linked to
81 * lru[LIST_DIRTY] with their lru_list field. When the write
82 * finishes, the buffer cannot be relinked immediately (because we
83 * are in an interrupt context and relinking requires process
84 * context), so some clean-not-writing buffers can be held on
85 * dirty_lru too. They are later added to lru in the process
86 * context.
87 */
88 struct dm_bufio_client {
89 struct mutex lock;
90
91 struct list_head lru[LIST_SIZE];
92 unsigned long n_buffers[LIST_SIZE];
93
94 struct block_device *bdev;
95 unsigned block_size;
96 unsigned char sectors_per_block_bits;
97 unsigned char pages_per_block_bits;
98 unsigned char blocks_per_page_bits;
99 unsigned aux_size;
100 void (*alloc_callback)(struct dm_buffer *);
101 void (*write_callback)(struct dm_buffer *);
102
103 struct dm_io_client *dm_io;
104
105 struct list_head reserved_buffers;
106 unsigned need_reserved_buffers;
107
108 unsigned minimum_buffers;
109
110 struct rb_root buffer_tree;
111 wait_queue_head_t free_buffer_wait;
112
113 sector_t start;
114
115 int async_write_error;
116
117 struct list_head client_list;
118 struct shrinker shrinker;
119 };
120
121 /*
122 * Buffer state bits.
123 */
124 #define B_READING 0
125 #define B_WRITING 1
126 #define B_DIRTY 2
127
128 /*
129 * Describes how the block was allocated:
130 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
131 * See the comment at alloc_buffer_data.
132 */
133 enum data_mode {
134 DATA_MODE_SLAB = 0,
135 DATA_MODE_GET_FREE_PAGES = 1,
136 DATA_MODE_VMALLOC = 2,
137 DATA_MODE_LIMIT = 3
138 };
139
140 struct dm_buffer {
141 struct rb_node node;
142 struct list_head lru_list;
143 sector_t block;
144 void *data;
145 enum data_mode data_mode;
146 unsigned char list_mode; /* LIST_* */
147 unsigned hold_count;
148 int read_error;
149 int write_error;
150 unsigned long state;
151 unsigned long last_accessed;
152 struct dm_bufio_client *c;
153 struct list_head write_list;
154 struct bio bio;
155 struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
156 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
157 #define MAX_STACK 10
158 struct stack_trace stack_trace;
159 unsigned long stack_entries[MAX_STACK];
160 #endif
161 };
162
163 /*----------------------------------------------------------------*/
164
165 static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
166 static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
167
168 static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
169 {
170 unsigned ret = c->blocks_per_page_bits - 1;
171
172 BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
173
174 return ret;
175 }
176
177 #define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)])
178 #define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)])
179
180 #define dm_bufio_in_request() (!!current->bio_list)
181
182 static void dm_bufio_lock(struct dm_bufio_client *c)
183 {
184 mutex_lock_nested(&c->lock, dm_bufio_in_request());
185 }
186
187 static int dm_bufio_trylock(struct dm_bufio_client *c)
188 {
189 return mutex_trylock(&c->lock);
190 }
191
192 static void dm_bufio_unlock(struct dm_bufio_client *c)
193 {
194 mutex_unlock(&c->lock);
195 }
196
197 /*----------------------------------------------------------------*/
198
199 /*
200 * Default cache size: available memory divided by the ratio.
201 */
202 static unsigned long dm_bufio_default_cache_size;
203
204 /*
205 * Total cache size set by the user.
206 */
207 static unsigned long dm_bufio_cache_size;
208
209 /*
210 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
211 * at any time. If it disagrees, the user has changed cache size.
212 */
213 static unsigned long dm_bufio_cache_size_latch;
214
215 static DEFINE_SPINLOCK(param_spinlock);
216
217 /*
218 * Buffers are freed after this timeout
219 */
220 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
221 static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
222
223 static unsigned long dm_bufio_peak_allocated;
224 static unsigned long dm_bufio_allocated_kmem_cache;
225 static unsigned long dm_bufio_allocated_get_free_pages;
226 static unsigned long dm_bufio_allocated_vmalloc;
227 static unsigned long dm_bufio_current_allocated;
228
229 /*----------------------------------------------------------------*/
230
231 /*
232 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
233 */
234 static unsigned long dm_bufio_cache_size_per_client;
235
236 /*
237 * The current number of clients.
238 */
239 static int dm_bufio_client_count;
240
241 /*
242 * The list of all clients.
243 */
244 static LIST_HEAD(dm_bufio_all_clients);
245
246 /*
247 * This mutex protects dm_bufio_cache_size_latch,
248 * dm_bufio_cache_size_per_client and dm_bufio_client_count
249 */
250 static DEFINE_MUTEX(dm_bufio_clients_lock);
251
252 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
253 static void buffer_record_stack(struct dm_buffer *b)
254 {
255 b->stack_trace.nr_entries = 0;
256 b->stack_trace.max_entries = MAX_STACK;
257 b->stack_trace.entries = b->stack_entries;
258 b->stack_trace.skip = 2;
259 save_stack_trace(&b->stack_trace);
260 }
261 #endif
262
263 /*----------------------------------------------------------------
264 * A red/black tree acts as an index for all the buffers.
265 *--------------------------------------------------------------*/
266 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
267 {
268 struct rb_node *n = c->buffer_tree.rb_node;
269 struct dm_buffer *b;
270
271 while (n) {
272 b = container_of(n, struct dm_buffer, node);
273
274 if (b->block == block)
275 return b;
276
277 n = (b->block < block) ? n->rb_left : n->rb_right;
278 }
279
280 return NULL;
281 }
282
283 static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
284 {
285 struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
286 struct dm_buffer *found;
287
288 while (*new) {
289 found = container_of(*new, struct dm_buffer, node);
290
291 if (found->block == b->block) {
292 BUG_ON(found != b);
293 return;
294 }
295
296 parent = *new;
297 new = (found->block < b->block) ?
298 &((*new)->rb_left) : &((*new)->rb_right);
299 }
300
301 rb_link_node(&b->node, parent, new);
302 rb_insert_color(&b->node, &c->buffer_tree);
303 }
304
305 static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
306 {
307 rb_erase(&b->node, &c->buffer_tree);
308 }
309
310 /*----------------------------------------------------------------*/
311
312 static void adjust_total_allocated(enum data_mode data_mode, long diff)
313 {
314 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
315 &dm_bufio_allocated_kmem_cache,
316 &dm_bufio_allocated_get_free_pages,
317 &dm_bufio_allocated_vmalloc,
318 };
319
320 spin_lock(&param_spinlock);
321
322 *class_ptr[data_mode] += diff;
323
324 dm_bufio_current_allocated += diff;
325
326 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
327 dm_bufio_peak_allocated = dm_bufio_current_allocated;
328
329 spin_unlock(&param_spinlock);
330 }
331
332 /*
333 * Change the number of clients and recalculate per-client limit.
334 */
335 static void __cache_size_refresh(void)
336 {
337 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
338 BUG_ON(dm_bufio_client_count < 0);
339
340 dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
341
342 /*
343 * Use default if set to 0 and report the actual cache size used.
344 */
345 if (!dm_bufio_cache_size_latch) {
346 (void)cmpxchg(&dm_bufio_cache_size, 0,
347 dm_bufio_default_cache_size);
348 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
349 }
350
351 dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
352 (dm_bufio_client_count ? : 1);
353 }
354
355 /*
356 * Allocating buffer data.
357 *
358 * Small buffers are allocated with kmem_cache, to use space optimally.
359 *
360 * For large buffers, we choose between get_free_pages and vmalloc.
361 * Each has advantages and disadvantages.
362 *
363 * __get_free_pages can randomly fail if the memory is fragmented.
364 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
365 * as low as 128M) so using it for caching is not appropriate.
366 *
367 * If the allocation may fail we use __get_free_pages. Memory fragmentation
368 * won't have a fatal effect here, but it just causes flushes of some other
369 * buffers and more I/O will be performed. Don't use __get_free_pages if it
370 * always fails (i.e. order >= MAX_ORDER).
371 *
372 * If the allocation shouldn't fail we use __vmalloc. This is only for the
373 * initial reserve allocation, so there's no risk of wasting all vmalloc
374 * space.
375 */
376 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
377 enum data_mode *data_mode)
378 {
379 unsigned noio_flag;
380 void *ptr;
381
382 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
383 *data_mode = DATA_MODE_SLAB;
384 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
385 }
386
387 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
388 gfp_mask & __GFP_NORETRY) {
389 *data_mode = DATA_MODE_GET_FREE_PAGES;
390 return (void *)__get_free_pages(gfp_mask,
391 c->pages_per_block_bits);
392 }
393
394 *data_mode = DATA_MODE_VMALLOC;
395
396 /*
397 * __vmalloc allocates the data pages and auxiliary structures with
398 * gfp_flags that were specified, but pagetables are always allocated
399 * with GFP_KERNEL, no matter what was specified as gfp_mask.
400 *
401 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
402 * all allocations done by this process (including pagetables) are done
403 * as if GFP_NOIO was specified.
404 */
405
406 if (gfp_mask & __GFP_NORETRY)
407 noio_flag = memalloc_noio_save();
408
409 ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
410
411 if (gfp_mask & __GFP_NORETRY)
412 memalloc_noio_restore(noio_flag);
413
414 return ptr;
415 }
416
417 /*
418 * Free buffer's data.
419 */
420 static void free_buffer_data(struct dm_bufio_client *c,
421 void *data, enum data_mode data_mode)
422 {
423 switch (data_mode) {
424 case DATA_MODE_SLAB:
425 kmem_cache_free(DM_BUFIO_CACHE(c), data);
426 break;
427
428 case DATA_MODE_GET_FREE_PAGES:
429 free_pages((unsigned long)data, c->pages_per_block_bits);
430 break;
431
432 case DATA_MODE_VMALLOC:
433 vfree(data);
434 break;
435
436 default:
437 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
438 data_mode);
439 BUG();
440 }
441 }
442
443 /*
444 * Allocate buffer and its data.
445 */
446 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
447 {
448 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
449 gfp_mask);
450
451 if (!b)
452 return NULL;
453
454 b->c = c;
455
456 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
457 if (!b->data) {
458 kfree(b);
459 return NULL;
460 }
461
462 adjust_total_allocated(b->data_mode, (long)c->block_size);
463
464 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
465 memset(&b->stack_trace, 0, sizeof(b->stack_trace));
466 #endif
467 return b;
468 }
469
470 /*
471 * Free buffer and its data.
472 */
473 static void free_buffer(struct dm_buffer *b)
474 {
475 struct dm_bufio_client *c = b->c;
476
477 adjust_total_allocated(b->data_mode, -(long)c->block_size);
478
479 free_buffer_data(c, b->data, b->data_mode);
480 kfree(b);
481 }
482
483 /*
484 * Link buffer to the buffer tree and the clean or dirty queue.
485 */
486 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
487 {
488 struct dm_bufio_client *c = b->c;
489
490 c->n_buffers[dirty]++;
491 b->block = block;
492 b->list_mode = dirty;
493 list_add(&b->lru_list, &c->lru[dirty]);
494 __insert(b->c, b);
495 b->last_accessed = jiffies;
496 }
497
498 /*
499 * Unlink buffer from the buffer tree and the dirty or clean queue.
500 */
501 static void __unlink_buffer(struct dm_buffer *b)
502 {
503 struct dm_bufio_client *c = b->c;
504
505 BUG_ON(!c->n_buffers[b->list_mode]);
506
507 c->n_buffers[b->list_mode]--;
508 __remove(b->c, b);
509 list_del(&b->lru_list);
510 }
511
512 /*
513 * Place the buffer to the head of dirty or clean LRU queue.
514 */
515 static void __relink_lru(struct dm_buffer *b, int dirty)
516 {
517 struct dm_bufio_client *c = b->c;
518
519 BUG_ON(!c->n_buffers[b->list_mode]);
520
521 c->n_buffers[b->list_mode]--;
522 c->n_buffers[dirty]++;
523 b->list_mode = dirty;
524 list_move(&b->lru_list, &c->lru[dirty]);
525 b->last_accessed = jiffies;
526 }
527
528 /*----------------------------------------------------------------
529 * Submit I/O on the buffer.
530 *
531 * Bio interface is faster but it has some problems:
532 * the vector list is limited (increasing this limit increases
533 * memory-consumption per buffer, so it is not viable);
534 *
535 * the memory must be direct-mapped, not vmalloced;
536 *
537 * the I/O driver can reject requests spuriously if it thinks that
538 * the requests are too big for the device or if they cross a
539 * controller-defined memory boundary.
540 *
541 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
542 * it is not vmalloced, try using the bio interface.
543 *
544 * If the buffer is big, if it is vmalloced or if the underlying device
545 * rejects the bio because it is too large, use dm-io layer to do the I/O.
546 * The dm-io layer splits the I/O into multiple requests, avoiding the above
547 * shortcomings.
548 *--------------------------------------------------------------*/
549
550 /*
551 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
552 * that the request was handled directly with the bio interface.
553 */
554 static void dmio_complete(unsigned long error, void *context)
555 {
556 struct dm_buffer *b = context;
557
558 b->bio.bi_error = error ? -EIO : 0;
559 b->bio.bi_end_io(&b->bio);
560 }
561
562 static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
563 unsigned n_sectors, bio_end_io_t *end_io)
564 {
565 int r;
566 struct dm_io_request io_req = {
567 .bi_op = rw,
568 .bi_op_flags = 0,
569 .notify.fn = dmio_complete,
570 .notify.context = b,
571 .client = b->c->dm_io,
572 };
573 struct dm_io_region region = {
574 .bdev = b->c->bdev,
575 .sector = sector,
576 .count = n_sectors,
577 };
578
579 if (b->data_mode != DATA_MODE_VMALLOC) {
580 io_req.mem.type = DM_IO_KMEM;
581 io_req.mem.ptr.addr = b->data;
582 } else {
583 io_req.mem.type = DM_IO_VMA;
584 io_req.mem.ptr.vma = b->data;
585 }
586
587 b->bio.bi_end_io = end_io;
588
589 r = dm_io(&io_req, 1, &region, NULL);
590 if (r) {
591 b->bio.bi_error = r;
592 end_io(&b->bio);
593 }
594 }
595
596 static void inline_endio(struct bio *bio)
597 {
598 bio_end_io_t *end_fn = bio->bi_private;
599 int error = bio->bi_error;
600
601 /*
602 * Reset the bio to free any attached resources
603 * (e.g. bio integrity profiles).
604 */
605 bio_reset(bio);
606
607 bio->bi_error = error;
608 end_fn(bio);
609 }
610
611 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
612 unsigned n_sectors, bio_end_io_t *end_io)
613 {
614 char *ptr;
615 int len;
616
617 bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
618 b->bio.bi_iter.bi_sector = sector;
619 b->bio.bi_bdev = b->c->bdev;
620 b->bio.bi_end_io = inline_endio;
621 /*
622 * Use of .bi_private isn't a problem here because
623 * the dm_buffer's inline bio is local to bufio.
624 */
625 b->bio.bi_private = end_io;
626 bio_set_op_attrs(&b->bio, rw, 0);
627
628 /*
629 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
630 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
631 */
632 ptr = b->data;
633 len = n_sectors << SECTOR_SHIFT;
634
635 if (len >= PAGE_SIZE)
636 BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
637 else
638 BUG_ON((unsigned long)ptr & (len - 1));
639
640 do {
641 if (!bio_add_page(&b->bio, virt_to_page(ptr),
642 len < PAGE_SIZE ? len : PAGE_SIZE,
643 offset_in_page(ptr))) {
644 BUG_ON(b->c->block_size <= PAGE_SIZE);
645 use_dmio(b, rw, sector, n_sectors, end_io);
646 return;
647 }
648
649 len -= PAGE_SIZE;
650 ptr += PAGE_SIZE;
651 } while (len > 0);
652
653 submit_bio(&b->bio);
654 }
655
656 static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
657 {
658 unsigned n_sectors;
659 sector_t sector;
660
661 if (rw == WRITE && b->c->write_callback)
662 b->c->write_callback(b);
663
664 sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;
665 n_sectors = 1 << b->c->sectors_per_block_bits;
666
667 if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) &&
668 b->data_mode != DATA_MODE_VMALLOC)
669 use_inline_bio(b, rw, sector, n_sectors, end_io);
670 else
671 use_dmio(b, rw, sector, n_sectors, end_io);
672 }
673
674 /*----------------------------------------------------------------
675 * Writing dirty buffers
676 *--------------------------------------------------------------*/
677
678 /*
679 * The endio routine for write.
680 *
681 * Set the error, clear the B_WRITING bit and wake anyone who was waiting on
682 * it.
683 */
684 static void write_endio(struct bio *bio)
685 {
686 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
687
688 b->write_error = bio->bi_error;
689 if (unlikely(bio->bi_error)) {
690 struct dm_bufio_client *c = b->c;
691 int error = bio->bi_error;
692 (void)cmpxchg(&c->async_write_error, 0, error);
693 }
694
695 BUG_ON(!test_bit(B_WRITING, &b->state));
696
697 smp_mb__before_atomic();
698 clear_bit(B_WRITING, &b->state);
699 smp_mb__after_atomic();
700
701 wake_up_bit(&b->state, B_WRITING);
702 }
703
704 /*
705 * Initiate a write on a dirty buffer, but don't wait for it.
706 *
707 * - If the buffer is not dirty, exit.
708 * - If there is some previous write going on, wait for it to finish (we can't
709 * have two writes on the same buffer simultaneously).
710 * - Submit our write and don't wait on it. We set B_WRITING indicating
711 * that there is a write in progress.
712 */
713 static void __write_dirty_buffer(struct dm_buffer *b,
714 struct list_head *write_list)
715 {
716 if (!test_bit(B_DIRTY, &b->state))
717 return;
718
719 clear_bit(B_DIRTY, &b->state);
720 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
721
722 if (!write_list)
723 submit_io(b, WRITE, write_endio);
724 else
725 list_add_tail(&b->write_list, write_list);
726 }
727
728 static void __flush_write_list(struct list_head *write_list)
729 {
730 struct blk_plug plug;
731 blk_start_plug(&plug);
732 while (!list_empty(write_list)) {
733 struct dm_buffer *b =
734 list_entry(write_list->next, struct dm_buffer, write_list);
735 list_del(&b->write_list);
736 submit_io(b, WRITE, write_endio);
737 cond_resched();
738 }
739 blk_finish_plug(&plug);
740 }
741
742 /*
743 * Wait until any activity on the buffer finishes. Possibly write the
744 * buffer if it is dirty. When this function finishes, there is no I/O
745 * running on the buffer and the buffer is not dirty.
746 */
747 static void __make_buffer_clean(struct dm_buffer *b)
748 {
749 BUG_ON(b->hold_count);
750
751 if (!b->state) /* fast case */
752 return;
753
754 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
755 __write_dirty_buffer(b, NULL);
756 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
757 }
758
759 /*
760 * Find some buffer that is not held by anybody, clean it, unlink it and
761 * return it.
762 */
763 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
764 {
765 struct dm_buffer *b;
766
767 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
768 BUG_ON(test_bit(B_WRITING, &b->state));
769 BUG_ON(test_bit(B_DIRTY, &b->state));
770
771 if (!b->hold_count) {
772 __make_buffer_clean(b);
773 __unlink_buffer(b);
774 return b;
775 }
776 cond_resched();
777 }
778
779 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
780 BUG_ON(test_bit(B_READING, &b->state));
781
782 if (!b->hold_count) {
783 __make_buffer_clean(b);
784 __unlink_buffer(b);
785 return b;
786 }
787 cond_resched();
788 }
789
790 return NULL;
791 }
792
793 /*
794 * Wait until some other thread frees a buffer or releases its hold count
795 * on some buffer.
796 *
797 * This function is entered with c->lock held, drops it and regains it
798 * before exiting.
799 */
800 static void __wait_for_free_buffer(struct dm_bufio_client *c)
801 {
802 DECLARE_WAITQUEUE(wait, current);
803
804 add_wait_queue(&c->free_buffer_wait, &wait);
805 set_current_state(TASK_UNINTERRUPTIBLE);
806 dm_bufio_unlock(c);
807
808 io_schedule();
809
810 remove_wait_queue(&c->free_buffer_wait, &wait);
811
812 dm_bufio_lock(c);
813 }
814
815 enum new_flag {
816 NF_FRESH = 0,
817 NF_READ = 1,
818 NF_GET = 2,
819 NF_PREFETCH = 3
820 };
821
822 /*
823 * Allocate a new buffer. If the allocation is not possible, wait until
824 * some other thread frees a buffer.
825 *
826 * May drop the lock and regain it.
827 */
828 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
829 {
830 struct dm_buffer *b;
831 bool tried_noio_alloc = false;
832
833 /*
834 * dm-bufio is resistant to allocation failures (it just keeps
835 * one buffer reserved in case all the allocations fail).
836 * So set flags to not try too hard:
837 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
838 * mutex and wait ourselves.
839 * __GFP_NORETRY: don't retry and rather return failure
840 * __GFP_NOMEMALLOC: don't use emergency reserves
841 * __GFP_NOWARN: don't print a warning in case of failure
842 *
843 * For debugging, if we set the cache size to 1, no new buffers will
844 * be allocated.
845 */
846 while (1) {
847 if (dm_bufio_cache_size_latch != 1) {
848 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
849 if (b)
850 return b;
851 }
852
853 if (nf == NF_PREFETCH)
854 return NULL;
855
856 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
857 dm_bufio_unlock(c);
858 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
859 dm_bufio_lock(c);
860 if (b)
861 return b;
862 tried_noio_alloc = true;
863 }
864
865 if (!list_empty(&c->reserved_buffers)) {
866 b = list_entry(c->reserved_buffers.next,
867 struct dm_buffer, lru_list);
868 list_del(&b->lru_list);
869 c->need_reserved_buffers++;
870
871 return b;
872 }
873
874 b = __get_unclaimed_buffer(c);
875 if (b)
876 return b;
877
878 __wait_for_free_buffer(c);
879 }
880 }
881
882 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
883 {
884 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
885
886 if (!b)
887 return NULL;
888
889 if (c->alloc_callback)
890 c->alloc_callback(b);
891
892 return b;
893 }
894
895 /*
896 * Free a buffer and wake other threads waiting for free buffers.
897 */
898 static void __free_buffer_wake(struct dm_buffer *b)
899 {
900 struct dm_bufio_client *c = b->c;
901
902 if (!c->need_reserved_buffers)
903 free_buffer(b);
904 else {
905 list_add(&b->lru_list, &c->reserved_buffers);
906 c->need_reserved_buffers--;
907 }
908
909 wake_up(&c->free_buffer_wait);
910 }
911
912 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
913 struct list_head *write_list)
914 {
915 struct dm_buffer *b, *tmp;
916
917 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
918 BUG_ON(test_bit(B_READING, &b->state));
919
920 if (!test_bit(B_DIRTY, &b->state) &&
921 !test_bit(B_WRITING, &b->state)) {
922 __relink_lru(b, LIST_CLEAN);
923 continue;
924 }
925
926 if (no_wait && test_bit(B_WRITING, &b->state))
927 return;
928
929 __write_dirty_buffer(b, write_list);
930 cond_resched();
931 }
932 }
933
934 /*
935 * Get writeback threshold and buffer limit for a given client.
936 */
937 static void __get_memory_limit(struct dm_bufio_client *c,
938 unsigned long *threshold_buffers,
939 unsigned long *limit_buffers)
940 {
941 unsigned long buffers;
942
943 if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
944 if (mutex_trylock(&dm_bufio_clients_lock)) {
945 __cache_size_refresh();
946 mutex_unlock(&dm_bufio_clients_lock);
947 }
948 }
949
950 buffers = dm_bufio_cache_size_per_client >>
951 (c->sectors_per_block_bits + SECTOR_SHIFT);
952
953 if (buffers < c->minimum_buffers)
954 buffers = c->minimum_buffers;
955
956 *limit_buffers = buffers;
957 *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
958 }
959
960 /*
961 * Check if we're over the watermark.
962 * If we are over threshold_buffers, start freeing buffers.
963 * If we're over limit_buffers, block until we get under the limit.
964 */
965 static void __check_watermark(struct dm_bufio_client *c,
966 struct list_head *write_list)
967 {
968 unsigned long threshold_buffers, limit_buffers;
969
970 __get_memory_limit(c, &threshold_buffers, &limit_buffers);
971
972 while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
973 limit_buffers) {
974
975 struct dm_buffer *b = __get_unclaimed_buffer(c);
976
977 if (!b)
978 return;
979
980 __free_buffer_wake(b);
981 cond_resched();
982 }
983
984 if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
985 __write_dirty_buffers_async(c, 1, write_list);
986 }
987
988 /*----------------------------------------------------------------
989 * Getting a buffer
990 *--------------------------------------------------------------*/
991
992 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
993 enum new_flag nf, int *need_submit,
994 struct list_head *write_list)
995 {
996 struct dm_buffer *b, *new_b = NULL;
997
998 *need_submit = 0;
999
1000 b = __find(c, block);
1001 if (b)
1002 goto found_buffer;
1003
1004 if (nf == NF_GET)
1005 return NULL;
1006
1007 new_b = __alloc_buffer_wait(c, nf);
1008 if (!new_b)
1009 return NULL;
1010
1011 /*
1012 * We've had a period where the mutex was unlocked, so we need to
1013 * recheck the buffer tree.
1014 */
1015 b = __find(c, block);
1016 if (b) {
1017 __free_buffer_wake(new_b);
1018 goto found_buffer;
1019 }
1020
1021 __check_watermark(c, write_list);
1022
1023 b = new_b;
1024 b->hold_count = 1;
1025 b->read_error = 0;
1026 b->write_error = 0;
1027 __link_buffer(b, block, LIST_CLEAN);
1028
1029 if (nf == NF_FRESH) {
1030 b->state = 0;
1031 return b;
1032 }
1033
1034 b->state = 1 << B_READING;
1035 *need_submit = 1;
1036
1037 return b;
1038
1039 found_buffer:
1040 if (nf == NF_PREFETCH)
1041 return NULL;
1042 /*
1043 * Note: it is essential that we don't wait for the buffer to be
1044 * read if the dm_bufio_get function is used. Both dm_bufio_get and
1045 * dm_bufio_prefetch can be used in the driver request routine.
1046 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1047 * the same buffer, it would deadlock if we waited.
1048 */
1049 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1050 return NULL;
1051
1052 b->hold_count++;
1053 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1054 test_bit(B_WRITING, &b->state));
1055 return b;
1056 }
1057
1058 /*
1059 * The endio routine for reading: set the error, clear the bit and wake up
1060 * anyone waiting on the buffer.
1061 */
1062 static void read_endio(struct bio *bio)
1063 {
1064 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
1065
1066 b->read_error = bio->bi_error;
1067
1068 BUG_ON(!test_bit(B_READING, &b->state));
1069
1070 smp_mb__before_atomic();
1071 clear_bit(B_READING, &b->state);
1072 smp_mb__after_atomic();
1073
1074 wake_up_bit(&b->state, B_READING);
1075 }
1076
1077 /*
1078 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1079 * functions is similar except that dm_bufio_new doesn't read the
1080 * buffer from the disk (assuming that the caller overwrites all the data
1081 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1082 */
1083 static void *new_read(struct dm_bufio_client *c, sector_t block,
1084 enum new_flag nf, struct dm_buffer **bp)
1085 {
1086 int need_submit;
1087 struct dm_buffer *b;
1088
1089 LIST_HEAD(write_list);
1090
1091 dm_bufio_lock(c);
1092 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1093 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1094 if (b && b->hold_count == 1)
1095 buffer_record_stack(b);
1096 #endif
1097 dm_bufio_unlock(c);
1098
1099 __flush_write_list(&write_list);
1100
1101 if (!b)
1102 return NULL;
1103
1104 if (need_submit)
1105 submit_io(b, READ, read_endio);
1106
1107 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1108
1109 if (b->read_error) {
1110 int error = b->read_error;
1111
1112 dm_bufio_release(b);
1113
1114 return ERR_PTR(error);
1115 }
1116
1117 *bp = b;
1118
1119 return b->data;
1120 }
1121
1122 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1123 struct dm_buffer **bp)
1124 {
1125 return new_read(c, block, NF_GET, bp);
1126 }
1127 EXPORT_SYMBOL_GPL(dm_bufio_get);
1128
1129 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1130 struct dm_buffer **bp)
1131 {
1132 BUG_ON(dm_bufio_in_request());
1133
1134 return new_read(c, block, NF_READ, bp);
1135 }
1136 EXPORT_SYMBOL_GPL(dm_bufio_read);
1137
1138 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1139 struct dm_buffer **bp)
1140 {
1141 BUG_ON(dm_bufio_in_request());
1142
1143 return new_read(c, block, NF_FRESH, bp);
1144 }
1145 EXPORT_SYMBOL_GPL(dm_bufio_new);
1146
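/*
 * Usage sketch (illustrative only, not part of the driver): a typical
 * client reads a block, inspects the data and drops its reference.  The
 * client "c" and the block number are assumed to exist already:
 *
 *	struct dm_buffer *buf;
 *	void *data;
 *
 *	data = dm_bufio_read(c, block, &buf);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... examine or copy the block contents ...
 *	dm_bufio_release(buf);
 *
 * dm_bufio_new() is used the same way when the caller intends to
 * overwrite the whole block, and dm_bufio_get() when the caller only
 * wants the data if it is already cached.
 */
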
1147 void dm_bufio_prefetch(struct dm_bufio_client *c,
1148 sector_t block, unsigned n_blocks)
1149 {
1150 struct blk_plug plug;
1151
1152 LIST_HEAD(write_list);
1153
1154 BUG_ON(dm_bufio_in_request());
1155
1156 blk_start_plug(&plug);
1157 dm_bufio_lock(c);
1158
1159 for (; n_blocks--; block++) {
1160 int need_submit;
1161 struct dm_buffer *b;
1162 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1163 &write_list);
1164 if (unlikely(!list_empty(&write_list))) {
1165 dm_bufio_unlock(c);
1166 blk_finish_plug(&plug);
1167 __flush_write_list(&write_list);
1168 blk_start_plug(&plug);
1169 dm_bufio_lock(c);
1170 }
1171 if (unlikely(b != NULL)) {
1172 dm_bufio_unlock(c);
1173
1174 if (need_submit)
1175 submit_io(b, READ, read_endio);
1176 dm_bufio_release(b);
1177
1178 cond_resched();
1179
1180 if (!n_blocks)
1181 goto flush_plug;
1182 dm_bufio_lock(c);
1183 }
1184 }
1185
1186 dm_bufio_unlock(c);
1187
1188 flush_plug:
1189 blk_finish_plug(&plug);
1190 }
1191 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1192
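/*
 * Usage sketch (illustrative only): prefetch is typically issued from a
 * request routine before the blocks are actually needed, e.g.
 *
 *	dm_bufio_prefetch(c, first_block, 8);
 *
 * starts reads for eight consecutive blocks without waiting for them; a
 * later dm_bufio_read() on the same blocks then finds them cached or
 * already in flight.  The client "c" and "first_block" are assumed.
 */
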
1193 void dm_bufio_release(struct dm_buffer *b)
1194 {
1195 struct dm_bufio_client *c = b->c;
1196
1197 dm_bufio_lock(c);
1198
1199 BUG_ON(!b->hold_count);
1200
1201 b->hold_count--;
1202 if (!b->hold_count) {
1203 wake_up(&c->free_buffer_wait);
1204
1205 /*
1206 * If there were errors on the buffer, and the buffer is not
1207 * to be written, free the buffer. There is no point in caching an
1208 * invalid buffer.
1209 */
1210 if ((b->read_error || b->write_error) &&
1211 !test_bit(B_READING, &b->state) &&
1212 !test_bit(B_WRITING, &b->state) &&
1213 !test_bit(B_DIRTY, &b->state)) {
1214 __unlink_buffer(b);
1215 __free_buffer_wake(b);
1216 }
1217 }
1218
1219 dm_bufio_unlock(c);
1220 }
1221 EXPORT_SYMBOL_GPL(dm_bufio_release);
1222
1223 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1224 {
1225 struct dm_bufio_client *c = b->c;
1226
1227 dm_bufio_lock(c);
1228
1229 BUG_ON(test_bit(B_READING, &b->state));
1230
1231 if (!test_and_set_bit(B_DIRTY, &b->state))
1232 __relink_lru(b, LIST_DIRTY);
1233
1234 dm_bufio_unlock(c);
1235 }
1236 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1237
1238 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1239 {
1240 LIST_HEAD(write_list);
1241
1242 BUG_ON(dm_bufio_in_request());
1243
1244 dm_bufio_lock(c);
1245 __write_dirty_buffers_async(c, 0, &write_list);
1246 dm_bufio_unlock(c);
1247 __flush_write_list(&write_list);
1248 }
1249 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1250
1251 /*
1252 * For performance, it is essential that the buffers are written asynchronously
1253 * and simultaneously (so that the block layer can merge the writes) and then
1254 * waited upon.
1255 *
1256 * Finally, we flush hardware disk cache.
1257 */
1258 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1259 {
1260 int a, f;
1261 unsigned long buffers_processed = 0;
1262 struct dm_buffer *b, *tmp;
1263
1264 LIST_HEAD(write_list);
1265
1266 dm_bufio_lock(c);
1267 __write_dirty_buffers_async(c, 0, &write_list);
1268 dm_bufio_unlock(c);
1269 __flush_write_list(&write_list);
1270 dm_bufio_lock(c);
1271
1272 again:
1273 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1274 int dropped_lock = 0;
1275
1276 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1277 buffers_processed++;
1278
1279 BUG_ON(test_bit(B_READING, &b->state));
1280
1281 if (test_bit(B_WRITING, &b->state)) {
1282 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1283 dropped_lock = 1;
1284 b->hold_count++;
1285 dm_bufio_unlock(c);
1286 wait_on_bit_io(&b->state, B_WRITING,
1287 TASK_UNINTERRUPTIBLE);
1288 dm_bufio_lock(c);
1289 b->hold_count--;
1290 } else
1291 wait_on_bit_io(&b->state, B_WRITING,
1292 TASK_UNINTERRUPTIBLE);
1293 }
1294
1295 if (!test_bit(B_DIRTY, &b->state) &&
1296 !test_bit(B_WRITING, &b->state))
1297 __relink_lru(b, LIST_CLEAN);
1298
1299 cond_resched();
1300
1301 /*
1302 * If we dropped the lock, the list is no longer consistent,
1303 * so we must restart the search.
1304 *
1305 * In the most common case, the buffer just processed is
1306 * relinked to the clean list, so we won't loop scanning the
1307 * same buffer again and again.
1308 *
1309 * This may livelock if there is another thread simultaneously
1310 * dirtying buffers, so we count the number of buffers walked
1311 * and if it exceeds the total number of buffers, it means that
1312 * someone is doing some writes simultaneously with us. In
1313 * this case, stop, dropping the lock.
1314 */
1315 if (dropped_lock)
1316 goto again;
1317 }
1318 wake_up(&c->free_buffer_wait);
1319 dm_bufio_unlock(c);
1320
1321 a = xchg(&c->async_write_error, 0);
1322 f = dm_bufio_issue_flush(c);
1323 if (a)
1324 return a;
1325
1326 return f;
1327 }
1328 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1329
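/*
 * Commit sketch (illustrative only): a metadata update typically dirties
 * one or more buffers and then commits them as a batch.  "c", "buf" and
 * the error handling are assumed:
 *
 *	... modify the data returned by dm_bufio_read() ...
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 *
 *	r = dm_bufio_write_dirty_buffers(c);
 *	if (r)
 *		... the commit failed, handle the error ...
 *
 * dm_bufio_write_dirty_buffers() also issues a flush, so the data is on
 * stable storage when it returns 0.
 */
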
1330 /*
1331 * Use dm-io to send an empty barrier and flush the device.
1332 */
1333 int dm_bufio_issue_flush(struct dm_bufio_client *c)
1334 {
1335 struct dm_io_request io_req = {
1336 .bi_op = REQ_OP_WRITE,
1337 .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
1338 .mem.type = DM_IO_KMEM,
1339 .mem.ptr.addr = NULL,
1340 .client = c->dm_io,
1341 };
1342 struct dm_io_region io_reg = {
1343 .bdev = c->bdev,
1344 .sector = 0,
1345 .count = 0,
1346 };
1347
1348 BUG_ON(dm_bufio_in_request());
1349
1350 return dm_io(&io_req, 1, &io_reg, NULL);
1351 }
1352 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1353
1354 /*
1355 * We first delete any other buffer that may be at that new location.
1356 *
1357 * Then, we write the buffer to the original location if it was dirty.
1358 *
1359 * Then, if we are the only one who is holding the buffer, relink the buffer
1360 * in the buffer tree for the new location.
1361 *
1362 * If there was someone else holding the buffer, we write it to the new
1363 * location but not relink it, because that other user needs to have the buffer
1364 * at the same place.
1365 */
1366 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1367 {
1368 struct dm_bufio_client *c = b->c;
1369 struct dm_buffer *new;
1370
1371 BUG_ON(dm_bufio_in_request());
1372
1373 dm_bufio_lock(c);
1374
1375 retry:
1376 new = __find(c, new_block);
1377 if (new) {
1378 if (new->hold_count) {
1379 __wait_for_free_buffer(c);
1380 goto retry;
1381 }
1382
1383 /*
1384 * FIXME: Is there any point waiting for a write that's going
1385 * to be overwritten in a bit?
1386 */
1387 __make_buffer_clean(new);
1388 __unlink_buffer(new);
1389 __free_buffer_wake(new);
1390 }
1391
1392 BUG_ON(!b->hold_count);
1393 BUG_ON(test_bit(B_READING, &b->state));
1394
1395 __write_dirty_buffer(b, NULL);
1396 if (b->hold_count == 1) {
1397 wait_on_bit_io(&b->state, B_WRITING,
1398 TASK_UNINTERRUPTIBLE);
1399 set_bit(B_DIRTY, &b->state);
1400 __unlink_buffer(b);
1401 __link_buffer(b, new_block, LIST_DIRTY);
1402 } else {
1403 sector_t old_block;
1404 wait_on_bit_lock_io(&b->state, B_WRITING,
1405 TASK_UNINTERRUPTIBLE);
1406 /*
1407 * Relink buffer to "new_block" so that write_callback
1408 * sees "new_block" as a block number.
1409 * After the write, link the buffer back to old_block.
1410 * All this must be done in bufio lock, so that block number
1411 * change isn't visible to other threads.
1412 */
1413 old_block = b->block;
1414 __unlink_buffer(b);
1415 __link_buffer(b, new_block, b->list_mode);
1416 submit_io(b, WRITE, write_endio);
1417 wait_on_bit_io(&b->state, B_WRITING,
1418 TASK_UNINTERRUPTIBLE);
1419 __unlink_buffer(b);
1420 __link_buffer(b, old_block, b->list_mode);
1421 }
1422
1423 dm_bufio_unlock(c);
1424 dm_bufio_release(b);
1425 }
1426 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1427
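/*
 * Sketch (illustrative only): dm_bufio_release_move() moves a cached
 * block to a new on-disk location (for example when shadowing a
 * metadata block) and drops the caller's reference in one call:
 *
 *	dm_bufio_release_move(buf, new_block);
 *
 * "buf" and "new_block" are assumed to exist; the caller must not touch
 * "buf" afterwards.
 */
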
1428 /*
1429 * Free the given buffer.
1430 *
1431 * This is just a hint; if the buffer is in use or dirty, this function
1432 * does nothing.
1433 */
1434 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1435 {
1436 struct dm_buffer *b;
1437
1438 dm_bufio_lock(c);
1439
1440 b = __find(c, block);
1441 if (b && likely(!b->hold_count) && likely(!b->state)) {
1442 __unlink_buffer(b);
1443 __free_buffer_wake(b);
1444 }
1445
1446 dm_bufio_unlock(c);
1447 }
1448 EXPORT_SYMBOL(dm_bufio_forget);
1449
1450 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1451 {
1452 c->minimum_buffers = n;
1453 }
1454 EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
1455
1456 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1457 {
1458 return c->block_size;
1459 }
1460 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1461
1462 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1463 {
1464 return i_size_read(c->bdev->bd_inode) >>
1465 (SECTOR_SHIFT + c->sectors_per_block_bits);
1466 }
1467 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1468
1469 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1470 {
1471 return b->block;
1472 }
1473 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1474
1475 void *dm_bufio_get_block_data(struct dm_buffer *b)
1476 {
1477 return b->data;
1478 }
1479 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1480
1481 void *dm_bufio_get_aux_data(struct dm_buffer *b)
1482 {
1483 return b + 1;
1484 }
1485 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1486
1487 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1488 {
1489 return b->c;
1490 }
1491 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1492
1493 static void drop_buffers(struct dm_bufio_client *c)
1494 {
1495 struct dm_buffer *b;
1496 int i;
1497 bool warned = false;
1498
1499 BUG_ON(dm_bufio_in_request());
1500
1501 /*
1502 * An optimization so that the buffers are not written one-by-one.
1503 */
1504 dm_bufio_write_dirty_buffers_async(c);
1505
1506 dm_bufio_lock(c);
1507
1508 while ((b = __get_unclaimed_buffer(c)))
1509 __free_buffer_wake(b);
1510
1511 for (i = 0; i < LIST_SIZE; i++)
1512 list_for_each_entry(b, &c->lru[i], lru_list) {
1513 WARN_ON(!warned);
1514 warned = true;
1515 DMERR("leaked buffer %llx, hold count %u, list %d",
1516 (unsigned long long)b->block, b->hold_count, i);
1517 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1518 print_stack_trace(&b->stack_trace, 1);
1519 b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
1520 #endif
1521 }
1522
1523 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1524 while ((b = __get_unclaimed_buffer(c)))
1525 __free_buffer_wake(b);
1526 #endif
1527
1528 for (i = 0; i < LIST_SIZE; i++)
1529 BUG_ON(!list_empty(&c->lru[i]));
1530
1531 dm_bufio_unlock(c);
1532 }
1533
1534 /*
1535 * We may not be able to evict this buffer if I/O is pending or the client
1536 * is still using it.  The caller is expected to know the buffer is too old.
1537 *
1538 * And if GFP_NOFS is used, we must not do any I/O because we hold
1539 * dm_bufio_clients_lock and we would risk a deadlock if the I/O gets
1540 * rerouted to a different bufio client.
1541 */
1542 static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
1543 {
1544 if (!(gfp & __GFP_FS)) {
1545 if (test_bit(B_READING, &b->state) ||
1546 test_bit(B_WRITING, &b->state) ||
1547 test_bit(B_DIRTY, &b->state))
1548 return false;
1549 }
1550
1551 if (b->hold_count)
1552 return false;
1553
1554 __make_buffer_clean(b);
1555 __unlink_buffer(b);
1556 __free_buffer_wake(b);
1557
1558 return true;
1559 }
1560
1561 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
1562 {
1563 unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
1564 return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
1565 }
1566
1567 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1568 gfp_t gfp_mask)
1569 {
1570 int l;
1571 struct dm_buffer *b, *tmp;
1572 unsigned long freed = 0;
1573 unsigned long count = nr_to_scan;
1574 unsigned long retain_target = get_retain_buffers(c);
1575
1576 for (l = 0; l < LIST_SIZE; l++) {
1577 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1578 if (__try_evict_buffer(b, gfp_mask))
1579 freed++;
1580 if (!--nr_to_scan || ((count - freed) <= retain_target))
1581 return freed;
1582 cond_resched();
1583 }
1584 }
1585 return freed;
1586 }
1587
1588 static unsigned long
1589 dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1590 {
1591 struct dm_bufio_client *c;
1592 unsigned long freed;
1593
1594 c = container_of(shrink, struct dm_bufio_client, shrinker);
1595 if (sc->gfp_mask & __GFP_FS)
1596 dm_bufio_lock(c);
1597 else if (!dm_bufio_trylock(c))
1598 return SHRINK_STOP;
1599
1600 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1601 dm_bufio_unlock(c);
1602 return freed;
1603 }
1604
1605 static unsigned long
1606 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1607 {
1608 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1609
1610 return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
1611 }
1612
1613 /*
1614 * Create the buffering interface
1615 */
1616 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1617 unsigned reserved_buffers, unsigned aux_size,
1618 void (*alloc_callback)(struct dm_buffer *),
1619 void (*write_callback)(struct dm_buffer *))
1620 {
1621 int r;
1622 struct dm_bufio_client *c;
1623 unsigned i;
1624
1625 BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1626 (block_size & (block_size - 1)));
1627
1628 c = kzalloc(sizeof(*c), GFP_KERNEL);
1629 if (!c) {
1630 r = -ENOMEM;
1631 goto bad_client;
1632 }
1633 c->buffer_tree = RB_ROOT;
1634
1635 c->bdev = bdev;
1636 c->block_size = block_size;
1637 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
1638 c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
1639 __ffs(block_size) - PAGE_SHIFT : 0;
1640 c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
1641 PAGE_SHIFT - __ffs(block_size) : 0);
1642
1643 c->aux_size = aux_size;
1644 c->alloc_callback = alloc_callback;
1645 c->write_callback = write_callback;
1646
1647 for (i = 0; i < LIST_SIZE; i++) {
1648 INIT_LIST_HEAD(&c->lru[i]);
1649 c->n_buffers[i] = 0;
1650 }
1651
1652 mutex_init(&c->lock);
1653 INIT_LIST_HEAD(&c->reserved_buffers);
1654 c->need_reserved_buffers = reserved_buffers;
1655
1656 c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
1657
1658 init_waitqueue_head(&c->free_buffer_wait);
1659 c->async_write_error = 0;
1660
1661 c->dm_io = dm_io_client_create();
1662 if (IS_ERR(c->dm_io)) {
1663 r = PTR_ERR(c->dm_io);
1664 goto bad_dm_io;
1665 }
1666
1667 mutex_lock(&dm_bufio_clients_lock);
1668 if (c->blocks_per_page_bits) {
1669 if (!DM_BUFIO_CACHE_NAME(c)) {
1670 DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1671 if (!DM_BUFIO_CACHE_NAME(c)) {
1672 r = -ENOMEM;
1673 mutex_unlock(&dm_bufio_clients_lock);
1674 goto bad_cache;
1675 }
1676 }
1677
1678 if (!DM_BUFIO_CACHE(c)) {
1679 DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1680 c->block_size,
1681 c->block_size, 0, NULL);
1682 if (!DM_BUFIO_CACHE(c)) {
1683 r = -ENOMEM;
1684 mutex_unlock(&dm_bufio_clients_lock);
1685 goto bad_cache;
1686 }
1687 }
1688 }
1689 mutex_unlock(&dm_bufio_clients_lock);
1690
1691 while (c->need_reserved_buffers) {
1692 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1693
1694 if (!b) {
1695 r = -ENOMEM;
1696 goto bad_buffer;
1697 }
1698 __free_buffer_wake(b);
1699 }
1700
1701 mutex_lock(&dm_bufio_clients_lock);
1702 dm_bufio_client_count++;
1703 list_add(&c->client_list, &dm_bufio_all_clients);
1704 __cache_size_refresh();
1705 mutex_unlock(&dm_bufio_clients_lock);
1706
1707 c->shrinker.count_objects = dm_bufio_shrink_count;
1708 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1709 c->shrinker.seeks = 1;
1710 c->shrinker.batch = 0;
1711 register_shrinker(&c->shrinker);
1712
1713 return c;
1714
1715 bad_buffer:
1716 bad_cache:
1717 while (!list_empty(&c->reserved_buffers)) {
1718 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1719 struct dm_buffer, lru_list);
1720 list_del(&b->lru_list);
1721 free_buffer(b);
1722 }
1723 dm_io_client_destroy(c->dm_io);
1724 bad_dm_io:
1725 kfree(c);
1726 bad_client:
1727 return ERR_PTR(r);
1728 }
1729 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
1730
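/*
 * Lifecycle sketch (illustrative only, assuming an already-opened "bdev"
 * and a 4 KiB block size with no auxiliary data or callbacks):
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *
 *	... use dm_bufio_read()/dm_bufio_new()/dm_bufio_release() ...
 *
 *	dm_bufio_client_destroy(c);
 *
 * All buffers must have been released before the client is destroyed.
 */
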
1731 /*
1732 * Free the buffering interface.
1733 * It is required that there are no references on any buffers.
1734 */
1735 void dm_bufio_client_destroy(struct dm_bufio_client *c)
1736 {
1737 unsigned i;
1738
1739 drop_buffers(c);
1740
1741 unregister_shrinker(&c->shrinker);
1742
1743 mutex_lock(&dm_bufio_clients_lock);
1744
1745 list_del(&c->client_list);
1746 dm_bufio_client_count--;
1747 __cache_size_refresh();
1748
1749 mutex_unlock(&dm_bufio_clients_lock);
1750
1751 BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
1752 BUG_ON(c->need_reserved_buffers);
1753
1754 while (!list_empty(&c->reserved_buffers)) {
1755 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1756 struct dm_buffer, lru_list);
1757 list_del(&b->lru_list);
1758 free_buffer(b);
1759 }
1760
1761 for (i = 0; i < LIST_SIZE; i++)
1762 if (c->n_buffers[i])
1763 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1764
1765 for (i = 0; i < LIST_SIZE; i++)
1766 BUG_ON(c->n_buffers[i]);
1767
1768 dm_io_client_destroy(c->dm_io);
1769 kfree(c);
1770 }
1771 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1772
1773 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
1774 {
1775 c->start = start;
1776 }
1777 EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
1778
1779 static unsigned get_max_age_hz(void)
1780 {
1781 unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);
1782
1783 if (max_age > UINT_MAX / HZ)
1784 max_age = UINT_MAX / HZ;
1785
1786 return max_age * HZ;
1787 }
1788
1789 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
1790 {
1791 return time_after_eq(jiffies, b->last_accessed + age_hz);
1792 }
1793
1794 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
1795 {
1796 struct dm_buffer *b, *tmp;
1797 unsigned long retain_target = get_retain_buffers(c);
1798 unsigned long count;
1799 LIST_HEAD(write_list);
1800
1801 dm_bufio_lock(c);
1802
1803 __check_watermark(c, &write_list);
1804 if (unlikely(!list_empty(&write_list))) {
1805 dm_bufio_unlock(c);
1806 __flush_write_list(&write_list);
1807 dm_bufio_lock(c);
1808 }
1809
1810 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1811 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
1812 if (count <= retain_target)
1813 break;
1814
1815 if (!older_than(b, age_hz))
1816 break;
1817
1818 if (__try_evict_buffer(b, 0))
1819 count--;
1820
1821 cond_resched();
1822 }
1823
1824 dm_bufio_unlock(c);
1825 }
1826
1827 static void cleanup_old_buffers(void)
1828 {
1829 unsigned long max_age_hz = get_max_age_hz();
1830 struct dm_bufio_client *c;
1831
1832 mutex_lock(&dm_bufio_clients_lock);
1833
1834 __cache_size_refresh();
1835
1836 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
1837 __evict_old_buffers(c, max_age_hz);
1838
1839 mutex_unlock(&dm_bufio_clients_lock);
1840 }
1841
1842 static struct workqueue_struct *dm_bufio_wq;
1843 static struct delayed_work dm_bufio_work;
1844
1845 static void work_fn(struct work_struct *w)
1846 {
1847 cleanup_old_buffers();
1848
1849 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1850 DM_BUFIO_WORK_TIMER_SECS * HZ);
1851 }
1852
1853 /*----------------------------------------------------------------
1854 * Module setup
1855 *--------------------------------------------------------------*/
1856
1857 /*
1858 * This is called only once for the whole dm_bufio module.
1859 * It initializes the memory limit.
1860 */
1861 static int __init dm_bufio_init(void)
1862 {
1863 __u64 mem;
1864
1865 dm_bufio_allocated_kmem_cache = 0;
1866 dm_bufio_allocated_get_free_pages = 0;
1867 dm_bufio_allocated_vmalloc = 0;
1868 dm_bufio_current_allocated = 0;
1869
1870 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1871 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1872
1873 mem = (__u64)((totalram_pages - totalhigh_pages) *
1874 DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
1875
1876 if (mem > ULONG_MAX)
1877 mem = ULONG_MAX;
1878
1879 #ifdef CONFIG_MMU
1880 /*
1881 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
1882 * in fs/proc/internal.h
1883 */
1884 if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
1885 mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
1886 #endif
1887
1888 dm_bufio_default_cache_size = mem;
1889
1890 mutex_lock(&dm_bufio_clients_lock);
1891 __cache_size_refresh();
1892 mutex_unlock(&dm_bufio_clients_lock);
1893
1894 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
1895 if (!dm_bufio_wq)
1896 return -ENOMEM;
1897
1898 INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1899 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1900 DM_BUFIO_WORK_TIMER_SECS * HZ);
1901
1902 return 0;
1903 }
1904
1905 /*
1906 * This is called once when unloading the dm_bufio module.
1907 */
1908 static void __exit dm_bufio_exit(void)
1909 {
1910 int bug = 0;
1911 int i;
1912
1913 cancel_delayed_work_sync(&dm_bufio_work);
1914 destroy_workqueue(dm_bufio_wq);
1915
1916 for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
1917 kmem_cache_destroy(dm_bufio_caches[i]);
1918
1919 for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1920 kfree(dm_bufio_cache_names[i]);
1921
1922 if (dm_bufio_client_count) {
1923 DMCRIT("%s: dm_bufio_client_count leaked: %d",
1924 __func__, dm_bufio_client_count);
1925 bug = 1;
1926 }
1927
1928 if (dm_bufio_current_allocated) {
1929 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1930 __func__, dm_bufio_current_allocated);
1931 bug = 1;
1932 }
1933
1934 if (dm_bufio_allocated_get_free_pages) {
1935 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1936 __func__, dm_bufio_allocated_get_free_pages);
1937 bug = 1;
1938 }
1939
1940 if (dm_bufio_allocated_vmalloc) {
1941 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
1942 __func__, dm_bufio_allocated_vmalloc);
1943 bug = 1;
1944 }
1945
1946 BUG_ON(bug);
1947 }
1948
1949 module_init(dm_bufio_init)
1950 module_exit(dm_bufio_exit)
1951
1952 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1953 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1954
1955 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1956 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1957
1958 module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
1959 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
1960
1961 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1962 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1963
1964 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1965 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1966
1967 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1968 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1969
1970 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1971 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1972
1973 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1974 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
1975
1976 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1977 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1978 MODULE_LICENSE("GPL");