/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	10

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The number of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS		16

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))

/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to cache_hash with their hash_list field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	sector_t start;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_* */
	unsigned hold_count;
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned long state;
	unsigned long last_accessed;
	unsigned dirty_start;
	unsigned dirty_end;
	unsigned write_start;
	unsigned write_end;
	struct dm_bufio_client *c;
	struct list_head write_list;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	struct stack_trace stack_trace;
	unsigned long stack_entries[MAX_STACK];
#endif
};

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}
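
/*
 * For example (illustrative numbers, assuming a 4k PAGE_SIZE): a client
 * with a 512-byte block size has blocks_per_page_bits == 3 and uses
 * dm_bufio_caches[2]; a 1k block size gives index 1; a 2k block size
 * gives index 0. Clients whose block size is >= PAGE_SIZE have
 * blocks_per_page_bits == 0 and never use these slab caches.
 */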

#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_trace.nr_entries = 0;
	b->stack_trace.max_entries = MAX_STACK;
	b->stack_trace.entries = b->stack_entries;
	b->stack_trace.skip = 2;
	save_stack_trace(&b->stack_trace);
}
#endif

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = (b->block < block) ? n->rb_right : n->rb_left;
	}

	return NULL;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = (found->block < b->block) ?
			&((*new)->rb_right) : &((*new)->rb_left);
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}
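
/*
 * For example (illustrative numbers): with dm_bufio_cache_size set to
 * 100 MiB and four registered clients, each client is allowed
 * 100 MiB / 4 = 25 MiB of cached buffers. With no clients the divisor
 * falls back to 1, so the "?:" keeps the division well defined.
 */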

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */
	if (gfp_mask & __GFP_NORETRY) {
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	memset(&b->stack_trace, 0, sizeof(b->stack_trace));
#endif
	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}

/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_status = error ? BLK_STS_IOERR : 0;
	b->bio.bi_end_io(&b->bio);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
		     unsigned n_sectors, unsigned offset, bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_op = rw,
		.bi_op_flags = 0,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r) {
		b->bio.bi_status = errno_to_blk_status(r);
		end_io(&b->bio);
	}
}

static void inline_endio(struct bio *bio)
{
	bio_end_io_t *end_fn = bio->bi_private;
	blk_status_t status = bio->bi_status;

	/*
	 * Reset the bio to free any attached resources
	 * (e.g. bio integrity profiles).
	 */
	bio_reset(bio);

	bio->bi_status = status;
	end_fn(bio);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
			   unsigned n_sectors, unsigned offset, bio_end_io_t *end_io)
{
	char *ptr;
	unsigned len;

	bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
	b->bio.bi_iter.bi_sector = sector;
	bio_set_dev(&b->bio, b->c->bdev);
	b->bio.bi_end_io = inline_endio;
	/*
	 * Use of .bi_private isn't a problem here because
	 * the dm_buffer's inline bio is local to bufio.
	 */
	b->bio.bi_private = end_io;
	bio_set_op_attrs(&b->bio, rw, 0);

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	do {
		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
		if (!bio_add_page(&b->bio, virt_to_page(ptr), this_step,
				  offset_in_page(ptr))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, sector, n_sectors, offset, end_io);
			return;
		}

		len -= this_step;
		ptr += this_step;
	} while (len > 0);

	submit_bio(&b->bio);
}

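/*
 * Worked example of the write alignment below (illustrative numbers):
 * with an 8k block, dirty_start == 700 and dirty_end == 5000, the dirty
 * range is rounded out to the DM_BUFIO_WRITE_ALIGN boundary:
 *
 *	offset = 700 & -4096 = 0
 *	end    = (5000 + 4095) & -4096 = 8192
 *
 * so 16 sectors, starting at the block's first sector, are written.
 */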
static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
{
	unsigned n_sectors;
	sector_t sector;
	unsigned offset, end;

	sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;

	if (rw != WRITE) {
		n_sectors = 1 << b->c->sectors_per_block_bits;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, sector, n_sectors, offset, end_io);
	else
		use_dmio(b, rw, sector, n_sectors, offset, end_io);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = bio->bi_status;
	if (unlikely(bio->bi_status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
			      blk_status_to_errno(bio->bi_status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, WRITE, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;
	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, WRITE, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		cond_resched();
	}
}

/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
		if (mutex_trylock(&dm_bufio_clients_lock)) {
			__cache_size_refresh();
			mutex_unlock(&dm_bufio_clients_lock);
		}
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < c->minimum_buffers)
		buffers = c->minimum_buffers;

	*limit_buffers = buffers;
	*threshold_buffers = mult_frac(buffers,
				       DM_BUFIO_WRITEBACK_PERCENT, 100);
}

/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the hash table.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = bio->bi_status;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, READ, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);
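
/*
 * A minimal read-path sketch (illustrative only; validate_block is a
 * hypothetical caller-side helper, not part of this file):
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_read(c, block, &bp);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	r = validate_block(data);
 *	dm_bufio_release(bp);
 *	return r;
 */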

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);

void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, read_endio);
			dm_bufio_release(b);

			cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
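
/*
 * A typical prefetch pattern (illustrative only; process_block is a
 * hypothetical helper): kick off reads for a range of blocks up front,
 * then access them; the later dm_bufio_read calls find the data already
 * cached or in flight:
 *
 *	dm_bufio_prefetch(c, first_block, n_blocks);
 *	for (i = 0; i < n_blocks; i++) {
 *		struct dm_buffer *bp;
 *		void *data = dm_bufio_read(c, first_block + i, &bp);
 *
 *		if (IS_ERR(data))
 *			continue;
 *		process_block(data);
 *		dm_bufio_release(bp);
 *	}
 */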

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in
		 * caching an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned start, unsigned end)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(start >= end);
	BUG_ON(end > b->c->block_size);

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		__relink_lru(b, LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us. In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
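
/*
 * A minimal write-path sketch (illustrative only): allocate a block
 * without reading it, fill it, mark it dirty and force it to stable
 * storage:
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_new(c, block, &bp);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memset(data, 0, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(bp);
 *	dm_bufio_release(bp);
 *	return dm_bufio_write_dirty_buffers(c);
 */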

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		b->dirty_start = 0;
		b->dirty_end = c->block_size;
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

/*
 * Free the given buffer.
 *
 * This is just a hint; if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b && likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL(dm_bufio_forget);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
	       (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;
	bool warned = false;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list) {
			WARN_ON(!warned);
			warned = true;
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			print_stack_trace(&b->stack_trace, 1);
			b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
#endif
		}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);
#endif

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if IO is pending or the client
 * is still using it. The caller is expected to know that the buffer is
 * too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to a different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}

static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
	return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
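
/*
 * For example (illustrative numbers): with the default retain_bytes of
 * 256 KiB and a 4k block size, sectors_per_block_bits is 3, so
 * 262144 >> (3 + 9) == 64 buffers are kept around even when the
 * shrinker or the ageing timer would otherwise evict them.
 */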

static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
			    gfp_t gfp_mask)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = c->n_buffers[LIST_CLEAN] +
			      c->n_buffers[LIST_DIRTY];
	unsigned long retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (__try_evict_buffer(b, gfp_mask))
				freed++;
			if (!--nr_to_scan || ((count - freed) <= retain_target))
				return freed;
			cond_resched();
		}
	}
	return freed;
}

static unsigned long
dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long freed;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return SHRINK_STOP;

	freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
	dm_bufio_unlock(c);
	return freed;
}

static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
	unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
			      READ_ONCE(c->n_buffers[LIST_DIRTY]);
	unsigned long retain_target = get_retain_buffers(c);

	return (count < retain_target) ? 0 : (count - retain_target);
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
				  __ffs(block_size) - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
				  PAGE_SHIFT - __ffs(block_size) : 0);

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
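
/*
 * A minimal client-creation sketch (illustrative only; the 4k block
 * size, single reserved buffer and absence of callbacks are arbitrary
 * choices, not requirements):
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */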

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	dm_io_client_destroy(c->dm_io);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{
	c->start = start;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);

static unsigned get_max_age_hz(void)
{
	unsigned max_age = READ_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return time_after_eq(jiffies, b->last_accessed + age_hz);
}

static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long count;
	LIST_HEAD(write_list);

	dm_bufio_lock(c);

	__check_watermark(c, &write_list);
	if (unlikely(!list_empty(&write_list))) {
		dm_bufio_unlock(c);
		__flush_write_list(&write_list);
		dm_bufio_lock(c);
	}

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		cond_resched();
	}

	dm_bufio_unlock(c);
}

static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	__cache_size_refresh();

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
		kmem_cache_destroy(dm_bufio_caches[i]);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");