/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75
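
/*
 * Illustrative arithmetic (added commentary, not from the original source):
 * on a machine with 4 GiB of directly-mapped memory and the defaults above,
 * the cache is capped at about 2% = ~80 MiB, unless 25% of vmalloc space is
 * smaller.  Background writeback kicks in once ~75% of a client's buffer
 * quota is dirty.
 */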

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS		16

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to cache_hash with their hash_list field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */

struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;

	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;

	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_* */
	unsigned hold_count;
	int read_error;
	int write_error;
	unsigned long state;
	unsigned long last_accessed;
	struct dm_bufio_client *c;
	struct list_head write_list;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}

#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request()	(!!current->bio_list)
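
/*
 * Worked example (illustrative only, not from the original source): with
 * 4 KiB pages, a client using 512-byte blocks has blocks_per_page_bits == 3
 * and maps to dm_bufio_caches[2]; 2 KiB blocks give blocks_per_page_bits == 1
 * and index 0.  Blocks of a full page or more have blocks_per_page_bits == 0
 * and never take the slab path at all.
 */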

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
#  define dm_bufio_cond_resched()		\
do {						\
	if (unlikely(need_resched()))		\
		_cond_resched();		\
} while (0)
#else
#  define dm_bufio_cond_resched()		do { } while (0)
#endif

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = (b->block < block) ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = (found->block < b->block) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}
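
/*
 * Note (added commentary): __find, __insert and __remove all expect the
 * caller to hold c->lock; a lookup is a plain O(log n) walk keyed by
 * b->block, e.g.:
 *
 *	dm_bufio_lock(c);
 *	b = __find(c, block);
 *	dm_bufio_unlock(c);
 */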

/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	void *ptr;
	unsigned noio_flag;

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */

	if (gfp_mask & __GFP_NORETRY)
		noio_flag = memalloc_noio_save();

	ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);

	if (gfp_mask & __GFP_NORETRY)
		memalloc_noio_restore(noio_flag);

	return ptr;
}
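
/*
 * Illustrative decision table (added commentary, not from the original
 * source), assuming 4 KiB pages: a 1 KiB block fits under
 * DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (2 KiB) and comes from the slab; a 64 KiB
 * block requested with __GFP_NORETRY comes from __get_free_pages; a
 * large-block reserve allocation (which must not fail and therefore lacks
 * __GFP_NORETRY) falls through to __vmalloc.
 */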

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}

/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/
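
/*
 * Illustrative threshold (added commentary, not from the original source):
 * with 4 KiB pages and DM_BUFIO_INLINE_VECS == 16, buffers up to 64 KiB that
 * are not vmalloc-backed go through the inline bio; anything bigger, or
 * anything vmalloced, is handed to dm-io.
 */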

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
		     bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = b->data;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = b->data;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		end_io(&b->bio, r);
}

static void inline_endio(struct bio *bio, int error)
{
	bio_end_io_t *end_fn = bio->bi_private;

	/*
	 * Reset the bio to free any attached resources
	 * (e.g. bio integrity profiles).
	 */
	bio_reset(bio);

	end_fn(bio, error);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
			   bio_end_io_t *end_io)
{
	char *ptr;
	int len;

	bio_init(&b->bio);
	b->bio.bi_io_vec = b->bio_vec;
	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
	b->bio.bi_bdev = b->c->bdev;
	b->bio.bi_end_io = inline_endio;
	/*
	 * Use of .bi_private isn't a problem here because
	 * the dm_buffer's inline bio is local to bufio.
	 */
	b->bio.bi_private = end_io;

	/*
	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
	 */
	ptr = b->data;
	len = b->c->block_size;

	if (len >= PAGE_SIZE)
		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
	else
		BUG_ON((unsigned long)ptr & (len - 1));

	do {
		if (!bio_add_page(&b->bio, virt_to_page(ptr),
				  len < PAGE_SIZE ? len : PAGE_SIZE,
				  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, block, end_io);
			return;
		}

		len -= PAGE_SIZE;
		ptr += PAGE_SIZE;
	} while (len > 0);

	submit_bio(rw, &b->bio);
}

static void submit_io(struct dm_buffer *b, int rw, sector_t block,
		      bio_end_io_t *end_io)
{
	if (rw == WRITE && b->c->write_callback)
		b->c->write_callback(b);

	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, block, end_io);
	else
		use_dmio(b, rw, block, end_io);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = error;
	if (unlikely(error)) {
		struct dm_bufio_client *c = b->c;
		(void)cmpxchg(&c->async_write_error, 0, error);
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	if (!write_list)
		submit_io(b, WRITE, b->block, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}

static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, WRITE, b->block, write_endio);
		dm_bufio_cond_resched();
	}
	blk_finish_plug(&plug);
}

/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	return NULL;
}

/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}

enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOIO: don't recurse into the I/O layer
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		dm_bufio_cond_resched();
	}
}

/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
		mutex_lock(&dm_bufio_clients_lock);
		__cache_size_refresh();
		mutex_unlock(&dm_bufio_clients_lock);
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < c->minimum_buffers)
		buffers = c->minimum_buffers;

	*limit_buffers = buffers;
	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}
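
/*
 * Worked example (added commentary, not from the original source): if
 * dm_bufio_cache_size_per_client is 100 MiB and the client uses 4 KiB
 * blocks, limit_buffers is 100 MiB >> 12 = 25600 and threshold_buffers is
 * 75% of that, 19200.
 */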

/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		dm_bufio_cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the hash table.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = error;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
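
/*
 * Usage sketch (added for illustration; not part of the original file,
 * error handling trimmed).  A typical client reads a block, modifies it
 * and writes it back:
 *
 *	struct dm_buffer *b;
 *	void *data;
 *	struct dm_bufio_client *c =
 *		dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *
 *	data = dm_bufio_read(c, block, &b);
 *	if (!IS_ERR(data)) {
 *		memset(data, 0, dm_bufio_get_block_size(c));
 *		dm_bufio_mark_buffer_dirty(b);
 *		dm_bufio_release(b);
 *		dm_bufio_write_dirty_buffers(c);
 *	}
 *	dm_bufio_client_destroy(c);
 */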

void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, b->block, read_endio);
			dm_bufio_release(b);

			dm_bufio_cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
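
/*
 * Note (added commentary): prefetch is fire-and-forget; each buffer is
 * released as soon as its read is submitted, so a later dm_bufio_read on
 * the same block typically finds it cached and only waits for the read to
 * complete, if at all.
 */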

void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state))
		__relink_lru(b, LIST_DIRTY);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		dm_bufio_cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us. In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);

	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

/*
 * Free the given buffer.
 *
 * This is just a hint, if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b && likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL(dm_bufio_forget);

void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
	       (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list)
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}

/*
 * We may not be able to evict this buffer if IO pending or the client
 * is still using it. Caller is expected to know buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}

static unsigned get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);

	return retain_bytes / c->block_size;
}
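
/*
 * Illustrative arithmetic (added commentary, not from the original source):
 * with the default DM_BUFIO_DEFAULT_RETAIN_BYTES of 256 KiB and 4 KiB
 * blocks, eviction keeps at least 64 buffers per client around.
 */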

static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
			    gfp_t gfp_mask)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = nr_to_scan;
	unsigned retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (__try_evict_buffer(b, gfp_mask))
				freed++;
			if (!--nr_to_scan || ((count - freed) <= retain_target))
				return freed;
			dm_bufio_cond_resched();
		}
	}
	return freed;
}

static unsigned long
dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long freed;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return SHRINK_STOP;

	freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
	dm_bufio_unlock(c);
	return freed;
}

static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long count;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return 0;

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	dm_bufio_unlock(c);
	return count;
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
	c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
				  ffs(block_size) - 1 - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
				  PAGE_SHIFT - (ffs(block_size) - 1) : 0);

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	dm_io_client_destroy(c->dm_io);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

static unsigned get_max_age_hz(void)
{
	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return (jiffies - b->last_accessed) >= age_hz;
}
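
/*
 * Note (added commentary): the unsigned subtraction jiffies -
 * b->last_accessed stays correct across jiffies wraparound, which is why
 * the comparison is not written as jiffies >= b->last_accessed + age_hz.
 */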

static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned retain_target = get_retain_buffers(c);
	unsigned count;

	dm_bufio_lock(c);

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		dm_bufio_cond_resched();
	}

	dm_bufio_unlock(c);
}

static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)((totalram_pages - totalhigh_pages) *
		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	/*
	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
	 * in fs/proc/internal.h
	 */
	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
		struct kmem_cache *kc = dm_bufio_caches[i];

		if (kc)
			kmem_cache_destroy(kc);
	}

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	if (bug)
		BUG();
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");