/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
			       unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5.c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked will be set.  In either case, this function will
	 * abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
		/* We don't support hijack for cluster raid */
		if (no_hijack)
			return -ENOMEM;
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		kfree(mappage);
	} else {

		/* no page was in place and we have one, so install it */

		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}
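/*
 * Illustrative sketch (not driver code): when the kzalloc() above fails and
 * the page is "hijacked", the pointer field itself is reused as in-place
 * storage for two 16-bit counters, which is how md_bitmap_get_counter()
 * later reads it.  A minimal user-space analogue, assuming only that a
 * pointer is at least four bytes wide; the names here are illustrative:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

union hijacked_slot {
	void *map;		/* normal case: pointer to a counter page */
	uint16_t counters[2];	/* hijacked case: two counters in-place */
};

int main(void)
{
	union hijacked_slot slot = { .map = NULL };

	/* hijacked: store the counters directly in the pointer's bytes */
	slot.counters[0] = 3;
	slot.counters[1] = 7;
	printf("counter 0 = %u, counter 1 = %u\n",
	       slot.counters[0], slot.counters[1]);
	return 0;
}
#endif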

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		kfree(ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static int read_sb_page(struct mddev *mddev, loff_t offset,
			struct page *page,
			unsigned long index, int size)
{
	/* choose a good rdev and read the page from there */

	struct md_rdev *rdev;
	sector_t target;

	rdev_for_each(rdev, mddev) {
		if (!test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags)
		    || test_bit(Bitmap_sync, &rdev->flags))
			continue;

		target = offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev, target,
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
				 page, REQ_OP_READ, 0, true)) {
			page->index = index;
			return 0;
		}
	}
	return -EIO;
}
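/*
 * Worked example (illustrative, not driver code): the target sector above is
 * offset + index * (PAGE_SIZE / 512), i.e. each 4K bitmap page spans eight
 * 512-byte sectors past the bitmap offset.  A user-space sketch of the same
 * arithmetic, with an assumed offset of 8 sectors:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long offset = 8;	/* bitmap offset in sectors (assumed) */
	unsigned long page_size = 4096;
	unsigned long index;

	for (index = 0; index < 3; index++)
		printf("page %lu -> sector %llu\n",
		       index, offset + index * (page_size / 512));
	return 0;
}
#endif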

static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_entry_continue_rcu.
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable device */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct md_rdev *rdev;
	struct block_device *bdev;
	struct mddev *mddev = bitmap->mddev;
	struct bitmap_storage *store = &bitmap->storage;

restart:
	rdev = NULL;
	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

		if (page->index == store->file_pages-1) {
			int last_page_size = store->bytes & (PAGE_SIZE-1);
			if (last_page_size == 0)
				last_page_size = PAGE_SIZE;
			size = roundup(last_page_size,
				       bdev_logical_block_size(bdev));
		}
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			       + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA  BITMAP METADATA  */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs into metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs into bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs into data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
	}

	if (wait && md_super_wait(mddev) < 0)
		goto restart;
	return 0;

 bad_alignment:
	return -EINVAL;
}

static void md_bitmap_file_kick(struct bitmap *bitmap);
/*
 * write out a page to a file
 */
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh;

	if (bitmap->storage.file == NULL) {
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
			set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
		}
	} else {

		bh = page_buffers(page);

		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			bh = bh->b_this_page;
		}

		if (wait)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
	}
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		md_bitmap_file_kick(bitmap);
}

static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;

	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}

/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
}
static void free_buffers(struct page *page)
{
	struct buffer_head *bh;

	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	__clear_page_buffers(page);
	put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static int read_page(struct file *file, unsigned long index,
		     struct bitmap *bitmap,
		     unsigned long count,
		     struct page *page)
{
	int ret = 0;
	struct inode *inode = file_inode(file);
	struct buffer_head *bh;
	sector_t block;

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
	if (!bh) {
		ret = -ENOMEM;
		goto out;
	}
	attach_page_buffers(page, bh);
	block = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		if (count == 0)
			bh->b_blocknr = 0;
		else {
			bh->b_blocknr = bmap(inode, block);
			if (bh->b_blocknr == 0) {
				/* Cannot use this file! */
				ret = -EINVAL;
				goto out;
			}
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < (1<<inode->i_blkbits))
				count = 0;
			else
				count -= (1<<inode->i_blkbits);

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_READ, 0, bh);
		}
		block++;
		bh = bh->b_this_page;
	}
	page->index = index;

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		ret = -EIO;
out:
	if (ret)
		pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
		       (int)PAGE_SIZE,
		       (unsigned long long)index << PAGE_SHIFT,
		       ret);
	return ret;
}

/*
 * bitmap file superblock operations
 */

/*
 * md_bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * md_bitmap_daemon_work(), have completed.
 */
static void md_bitmap_wait_writes(struct bitmap *bitmap)
{
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		/* Note that we ignore the return value.  The writes
		 * might have failed, but that would just mean that
		 * some bits which should be cleared haven't been,
		 * which is safe.  The relevant bitmap blocks will
		 * probably get written again, but there is no great
		 * loss if they aren't.
		 */
		md_super_wait(bitmap->mddev);
}


/* update the event counter and sync the superblock to disk */
void md_bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	if (!bitmap->storage.sb_page) /* no superblock */
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	/*
	 * clear BITMAP_WRITE_ERROR bit to protect against the case that
	 * a bitmap write error occurred but the later writes succeeded.
	 */
	sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
	kunmap_atomic(sb);
	write_page(bitmap, bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(md_bitmap_update_sb);

/* print out the bitmap file superblock */
void md_bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->storage.sb_page)
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
	pr_debug("        events: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events));
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
	bitmap->storage.sb_page->index = 0;

	sb = kmap_atomic(bitmap->storage.sb_page);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb);
		pr_warn("bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		pr_debug("Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	set_bit(BITMAP_STALE, &bitmap->flags);
	sb->state = cpu_to_le32(bitmap->flags);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
	bitmap->mddev->bitmap_info.nodes = 0;

	kunmap_atomic(sb);

	return 0;
}

/* read the superblock from the bitmap file and initialize some bitmap fields */
static int md_bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int nodes = 0;
	unsigned long sectors_reserved = 0;
	int err = -EINVAL;
	struct page *sb_page;
	loff_t offset = bitmap->mddev->bitmap_info.offset;

	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
		set_bit(BITMAP_STALE, &bitmap->flags);
		err = 0;
		goto out_no_sb;
	}
	/* page 0 is the superblock, read it... */
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
	bitmap->storage.sb_page = sb_page;

re_read:
	/* If cluster_slot is set, the cluster is setup */
	if (bitmap->cluster_slot >= 0) {
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

		sector_div(bm_blocks,
			   bitmap->mddev->bitmap_info.chunksize >> 9);
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
			 bitmap->cluster_slot, offset);
	}

	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		err = read_page(bitmap->storage.file, 0,
				bitmap, bytes, sb_page);
	} else {
		err = read_sb_page(bitmap->mddev,
				   offset,
				   sb_page,
				   0, sizeof(bitmap_super_t));
	}
	if (err)
		return err;

	err = -EINVAL;
	sb = kmap_atomic(sb_page);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
	/* Setup nodes/clustername only if bitmap version is
	 * cluster-compatible
	 */
	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
		nodes = le32_to_cpu(sb->nodes);
		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
			sb->cluster_name, 64);
	}

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		pr_warn("%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			pr_warn("%s: bitmap superblock UUID mismatch\n",
				bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
		if (!nodes && (events < bitmap->mddev->events)) {
			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
				bmname(bitmap), events,
				(unsigned long long) bitmap->mddev->events);
			set_bit(BITMAP_STALE, &bitmap->flags);
		}
	}

	/* assign fields using values from superblock */
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
	err = 0;

out:
	kunmap_atomic(sb);
	/* Assigning chunksize is required for "re_read" */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_warn("%s: Could not setup cluster service (%d)\n",
				bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}


out_no_sb:
	if (test_bit(BITMAP_STALE, &bitmap->flags))
		bitmap->events_cleared = bitmap->mddev->events;
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->mddev->bitmap_info.nodes = nodes;
	if (bitmap->mddev->bitmap_info.space == 0 ||
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
		bitmap->mddev->bitmap_info.space = sectors_reserved;
	if (err) {
		md_bitmap_print_sb(bitmap);
		if (bitmap->cluster_slot < 0)
			md_cluster_stop(bitmap->mddev);
	}
	return err;
}

/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
static inline struct page *filemap_get_page(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (file_page_index(store, chunk) >= store->file_pages)
		return NULL;
	return store->filemap[file_page_index(store, chunk)];
}
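/*
 * Worked example (illustrative, not driver code): when the superblock shares
 * the file, every chunk bit is shifted up by sizeof(bitmap_super_t) << 3 =
 * 2048 bits, then split into a page index and a bit offset within the page.
 * A user-space sketch, assuming 4K pages (so PAGE_BITS would be 32768):
 */
#if 0
#include <stdio.h>

#define SB_BITS		(256 * 8)	/* sizeof(bitmap_super_t) << 3 */
#define PAGE_BITS	(4096 * 8)	/* bits per 4K page (assumption) */

int main(void)
{
	unsigned long chunk = 40000;		/* arbitrary example chunk */
	unsigned long bit = chunk + SB_BITS;	/* superblock shares page 0 */

	/* equivalent to chunk >> PAGE_BIT_SHIFT and chunk & (PAGE_BITS-1) */
	printf("chunk %lu -> page %lu, bit %lu\n",
	       chunk, bit / PAGE_BITS, bit % PAGE_BITS);
	return 0;
}
#endif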

static int md_bitmap_storage_alloc(struct bitmap_storage *store,
				   unsigned long chunks, int with_super,
				   int slot_number)
{
	int pnum, offset = 0;
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
	offset = slot_number * num_pages;

	store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
				       GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (store->sb_page == NULL)
			return -ENOMEM;
	}

	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
		store->sb_page->index = offset;
	}

	for ( ; pnum < num_pages; pnum++) {
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
		store->filemap[pnum]->index = pnum + offset;
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}
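/*
 * Worked example (illustrative, not driver code): one bit per chunk, so
 * bytes = DIV_ROUND_UP(chunks, 8), plus 256 bytes if the superblock is
 * stored in the same file, then rounded up to whole pages.  A user-space
 * sketch with an assumed 4K page size and example chunk count:
 */
#if 0
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long chunks = 100000;		/* example chunk count */
	unsigned long bytes = DIV_ROUND_UP(chunks, 8) + 256; /* with sb */
	unsigned long num_pages = DIV_ROUND_UP(bytes, 4096);

	printf("%lu chunks -> %lu bytes -> %lu pages\n",
	       chunks, bytes, num_pages);
	return 0;
}
#endif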

static void md_bitmap_file_unmap(struct bitmap_storage *store)
{
	struct page **map, *sb_page;
	int pages;
	struct file *file;

	file = store->file;
	map = store->filemap;
	pages = store->file_pages;
	sb_page = store->sb_page;

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(store->filemap_attr);

	if (sb_page)
		free_buffers(sb_page);

	if (file) {
		struct inode *inode = file_inode(file);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void md_bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
		md_bitmap_update_sb(bitmap);

		if (bitmap->storage.file) {
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
				ptr = file_path(bitmap->storage.file,
						path, PAGE_SIZE);

			pr_warn("%s: kicking failed bitmap file %s from array!\n",
				bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

			kfree(path);
		} else
			pr_warn("%s: disabling internal bitmap due to errors\n",
				bmname(bitmap));
	}
}

enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};

static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
{
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}
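/*
 * Illustrative sketch (not driver code): each filemap page owns a group of
 * four attribute bits, addressed as (pnum << 2) + attr, which is why the
 * allocation above reserves 4 bits per page.  A user-space analogue using a
 * plain word instead of the kernel bitops; the enum names below are
 * hypothetical stand-ins for the bitmap_page_attr values:
 */
#if 0
#include <stdio.h>

enum { PAGE_DIRTY, PAGE_PENDING, PAGE_NEEDWRITE };	/* stand-ins */

int main(void)
{
	unsigned long attrs = 0;
	int pnum = 3;

	/* analogue of set_page_attr(bitmap, 3, BITMAP_PAGE_DIRTY) */
	attrs |= 1UL << ((pnum << 2) + PAGE_DIRTY);
	printf("DIRTY(page 3) = %d\n",
	       !!(attrs & (1UL << ((pnum << 2) + PAGE_DIRTY))));
	return 0;
}
#endif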
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);

	/* set the bit */
	kaddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set_bit(bit, kaddr);
	else
		set_bit_le(bit, kaddr);
	kunmap_atomic(kaddr);
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
}

static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		clear_bit(bit, paddr);
	else
		clear_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
		set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
		bitmap->allclean = 0;
	}
}

static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	int set = 0;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return -EINVAL;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set = test_bit(bit, paddr);
	else
		set = test_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	return set;
}


/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void md_bitmap_unplug(struct bitmap *bitmap)
{
	unsigned long i;
	int dirty, need_write;
	int writing = 0;

	if (!bitmap || !bitmap->storage.filemap ||
	    test_bit(BITMAP_STALE, &bitmap->flags))
		return;

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->storage.file_pages; i++) {
		if (!bitmap->storage.filemap)
			return;
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write) {
			if (!writing) {
				md_bitmap_wait_writes(bitmap);
				if (bitmap->mddev->queue)
					blk_add_trace_msg(bitmap->mddev->queue,
							  "md bitmap_unplug");
			}
			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
			write_page(bitmap, bitmap->storage.filemap[i], 0);
			writing = 1;
		}
	}
	if (writing)
		md_bitmap_wait_writes(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		md_bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(md_bitmap_unplug);

static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/*
 * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
 */
static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
	struct page *page = NULL;
	unsigned long bit_cnt = 0;
	struct file *file;
	unsigned long offset;
	int outofdate;
	int ret = -ENOSPC;
	void *paddr;
	struct bitmap_storage *store = &bitmap->storage;

	chunks = bitmap->counts.chunks;
	file = store->file;

	if (!file && !bitmap->mddev->bitmap_info.offset) {
		/* No permanent bitmap - fill with '1s'. */
		store->filemap = NULL;
		store->file_pages = 0;
		for (i = 0; i < chunks ; i++) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
				      >= start);
			md_bitmap_set_memory_bits(bitmap,
						  (sector_t)i << bitmap->counts.chunkshift,
						  needed);
		}
		return 0;
	}

	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
	if (outofdate)
		pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));

	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
		pr_warn("%s: bitmap file too short %lu < %lu\n",
			bmname(bitmap),
			(unsigned long) i_size_read(file->f_mapping->host),
			store->bytes);
		goto err;
	}

	oldindex = ~0L;
	offset = 0;
	if (!bitmap->mddev->bitmap_info.external)
		offset = sizeof(bitmap_super_t);

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

	for (i = 0; i < chunks; i++) {
		int b;
		index = file_page_index(&bitmap->storage, i);
		bit = file_page_offset(&bitmap->storage, i);
		if (index != oldindex) { /* this is a new page, read it in */
			int count;
			/* unmap the old page, we're done with it */
			if (index == store->file_pages-1)
				count = store->bytes - index * PAGE_SIZE;
			else
				count = PAGE_SIZE;
			page = store->filemap[index];
			if (file)
				ret = read_page(file, index, bitmap,
						count, page);
			else
				ret = read_sb_page(
					bitmap->mddev,
					bitmap->mddev->bitmap_info.offset,
					page,
					index + node_offset, count);

			if (ret)
				goto err;

			oldindex = index;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
				 * whole page and write it out
				 */
				paddr = kmap_atomic(page);
				memset(paddr + offset, 0xff,
				       PAGE_SIZE - offset);
				kunmap_atomic(paddr);
				write_page(bitmap, page, 1);

				ret = -EIO;
				if (test_bit(BITMAP_WRITE_ERROR,
					     &bitmap->flags))
					goto err;
			}
		}
		paddr = kmap_atomic(page);
		if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
			b = test_bit(bit, paddr);
		else
			b = test_bit_le(bit, paddr);
		kunmap_atomic(paddr);
		if (b) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
				      >= start);
			md_bitmap_set_memory_bits(bitmap,
						  (sector_t)i << bitmap->counts.chunkshift,
						  needed);
			bit_cnt++;
		}
		offset = 0;
	}

	pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
		 bmname(bitmap), store->file_pages,
		 bit_cnt, chunks);

	return 0;

 err:
	pr_warn("%s: bitmap initialisation failed: %d\n",
		bmname(bitmap), ret);
	return ret;
}

void md_bitmap_write_all(struct bitmap *bitmap)
{
	/* We don't actually write all bitmap blocks here,
	 * just flag them as needing to be written
	 */
	int i;

	if (!bitmap || !bitmap->storage.filemap)
		return;
	if (bitmap->storage.file)
		/* Only one copy, so nothing needed */
		return;

	for (i = 0; i < bitmap->storage.file_pages; i++)
		set_page_attr(bitmap, i,
			      BITMAP_PAGE_NEEDWRITE);
	bitmap->allclean = 0;
}

static void md_bitmap_count_page(struct bitmap_counts *bitmap,
				 sector_t offset, int inc)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	bitmap->bp[page].count += inc;
	md_bitmap_checkfree(bitmap, page);
}

static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
{
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	struct bitmap_page *bp = &bitmap->bp[page];

	if (!bp->pending)
		bp->pending = 1;
}

static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
					       sector_t offset, sector_t *blocks,
					       int create);

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 * out to disk
 */

void md_bitmap_daemon_work(struct mddev *mddev)
{
	struct bitmap *bitmap;
	unsigned long j;
	unsigned long nextpage;
	sector_t blocks;
	struct bitmap_counts *counts;

	/* Use a mutex to guard daemon_work against
	 * bitmap_destroy.
	 */
	mutex_lock(&mddev->bitmap_info.mutex);
	bitmap = mddev->bitmap;
	if (bitmap == NULL) {
		mutex_unlock(&mddev->bitmap_info.mutex);
		return;
	}
	if (time_before(jiffies, bitmap->daemon_lastrun
			+ mddev->bitmap_info.daemon_sleep))
		goto done;

	bitmap->daemon_lastrun = jiffies;
	if (bitmap->allclean) {
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
		goto done;
	}
	bitmap->allclean = 1;

	if (bitmap->mddev->queue)
		blk_add_trace_msg(bitmap->mddev->queue,
				  "md bitmap_daemon_work");

	/* Any file-page which is PENDING now needs to be written.
	 * So set NEEDWRITE now, then after we make any last-minute changes
	 * we will write it.
	 */
	for (j = 0; j < bitmap->storage.file_pages; j++)
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_PENDING))
			set_page_attr(bitmap, j,
				      BITMAP_PAGE_NEEDWRITE);

	if (bitmap->need_sync &&
	    mddev->bitmap_info.external == 0) {
		/* Arrange for superblock update as well as
		 * other changes */
		bitmap_super_t *sb;
		bitmap->need_sync = 0;
		if (bitmap->storage.filemap) {
			sb = kmap_atomic(bitmap->storage.sb_page);
			sb->events_cleared =
				cpu_to_le64(bitmap->events_cleared);
			kunmap_atomic(sb);
			set_page_attr(bitmap, 0,
				      BITMAP_PAGE_NEEDWRITE);
		}
	}
	/* Now look at the bitmap counters and if any are '2' or '1',
	 * decrement and handle accordingly.
	 */
	counts = &bitmap->counts;
	spin_lock_irq(&counts->lock);
	nextpage = 0;
	for (j = 0; j < counts->chunks; j++) {
		bitmap_counter_t *bmc;
		sector_t block = (sector_t)j << counts->chunkshift;

		if (j == nextpage) {
			nextpage += PAGE_COUNTER_RATIO;
			if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
				j |= PAGE_COUNTER_MASK;
				continue;
			}
			counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
		}

		bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
		if (!bmc) {
			j |= PAGE_COUNTER_MASK;
			continue;
		}
		if (*bmc == 1 && !bitmap->need_sync) {
			/* We can clear the bit */
			*bmc = 0;
			md_bitmap_count_page(counts, block, -1);
			md_bitmap_file_clear_bit(bitmap, block);
		} else if (*bmc && *bmc <= 2) {
			*bmc = 1;
			md_bitmap_set_pending(counts, block);
			bitmap->allclean = 0;
		}
	}
	spin_unlock_irq(&counts->lock);

	md_bitmap_wait_writes(bitmap);
	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
	 * DIRTY pages need to be written by bitmap_unplug so it can wait
	 * for them.
	 * If we find any DIRTY page we stop there and let bitmap_unplug
	 * handle all the rest.  This is important in the case where
	 * the first block holds the superblock and it has been updated.
	 * We mustn't write any other blocks before the superblock.
	 */
	for (j = 0;
	     j < bitmap->storage.file_pages
		     && !test_bit(BITMAP_STALE, &bitmap->flags);
	     j++) {
		if (test_page_attr(bitmap, j,
				   BITMAP_PAGE_DIRTY))
			/* bitmap_unplug will handle the rest */
			break;
		if (test_and_clear_page_attr(bitmap, j,
					     BITMAP_PAGE_NEEDWRITE)) {
			write_page(bitmap, bitmap->storage.filemap[j], 0);
		}
	}

 done:
	if (bitmap->allclean == 0)
		mddev->thread->timeout =
			mddev->bitmap_info.daemon_sleep;
	mutex_unlock(&mddev->bitmap_info.mutex);
}

static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
					       sector_t offset, sector_t *blocks,
					       int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	/* If 'create', we might release the lock and reclaim it.
	 * The lock must have been taken with interrupts enabled.
	 * If !create, we don't release the lock.
	 */
	sector_t chunk = offset >> bitmap->chunkshift;
	unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
	unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
	sector_t csize;
	int err;

	err = md_bitmap_checkpage(bitmap, page, create, 0);

	if (bitmap->bp[page].hijacked ||
	    bitmap->bp[page].map == NULL)
		csize = ((sector_t)1) << (bitmap->chunkshift +
					  PAGE_COUNTER_SHIFT - 1);
	else
		csize = ((sector_t)1) << bitmap->chunkshift;
	*blocks = csize - (offset & (csize - 1));

	if (err < 0)
		return NULL;

	/* now locked ... */

	if (bitmap->bp[page].hijacked) { /* hijacked pointer */
		/* should we use the first or second counter field
		 * of the hijacked pointer? */
		int hi = (pageoff > PAGE_COUNTER_MASK);
		return &((bitmap_counter_t *)
			 &bitmap->bp[page].map)[hi];
	} else /* page is allocated */
		return (bitmap_counter_t *)
			&(bitmap->bp[page].map[pageoff]);
}
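/*
 * Illustrative sketch (not driver code): each bitmap_counter_t returned
 * above is a 16-bit word whose top bit flags "needed", the next bit flags
 * "resync in progress", and the low 14 bits count in-flight writes, per the
 * NEEDED/RESYNC/COUNTER macros in md-bitmap.h.  The mask values below are
 * restated here as an assumption; a user-space decode of one counter:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define NEEDED_MASK	((uint16_t)(1 << 15))
#define RESYNC_MASK	((uint16_t)(1 << 14))
#define COUNTER_MAX	((uint16_t)(RESYNC_MASK - 1))

int main(void)
{
	uint16_t bmc = NEEDED_MASK | 2;	/* needs sync, two writes in flight */

	printf("needed=%d resync=%d count=%u\n",
	       !!(bmc & NEEDED_MASK), !!(bmc & RESYNC_MASK),
	       (unsigned)(bmc & COUNTER_MAX));
	return 0;
}
#endif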

int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
	if (!bitmap)
		return 0;

	if (behind) {
		int bw;
		atomic_inc(&bitmap->behind_writes);
		bw = atomic_read(&bitmap->behind_writes);
		if (bw > bitmap->behind_writes_used)
			bitmap->behind_writes_used = bw;

		pr_debug("inc write-behind count %d/%lu\n",
			 bw, bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		bitmap_counter_t *bmc;

		spin_lock_irq(&bitmap->counts.lock);
		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
		if (!bmc) {
			spin_unlock_irq(&bitmap->counts.lock);
			return 0;
		}

		if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
			DEFINE_WAIT(__wait);
			/* note that it is safe to do the prepare_to_wait
			 * after the test as long as we do it before dropping
			 * the spinlock.
			 */
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&bitmap->counts.lock);
			schedule();
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
		}

		switch (*bmc) {
		case 0:
			md_bitmap_file_set_bit(bitmap, offset);
			md_bitmap_count_page(&bitmap->counts, offset, 1);
			/* fall through */
		case 1:
			*bmc = 2;
		}

		(*bmc)++;

		spin_unlock_irq(&bitmap->counts.lock);

		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
	return 0;
}
EXPORT_SYMBOL(md_bitmap_startwrite);

void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
			unsigned long sectors, int success, int behind)
{
	if (!bitmap)
		return;
	if (behind) {
		if (atomic_dec_and_test(&bitmap->behind_writes))
			wake_up(&bitmap->behind_wait);
		pr_debug("dec write-behind count %d/%lu\n",
			 atomic_read(&bitmap->behind_writes),
			 bitmap->mddev->bitmap_info.max_write_behind);
	}

	while (sectors) {
		sector_t blocks;
		unsigned long flags;
		bitmap_counter_t *bmc;

		spin_lock_irqsave(&bitmap->counts.lock, flags);
		bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
		if (!bmc) {
			spin_unlock_irqrestore(&bitmap->counts.lock, flags);
			return;
		}

		if (success && !bitmap->mddev->degraded &&
		    bitmap->events_cleared < bitmap->mddev->events) {
			bitmap->events_cleared = bitmap->mddev->events;
			bitmap->need_sync = 1;
			sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
		}

		if (!success && !NEEDED(*bmc))
			*bmc |= NEEDED_MASK;

		if (COUNTER(*bmc) == COUNTER_MAX)
			wake_up(&bitmap->overflow_wait);

		(*bmc)--;
		if (*bmc <= 2) {
			md_bitmap_set_pending(&bitmap->counts, offset);
			bitmap->allclean = 0;
		}
		spin_unlock_irqrestore(&bitmap->counts.lock, flags);
		offset += blocks;
		if (sectors > blocks)
			sectors -= blocks;
		else
			sectors = 0;
	}
}
EXPORT_SYMBOL(md_bitmap_endwrite);

static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
			       int degraded)
{
	bitmap_counter_t *bmc;
	int rv;
	if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
		*blocks = 1024;
		return 1; /* always resync if no bitmap */
	}
	spin_lock_irq(&bitmap->counts.lock);
	bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
	rv = 0;
	if (bmc) {
		/* locked */
		if (RESYNC(*bmc))
			rv = 1;
		else if (NEEDED(*bmc)) {
			rv = 1;
			if (!degraded) { /* don't set/clear bits if degraded */
				*bmc |= RESYNC_MASK;
				*bmc &= ~NEEDED_MASK;
			}
		}
	}
	spin_unlock_irq(&bitmap->counts.lock);
	return rv;
}

int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
			 int degraded)
{
	/* bitmap_start_sync must always report on multiples of whole
	 * pages, otherwise resync (which is very PAGE_SIZE based) will
	 * get confused.
	 * So call __bitmap_start_sync repeatedly (if needed) until
	 * at least PAGE_SIZE>>9 blocks are covered.
	 * Return the 'or' of the result.
	 */
	int rv = 0;
	sector_t blocks1;

	*blocks = 0;
	while (*blocks < (PAGE_SIZE>>9)) {
		rv |= __bitmap_start_sync(bitmap, offset,
					  &blocks1, degraded);
		offset += blocks1;
		*blocks += blocks1;
	}
	return rv;
}
EXPORT_SYMBOL(md_bitmap_start_sync);
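/*
 * Illustrative sketch (not driver code): md_bitmap_start_sync() keeps
 * calling __bitmap_start_sync() and accumulating the per-chunk block counts
 * until at least PAGE_SIZE>>9 (8 for 4K pages) sectors are covered.  A
 * user-space analogue with an assumed fixed per-call block count:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long blocks = 0, offset = 0;
	unsigned long long chunk_blocks = 3;	/* stand-in per-call result */

	/* accumulate until a whole 4K page (8 sectors) is covered */
	while (blocks < (4096 >> 9)) {
		blocks += chunk_blocks;	/* stand-in for __bitmap_start_sync */
		offset += chunk_blocks;
	}
	printf("covered %llu blocks ending at offset %llu\n", blocks, offset);
	return 0;
}
#endif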
1563
1564 void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
1565 {
1566 bitmap_counter_t *bmc;
1567 unsigned long flags;
1568
1569 if (bitmap == NULL) {
1570 *blocks = 1024;
1571 return;
1572 }
1573 spin_lock_irqsave(&bitmap->counts.lock, flags);
1574 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1575 if (bmc == NULL)
1576 goto unlock;
1577 /* locked */
1578 if (RESYNC(*bmc)) {
1579 *bmc &= ~RESYNC_MASK;
1580
1581 if (!NEEDED(*bmc) && aborted)
1582 *bmc |= NEEDED_MASK;
1583 else {
1584 if (*bmc <= 2) {
1585 md_bitmap_set_pending(&bitmap->counts, offset);
1586 bitmap->allclean = 0;
1587 }
1588 }
1589 }
1590 unlock:
1591 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1592 }
1593 EXPORT_SYMBOL(md_bitmap_end_sync);
1594
1595 void md_bitmap_close_sync(struct bitmap *bitmap)
1596 {
1597 /* Sync has finished, and any bitmap chunks that weren't synced
1598 * properly have been aborted. It remains to us to clear the
1599 * RESYNC bit wherever it is still on
1600 */
1601 sector_t sector = 0;
1602 sector_t blocks;
1603 if (!bitmap)
1604 return;
1605 while (sector < bitmap->mddev->resync_max_sectors) {
1606 md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1607 sector += blocks;
1608 }
1609 }
1610 EXPORT_SYMBOL(md_bitmap_close_sync);
1611
1612 void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
1613 {
1614 sector_t s = 0;
1615 sector_t blocks;
1616
1617 if (!bitmap)
1618 return;
1619 if (sector == 0) {
1620 bitmap->last_end_sync = jiffies;
1621 return;
1622 }
1623 if (!force && time_before(jiffies, (bitmap->last_end_sync
1624 + bitmap->mddev->bitmap_info.daemon_sleep)))
1625 return;
1626 wait_event(bitmap->mddev->recovery_wait,
1627 atomic_read(&bitmap->mddev->recovery_active) == 0);
1628
1629 bitmap->mddev->curr_resync_completed = sector;
1630 set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
1631 sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
1632 s = 0;
1633 while (s < sector && s < bitmap->mddev->resync_max_sectors) {
1634 md_bitmap_end_sync(bitmap, s, &blocks, 0);
1635 s += blocks;
1636 }
1637 bitmap->last_end_sync = jiffies;
1638 sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
1639 }
1640 EXPORT_SYMBOL(md_bitmap_cond_end_sync);
1641
1642 void md_bitmap_sync_with_cluster(struct mddev *mddev,
1643 sector_t old_lo, sector_t old_hi,
1644 sector_t new_lo, sector_t new_hi)
1645 {
1646 struct bitmap *bitmap = mddev->bitmap;
1647 sector_t sector, blocks = 0;
1648
1649 for (sector = old_lo; sector < new_lo; ) {
1650 md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1651 sector += blocks;
1652 }
1653 WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
1654
1655 for (sector = old_hi; sector < new_hi; ) {
1656 md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1657 sector += blocks;
1658 }
1659 WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
1660 }
1661 EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
1662
1663 static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1664 {
1665 /* For each chunk covered by any of these sectors, set the
1666 * counter to 2 and possibly set resync_needed. They should all
1667 * be 0 at this point
1668 */
1669
1670 sector_t secs;
1671 bitmap_counter_t *bmc;
1672 spin_lock_irq(&bitmap->counts.lock);
1673 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
1674 if (!bmc) {
1675 spin_unlock_irq(&bitmap->counts.lock);
1676 return;
1677 }
1678 if (!*bmc) {
1679 *bmc = 2;
1680 md_bitmap_count_page(&bitmap->counts, offset, 1);
1681 md_bitmap_set_pending(&bitmap->counts, offset);
1682 bitmap->allclean = 0;
1683 }
1684 if (needed)
1685 *bmc |= NEEDED_MASK;
1686 spin_unlock_irq(&bitmap->counts.lock);
1687 }
1688
1689 /* dirty the memory and file bits for bitmap chunks "s" to "e" */
1690 void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
1691 {
1692 unsigned long chunk;
1693
1694 for (chunk = s; chunk <= e; chunk++) {
1695 sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
1696 md_bitmap_set_memory_bits(bitmap, sec, 1);
1697 md_bitmap_file_set_bit(bitmap, sec);
1698 if (sec < bitmap->mddev->recovery_cp)
1699 /* We are asserting that the array is dirty,
1700 * so move the recovery_cp address back so
1701 * that it is obvious that it is dirty
1702 */
1703 bitmap->mddev->recovery_cp = sec;
1704 }
1705 }
1706
1707 /*
1708 * flush out any pending updates
1709 */
1710 void md_bitmap_flush(struct mddev *mddev)
1711 {
1712 struct bitmap *bitmap = mddev->bitmap;
1713 long sleep;
1714
1715 if (!bitmap) /* there was no bitmap */
1716 return;
1717
1718 /* run the daemon_work three time to ensure everything is flushed
1719 * that can be
1720 */
1721 sleep = mddev->bitmap_info.daemon_sleep * 2;
1722 bitmap->daemon_lastrun -= sleep;
1723 md_bitmap_daemon_work(mddev);
1724 bitmap->daemon_lastrun -= sleep;
1725 md_bitmap_daemon_work(mddev);
1726 bitmap->daemon_lastrun -= sleep;
1727 md_bitmap_daemon_work(mddev);
1728 md_bitmap_update_sb(bitmap);
1729 }
1730
1731 /*
1732 * free memory that was allocated
1733 */
1734 void md_bitmap_free(struct bitmap *bitmap)
1735 {
1736 unsigned long k, pages;
1737 struct bitmap_page *bp;
1738
1739 if (!bitmap) /* there was no bitmap */
1740 return;
1741
1742 if (bitmap->sysfs_can_clear)
1743 sysfs_put(bitmap->sysfs_can_clear);
1744
1745 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
1746 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
1747 md_cluster_stop(bitmap->mddev);
1748
1749 /* Shouldn't be needed - but just in case.... */
1750 wait_event(bitmap->write_wait,
1751 atomic_read(&bitmap->pending_writes) == 0);
1752
1753 /* release the bitmap file */
1754 md_bitmap_file_unmap(&bitmap->storage);
1755
1756 bp = bitmap->counts.bp;
1757 pages = bitmap->counts.pages;
1758
1759 /* free all allocated memory */
1760
1761 if (bp) /* deallocate the page memory */
1762 for (k = 0; k < pages; k++)
1763 if (bp[k].map && !bp[k].hijacked)
1764 kfree(bp[k].map);
1765 kfree(bp);
1766 kfree(bitmap);
1767 }
1768 EXPORT_SYMBOL(md_bitmap_free);
1769
1770 void md_bitmap_wait_behind_writes(struct mddev *mddev)
1771 {
1772 struct bitmap *bitmap = mddev->bitmap;
1773
1774 /* wait for behind writes to complete */
1775 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
1776 pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
1777 mdname(mddev));
1778 /* need to kick something here to make sure I/O goes? */
1779 wait_event(bitmap->behind_wait,
1780 atomic_read(&bitmap->behind_writes) == 0);
1781 }
1782 }
1783
1784 void md_bitmap_destroy(struct mddev *mddev)
1785 {
1786 struct bitmap *bitmap = mddev->bitmap;
1787
1788 if (!bitmap) /* there was no bitmap */
1789 return;
1790
1791 md_bitmap_wait_behind_writes(mddev);
1792
1793 mutex_lock(&mddev->bitmap_info.mutex);
1794 spin_lock(&mddev->lock);
1795 mddev->bitmap = NULL; /* disconnect from the md device */
1796 spin_unlock(&mddev->lock);
1797 mutex_unlock(&mddev->bitmap_info.mutex);
1798 if (mddev->thread)
1799 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1800
1801 md_bitmap_free(bitmap);
1802 }
1803
1804 /*
1805 * initialize the bitmap structure
1806 * if this returns an error, bitmap_destroy must be called to do clean up
1807 * once mddev->bitmap is set
1808 */
1809 struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
1810 {
1811 struct bitmap *bitmap;
1812 sector_t blocks = mddev->resync_max_sectors;
1813 struct file *file = mddev->bitmap_info.file;
1814 int err;
1815 struct kernfs_node *bm = NULL;
1816
1817 BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
1818
1819 BUG_ON(file && mddev->bitmap_info.offset);
1820
1821 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1822 pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
1823 mdname(mddev));
1824 return ERR_PTR(-EBUSY);
1825 }
1826
1827 bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
1828 if (!bitmap)
1829 return ERR_PTR(-ENOMEM);
1830
1831 spin_lock_init(&bitmap->counts.lock);
1832 atomic_set(&bitmap->pending_writes, 0);
1833 init_waitqueue_head(&bitmap->write_wait);
1834 init_waitqueue_head(&bitmap->overflow_wait);
1835 init_waitqueue_head(&bitmap->behind_wait);
1836
1837 bitmap->mddev = mddev;
1838 bitmap->cluster_slot = slot;
1839
1840 if (mddev->kobj.sd)
1841 bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
1842 if (bm) {
1843 bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
1844 sysfs_put(bm);
1845 } else
1846 bitmap->sysfs_can_clear = NULL;
1847
1848 bitmap->storage.file = file;
1849 if (file) {
1850 get_file(file);
1851 /* As future accesses to this file will use bmap,
1852 * and bypass the page cache, we must sync the file
1853 * first.
1854 */
1855 vfs_fsync(file, 1);
1856 }
1857 /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
1858 if (!mddev->bitmap_info.external) {
1859 /*
1860 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
1861 * instructing us to create a new on-disk bitmap instance.
1862 */
1863 if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
1864 err = md_bitmap_new_disk_sb(bitmap);
1865 else
1866 err = md_bitmap_read_sb(bitmap);
1867 } else {
1868 err = 0;
1869 if (mddev->bitmap_info.chunksize == 0 ||
1870 mddev->bitmap_info.daemon_sleep == 0)
1871 /* chunksize and time_base need to be
1872 * set first. */
1873 err = -EINVAL;
1874 }
1875 if (err)
1876 goto error;
1877
1878 bitmap->daemon_lastrun = jiffies;
1879 err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
1880 if (err)
1881 goto error;
1882
1883 pr_debug("created bitmap (%lu pages) for device %s\n",
1884 bitmap->counts.pages, bmname(bitmap));
1885
1886 err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
1887 if (err)
1888 goto error;
1889
1890 return bitmap;
1891 error:
1892 md_bitmap_free(bitmap);
1893 return ERR_PTR(err);
1894 }
1895
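/*
 * A minimal sketch of the usual calling sequence (mirroring
 * location_store() below; error handling abbreviated):
 *
 *	bitmap = md_bitmap_create(mddev, -1);
 *	if (IS_ERR(bitmap))
 *		return PTR_ERR(bitmap);
 *	mddev->bitmap = bitmap;
 *	err = md_bitmap_load(mddev);
 *	if (err)
 *		md_bitmap_destroy(mddev);
 */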
1896 int md_bitmap_load(struct mddev *mddev)
1897 {
1898 int err = 0;
1899 sector_t start = 0;
1900 sector_t sector = 0;
1901 struct bitmap *bitmap = mddev->bitmap;
1902
1903 if (!bitmap)
1904 goto out;
1905
1906 if (mddev_is_clustered(mddev))
1907 md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
1908
1909 /* Clear out old bitmap info first: Either there is none, or we
1910 * are resuming after someone else has possibly changed things,
1911 * so we should forget old cached info.
1912 * All chunks should be clean, but some might need_sync.
1913 */
1914 while (sector < mddev->resync_max_sectors) {
1915 sector_t blocks;
1916 md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1917 sector += blocks;
1918 }
1919 md_bitmap_close_sync(bitmap);
1920
1921 if (mddev->degraded == 0 ||
1922     bitmap->events_cleared == mddev->events)
1923 /* no need to keep dirty bits to optimise a
1924 * re-add of a missing device */
1925 start = mddev->recovery_cp;
1926
1927 mutex_lock(&mddev->bitmap_info.mutex);
1928 err = md_bitmap_init_from_disk(bitmap, start);
1929 mutex_unlock(&mddev->bitmap_info.mutex);
1930
1931 if (err)
1932 goto out;
1933 clear_bit(BITMAP_STALE, &bitmap->flags);
1934
1935 /* Kick recovery in case any bits were set */
1936 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
1937
1938 mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
1939 md_wakeup_thread(mddev->thread);
1940
1941 md_bitmap_update_sb(bitmap);
1942
1943 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
1944 err = -EIO;
1945 out:
1946 return err;
1947 }
1948 EXPORT_SYMBOL_GPL(md_bitmap_load);
1949
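/*
 * Create and populate a bitmap bound to cluster slot 'slot' without
 * installing it as mddev->bitmap; md-cluster uses this to read the
 * bitmaps of other nodes.
 */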
1950 struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
1951 {
1952 int rv = 0;
1953 struct bitmap *bitmap;
1954
1955 bitmap = md_bitmap_create(mddev, slot);
1956 if (IS_ERR(bitmap)) {
1957 rv = PTR_ERR(bitmap);
1958 return ERR_PTR(rv);
1959 }
1960
1961 rv = md_bitmap_init_from_disk(bitmap, 0);
1962 if (rv) {
1963 md_bitmap_free(bitmap);
1964 return ERR_PTR(rv);
1965 }
1966
1967 return bitmap;
1968 }
1969 EXPORT_SYMBOL(get_bitmap_from_slot);
1970
1971 /* Load the bitmap associated with slot and copy the resync information
1972 * to our bitmap; *low and *high return the span of blocks found dirty
1973 */
1974 int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
1975 sector_t *low, sector_t *high, bool clear_bits)
1976 {
1977 int rv = 0, i, j;
1978 sector_t block, lo = 0, hi = 0;
1979 struct bitmap_counts *counts;
1980 struct bitmap *bitmap;
1981
1982 bitmap = get_bitmap_from_slot(mddev, slot);
1983 if (IS_ERR(bitmap)) {
1984 pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
1985 return -1;
1986 }
1987
1988 counts = &bitmap->counts;
1989 for (j = 0; j < counts->chunks; j++) {
1990 block = (sector_t)j << counts->chunkshift;
1991 if (md_bitmap_file_test_bit(bitmap, block)) {
1992 if (!lo)
1993 lo = block;
1994 hi = block;
1995 md_bitmap_file_clear_bit(bitmap, block);
1996 md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
1997 md_bitmap_file_set_bit(mddev->bitmap, block);
1998 }
1999 }
2000
2001 if (clear_bits) {
2002 md_bitmap_update_sb(bitmap);
2003 /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
2004 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
2005 for (i = 0; i < bitmap->storage.file_pages; i++)
2006 if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
2007 set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
2008 md_bitmap_unplug(bitmap);
2009 }
2010 md_bitmap_unplug(mddev->bitmap);
2011 *low = lo;
2012 *high = hi;
2013
2014 return rv;
2015 }
2016 EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
2017
2018
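/*
 * Report bitmap usage in /proc/mdstat.  For illustration (values
 * hypothetical, assuming 4K pages), the emitted line looks like:
 *
 *	bitmap: 3/31 pages [12KB], 65536KB chunk, file: /bitmaps/md0
 */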
2019 void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
2020 {
2021 unsigned long chunk_kb;
2022 struct bitmap_counts *counts;
2023
2024 if (!bitmap)
2025 return;
2026
2027 counts = &bitmap->counts;
2028
2029 chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
2030 seq_printf(seq,
2031 "bitmap: %lu/%lu pages [%luKB], %lu%s chunk",
2032 counts->pages - counts->missing_pages,
2033 counts->pages,
2034 (counts->pages - counts->missing_pages)
2035 << (PAGE_SHIFT - 10),
2036 chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
2037 chunk_kb ? "KB" : "B");
2038 if (bitmap->storage.file) {
2039 seq_printf(seq, ", file: ");
2040 seq_file_path(seq, bitmap->storage.file, " \t\n");
2041 }
2042
2043 seq_printf(seq, "\n");
2044 }
2045
2046 int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2047 int chunksize, int init)
2048 {
2049 /* If chunksize is 0, choose an appropriate chunk size.
2050 * Then possibly allocate new storage space.
2051 * Then quiesce, copy bits, replace the bitmap, and restart.
2052 *
2053 * This function is called both to set up the initial bitmap
2054 * and to resize the bitmap while the array is active.
2055 * When the resize is triggered by the array itself being resized,
2056 * chunksize is zero and we must choose a suitable chunk size;
2057 * otherwise we use the one we are given.
2058 */
2059 struct bitmap_storage store;
2060 struct bitmap_counts old_counts;
2061 unsigned long chunks;
2062 sector_t block;
2063 sector_t old_blocks, new_blocks;
2064 int chunkshift;
2065 int ret = 0;
2066 long pages;
2067 struct bitmap_page *new_bp;
2068
2069 if (bitmap->storage.file && !init) {
2070 pr_info("md: cannot resize file-based bitmap\n");
2071 return -EINVAL;
2072 }
2073
2074 if (chunksize == 0) {
2075 /* If there is enough space, leave the chunk size unchanged,
2076 * else increase by factor of two until there is enough space.
2077 */
2078 long bytes;
2079 long space = bitmap->mddev->bitmap_info.space;
2080
2081 if (space == 0) {
2082 /* We don't know how much space there is, so limit
2083 * it to the current bitmap size, in sectors.
2084 */
2085 bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
2086 if (!bitmap->mddev->bitmap_info.external)
2087 bytes += sizeof(bitmap_super_t);
2088 space = DIV_ROUND_UP(bytes, 512);
2089 bitmap->mddev->bitmap_info.space = space;
2090 }
2091 chunkshift = bitmap->counts.chunkshift;
2092 chunkshift--;
2093 do {
2094 /* 'chunkshift' is shift from block size to chunk size */
2095 chunkshift++;
2096 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2097 bytes = DIV_ROUND_UP(chunks, 8);
2098 if (!bitmap->mddev->bitmap_info.external)
2099 bytes += sizeof(bitmap_super_t);
2100 } while (bytes > (space << 9));
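/*
 * Worked example (hypothetical numbers, internal bitmap, and assuming
 * the pre-existing chunkshift is smaller): for blocks = 2^31 sectors
 * (1TiB) and space = 8 sectors, chunkshift = 16 gives 32768 chunks and
 * 4096 + 256 bytes, which exceeds 8 << 9 = 4096; chunkshift = 17 gives
 * 16384 chunks and 2048 + 256 = 2304 bytes, so the loop above stops
 * there: chunks of 2^17 sectors, i.e. 64MiB.
 */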
2101 } else
2102 chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
2103
2104 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2105 memset(&store, 0, sizeof(store));
2106 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
2107 ret = md_bitmap_storage_alloc(&store, chunks,
2108 !bitmap->mddev->bitmap_info.external,
2109 mddev_is_clustered(bitmap->mddev)
2110 ? bitmap->cluster_slot : 0);
2111 if (ret) {
2112 md_bitmap_file_unmap(&store);
2113 goto err;
2114 }
2115
2116 pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
2117
2118 new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
2119 ret = -ENOMEM;
2120 if (!new_bp) {
2121 md_bitmap_file_unmap(&store);
2122 goto err;
2123 }
2124
2125 if (!init)
2126 bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
2127
2128 store.file = bitmap->storage.file;
2129 bitmap->storage.file = NULL;
2130
2131 if (store.sb_page && bitmap->storage.sb_page)
2132 memcpy(page_address(store.sb_page),
2133 page_address(bitmap->storage.sb_page),
2134 sizeof(bitmap_super_t));
2135 md_bitmap_file_unmap(&bitmap->storage);
2136 bitmap->storage = store;
2137
2138 old_counts = bitmap->counts;
2139 bitmap->counts.bp = new_bp;
2140 bitmap->counts.pages = pages;
2141 bitmap->counts.missing_pages = pages;
2142 bitmap->counts.chunkshift = chunkshift;
2143 bitmap->counts.chunks = chunks;
2144 bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
2145 BITMAP_BLOCK_SHIFT);
2146
2147 blocks = min(old_counts.chunks << old_counts.chunkshift,
2148 chunks << chunkshift);
2149
2150 spin_lock_irq(&bitmap->counts.lock);
2151 /* For clustered raid we must pre-allocate the in-memory bitmap */
2152 if (mddev_is_clustered(bitmap->mddev)) {
2153 unsigned long page;
2154 for (page = 0; page < pages; page++) {
2155 ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
2156 if (ret) {
2157 unsigned long k;
2158
2159 /* deallocate the page memory */
2160 for (k = 0; k < page; k++) {
2161 kfree(new_bp[k].map);
2162 }
2163 kfree(new_bp);
2164
2165 /* restore some fields from old_counts */
2166 bitmap->counts.bp = old_counts.bp;
2167 bitmap->counts.pages = old_counts.pages;
2168 bitmap->counts.missing_pages = old_counts.pages;
2169 bitmap->counts.chunkshift = old_counts.chunkshift;
2170 bitmap->counts.chunks = old_counts.chunks;
2171 bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
2172 BITMAP_BLOCK_SHIFT);
2173 blocks = old_counts.chunks << old_counts.chunkshift;
2174 pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
2175 break;
2176 } else
2177 bitmap->counts.bp[page].count += 1;
2178 }
2179 }
2180
2181 for (block = 0; block < blocks; ) {
2182 bitmap_counter_t *bmc_old, *bmc_new;
2183 int set;
2184
2185 bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
2186 set = bmc_old && NEEDED(*bmc_old);
2187
2188 if (set) {
2189 bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2190 if (*bmc_new == 0) {
2191 /* need to set on-disk bits too. */
2192 sector_t end = block + new_blocks;
2193 sector_t start = block >> chunkshift;
2194 start <<= chunkshift;
2195 while (start < end) {
2196 md_bitmap_file_set_bit(bitmap, block);
2197 start += 1 << chunkshift;
2198 }
2199 *bmc_new = 2;
2200 md_bitmap_count_page(&bitmap->counts, block, 1);
2201 md_bitmap_set_pending(&bitmap->counts, block);
2202 }
2203 *bmc_new |= NEEDED_MASK;
2204 if (new_blocks < old_blocks)
2205 old_blocks = new_blocks;
2206 }
2207 block += old_blocks;
2208 }
2209
2210 if (bitmap->counts.bp != old_counts.bp) {
2211 unsigned long k;
2212 for (k = 0; k < old_counts.pages; k++)
2213 if (!old_counts.bp[k].hijacked)
2214 kfree(old_counts.bp[k].map);
2215 kfree(old_counts.bp);
2216 }
2217
2218 if (!init) {
2219 int i;
2220 while (block < (chunks << chunkshift)) {
2221 bitmap_counter_t *bmc;
2222 bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2223 if (bmc) {
2224 /* new space. It needs to be resynced, so
2225 * we set NEEDED_MASK.
2226 */
2227 if (*bmc == 0) {
2228 *bmc = NEEDED_MASK | 2;
2229 md_bitmap_count_page(&bitmap->counts, block, 1);
2230 md_bitmap_set_pending(&bitmap->counts, block);
2231 }
2232 }
2233 block += new_blocks;
2234 }
2235 for (i = 0; i < bitmap->storage.file_pages; i++)
2236 set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
2237 }
2238 spin_unlock_irq(&bitmap->counts.lock);
2239
2240 if (!init) {
2241 md_bitmap_unplug(bitmap);
2242 bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
2243 }
2244 ret = 0;
2245 err:
2246 return ret;
2247 }
2248 EXPORT_SYMBOL_GPL(md_bitmap_resize);
2249
2250 static ssize_t
2251 location_show(struct mddev *mddev, char *page)
2252 {
2253 ssize_t len;
2254 if (mddev->bitmap_info.file)
2255 len = sprintf(page, "file");
2256 else if (mddev->bitmap_info.offset)
2257 len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
2258 else
2259 len = sprintf(page, "none");
2260 len += sprintf(page+len, "\n");
2261 return len;
2262 }
2263
2264 static ssize_t
2265 location_store(struct mddev *mddev, const char *buf, size_t len)
2266 {
2267 int rv;
2268
2269 rv = mddev_lock(mddev);
2270 if (rv)
2271 return rv;
2272 if (mddev->pers) {
2273 if (!mddev->pers->quiesce) {
2274 rv = -EBUSY;
2275 goto out;
2276 }
2277 if (mddev->recovery || mddev->sync_thread) {
2278 rv = -EBUSY;
2279 goto out;
2280 }
2281 }
2282
2283 if (mddev->bitmap || mddev->bitmap_info.file ||
2284 mddev->bitmap_info.offset) {
2285 /* bitmap already configured. Only option is to clear it */
2286 if (strncmp(buf, "none", 4) != 0) {
2287 rv = -EBUSY;
2288 goto out;
2289 }
2290 if (mddev->pers) {
2291 mddev_suspend(mddev);
2292 md_bitmap_destroy(mddev);
2293 mddev_resume(mddev);
2294 }
2295 mddev->bitmap_info.offset = 0;
2296 if (mddev->bitmap_info.file) {
2297 struct file *f = mddev->bitmap_info.file;
2298 mddev->bitmap_info.file = NULL;
2299 fput(f);
2300 }
2301 } else {
2302 /* No bitmap, OK to set a location */
2303 long long offset;
2304 if (strncmp(buf, "none", 4) == 0)
2305 /* nothing to be done */;
2306 else if (strncmp(buf, "file:", 5) == 0) {
2307 /* Not supported yet */
2308 rv = -EINVAL;
2309 goto out;
2310 } else {
2311 if (buf[0] == '+')
2312 rv = kstrtoll(buf+1, 10, &offset);
2313 else
2314 rv = kstrtoll(buf, 10, &offset);
2315 if (rv)
2316 goto out;
2317 if (offset == 0) {
2318 rv = -EINVAL;
2319 goto out;
2320 }
2321 if (mddev->bitmap_info.external == 0 &&
2322 mddev->major_version == 0 &&
2323 offset != mddev->bitmap_info.default_offset) {
2324 rv = -EINVAL;
2325 goto out;
2326 }
2327 mddev->bitmap_info.offset = offset;
2328 if (mddev->pers) {
2329 struct bitmap *bitmap;
2330 bitmap = md_bitmap_create(mddev, -1);
2331 mddev_suspend(mddev);
2332 if (IS_ERR(bitmap))
2333 rv = PTR_ERR(bitmap);
2334 else {
2335 mddev->bitmap = bitmap;
2336 rv = md_bitmap_load(mddev);
2337 if (rv)
2338 mddev->bitmap_info.offset = 0;
2339 }
2340 if (rv) {
2341 md_bitmap_destroy(mddev);
2342 mddev_resume(mddev);
2343 goto out;
2344 }
2345 mddev_resume(mddev);
2346 }
2347 }
2348 }
2349 if (!mddev->external) {
2350 /* Ensure new bitmap info is stored in
2351 * metadata promptly.
2352 */
2353 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2354 md_wakeup_thread(mddev->thread);
2355 }
2356 rv = 0;
2357 out:
2358 mddev_unlock(mddev);
2359 if (rv)
2360 return rv;
2361 return len;
2362 }
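/*
 * For example (device name hypothetical), an internal bitmap stored 8
 * sectors beyond the superblock can be requested with:
 *
 *	echo +8 > /sys/block/md0/md/bitmap/location
 *
 * and an existing bitmap is removed with:
 *
 *	echo none > /sys/block/md0/md/bitmap/location
 */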
2363
2364 static struct md_sysfs_entry bitmap_location =
2365 __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
2366
2367 /* 'bitmap/space' is the space available at 'location' for the
2368 * bitmap. This allows the kernel to know when it is safe to
2369 * resize the bitmap to match a resized array.
2370 */
2371 static ssize_t
2372 space_show(struct mddev *mddev, char *page)
2373 {
2374 return sprintf(page, "%lu\n", mddev->bitmap_info.space);
2375 }
2376
2377 static ssize_t
2378 space_store(struct mddev *mddev, const char *buf, size_t len)
2379 {
2380 unsigned long sectors;
2381 int rv;
2382
2383 rv = kstrtoul(buf, 10, &sectors);
2384 if (rv)
2385 return rv;
2386
2387 if (sectors == 0)
2388 return -EINVAL;
2389
2390 if (mddev->bitmap &&
2391 sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
2392 return -EFBIG; /* Bitmap is too big for this small space */
2393
2394 /* could make sure it isn't too big, but that isn't really
2395 * needed - user-space should be careful.
2396 */
2397 mddev->bitmap_info.space = sectors;
2398 return len;
2399 }
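/*
 * For example, a bitmap currently occupying 2304 bytes rounds up to 5
 * sectors, so writing "4" here fails with -EFBIG while "8" succeeds
 * (numbers hypothetical).
 */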
2400
2401 static struct md_sysfs_entry bitmap_space =
2402 __ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
2403
2404 static ssize_t
2405 timeout_show(struct mddev *mddev, char *page)
2406 {
2407 ssize_t len;
2408 unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
2409 unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
2410
2411 len = sprintf(page, "%lu", secs);
2412 if (jifs)
2413 len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
2414 len += sprintf(page+len, "\n");
2415 return len;
2416 }
2417
2418 static ssize_t
2419 timeout_store(struct mddev *mddev, const char *buf, size_t len)
2420 {
2421 /* timeout can be set at any time */
2422 unsigned long timeout;
2423 int rv = strict_strtoul_scaled(buf, &timeout, 4);
2424 if (rv)
2425 return rv;
2426
2427 /* just to make sure we don't overflow... */
2428 if (timeout >= LONG_MAX / HZ)
2429 return -EINVAL;
2430
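/*
 * strict_strtoul_scaled(..., 4) returned the timeout in units of
 * 10^-4 seconds, so e.g. "5.5" arrives here as 55000; the line
 * below converts that to 5.5*HZ jiffies.
 */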
2431 timeout = timeout * HZ / 10000;
2432
2433 if (timeout >= MAX_SCHEDULE_TIMEOUT)
2434 timeout = MAX_SCHEDULE_TIMEOUT-1;
2435 if (timeout < 1)
2436 timeout = 1;
2437 mddev->bitmap_info.daemon_sleep = timeout;
2438 if (mddev->thread) {
2439 /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
2440 * the bitmap is all clean and we don't need to
2441 * adjust the timeout right now
2442 */
2443 if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
2444 mddev->thread->timeout = timeout;
2445 md_wakeup_thread(mddev->thread);
2446 }
2447 }
2448 return len;
2449 }
2450
2451 static struct md_sysfs_entry bitmap_timeout =
2452 __ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
2453
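/*
 * 'backlog' limits how many write-behind requests may be in flight at
 * once (bitmap_info.max_write_behind); values above COUNTER_MAX are
 * rejected.
 */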
2454 static ssize_t
2455 backlog_show(struct mddev *mddev, char *page)
2456 {
2457 return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
2458 }
2459
2460 static ssize_t
2461 backlog_store(struct mddev *mddev, const char *buf, size_t len)
2462 {
2463 unsigned long backlog;
2464 int rv = kstrtoul(buf, 10, &backlog);
2465 if (rv)
2466 return rv;
2467 if (backlog > COUNTER_MAX)
2468 return -EINVAL;
2469 mddev->bitmap_info.max_write_behind = backlog;
2470 return len;
2471 }
2472
2473 static struct md_sysfs_entry bitmap_backlog =
2474 __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
2475
2476 static ssize_t
2477 chunksize_show(struct mddev *mddev, char *page)
2478 {
2479 return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
2480 }
2481
2482 static ssize_t
2483 chunksize_store(struct mddev *mddev, const char *buf, size_t len)
2484 {
2485 /* Can only be changed when no bitmap is active */
2486 int rv;
2487 unsigned long csize;
2488 if (mddev->bitmap)
2489 return -EBUSY;
2490 rv = kstrtoul(buf, 10, &csize);
2491 if (rv)
2492 return rv;
2493 if (csize < 512 ||
2494 !is_power_of_2(csize))
2495 return -EINVAL;
2496 mddev->bitmap_info.chunksize = csize;
2497 return len;
2498 }
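/*
 * For example (device name hypothetical):
 *
 *	echo 67108864 > /sys/block/md0/md/bitmap/chunksize
 *
 * requests 64MiB chunks; the value is in bytes and must be a power of
 * two no smaller than 512.
 */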
2499
2500 static struct md_sysfs_entry bitmap_chunksize =
2501 __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
2502
2503 static ssize_t metadata_show(struct mddev *mddev, char *page)
2504 {
2505 if (mddev_is_clustered(mddev))
2506 return sprintf(page, "clustered\n");
2507 return sprintf(page, "%s\n", (mddev->bitmap_info.external
2508 ? "external" : "internal"));
2509 }
2510
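/*
 * Note that "clustered" is accepted below as a synonym for "internal":
 * both clear bitmap_info.external, i.e. the kernel rather than
 * user-space manages the bitmap superblock.
 */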
2511 static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
2512 {
2513 if (mddev->bitmap ||
2514 mddev->bitmap_info.file ||
2515 mddev->bitmap_info.offset)
2516 return -EBUSY;
2517 if (strncmp(buf, "external", 8) == 0)
2518 mddev->bitmap_info.external = 1;
2519 else if ((strncmp(buf, "internal", 8) == 0) ||
2520 (strncmp(buf, "clustered", 9) == 0))
2521 mddev->bitmap_info.external = 0;
2522 else
2523 return -EINVAL;
2524 return len;
2525 }
2526
2527 static struct md_sysfs_entry bitmap_metadata =
2528 __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
2529
2530 static ssize_t can_clear_show(struct mddev *mddev, char *page)
2531 {
2532 int len;
2533 spin_lock(&mddev->lock);
2534 if (mddev->bitmap)
2535 len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
2536 "false" : "true"));
2537 else
2538 len = sprintf(page, "\n");
2539 spin_unlock(&mddev->lock);
2540 return len;
2541 }
2542
2543 static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
2544 {
2545 if (mddev->bitmap == NULL)
2546 return -ENOENT;
2547 if (strncmp(buf, "false", 5) == 0)
2548 mddev->bitmap->need_sync = 1;
2549 else if (strncmp(buf, "true", 4) == 0) {
2550 if (mddev->degraded)
2551 return -EBUSY;
2552 mddev->bitmap->need_sync = 0;
2553 } else
2554 return -EINVAL;
2555 return len;
2556 }
2557
2558 static struct md_sysfs_entry bitmap_can_clear =
2559 __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
2560
2561 static ssize_t
2562 behind_writes_used_show(struct mddev *mddev, char *page)
2563 {
2564 ssize_t ret;
2565 spin_lock(&mddev->lock);
2566 if (mddev->bitmap == NULL)
2567 ret = sprintf(page, "0\n");
2568 else
2569 ret = sprintf(page, "%lu\n",
2570 mddev->bitmap->behind_writes_used);
2571 spin_unlock(&mddev->lock);
2572 return ret;
2573 }
2574
2575 static ssize_t
2576 behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
2577 {
2578 if (mddev->bitmap)
2579 mddev->bitmap->behind_writes_used = 0;
2580 return len;
2581 }
2582
2583 static struct md_sysfs_entry max_backlog_used =
2584 __ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
2585 behind_writes_used_show, behind_writes_used_reset);
2586
2587 static struct attribute *md_bitmap_attrs[] = {
2588 &bitmap_location.attr,
2589 &bitmap_space.attr,
2590 &bitmap_timeout.attr,
2591 &bitmap_backlog.attr,
2592 &bitmap_chunksize.attr,
2593 &bitmap_metadata.attr,
2594 &bitmap_can_clear.attr,
2595 &max_backlog_used.attr,
2596 NULL
2597 };
2598 struct attribute_group md_bitmap_group = {
2599 .name = "bitmap",
2600 .attrs = md_bitmap_attrs,
2601 };
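/*
 * Together these attributes appear under the md/bitmap/ directory of a
 * device's sysfs tree, e.g. (device name hypothetical)
 * /sys/block/md0/md/bitmap/{location,space,time_base,backlog,chunksize,
 * metadata,can_clear,max_backlog_used}.
 */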
2602