// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "extent_map.h"
#include "transaction.h"
#include "print-tree.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "tree-checker.h"
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.devs_max	= 0,	/* 0 == as many as possible */
		.tolerated_failures = 1,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.tolerated_failures = 1,
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.tolerated_failures = 0,
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
	},
	[BTRFS_RAID_RAID0] = {
		.tolerated_failures = 0,
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
	},
	[BTRFS_RAID_SINGLE] = {
		.tolerated_failures = 0,
		.raid_name	= "single",
	},
	[BTRFS_RAID_RAID5] = {
		.tolerated_failures = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.tolerated_failures = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
const char *get_raid_name(enum btrfs_raid_types type)
{
	if (type >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[type].raid_name;
}
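/*
 * Illustrative sketch (not part of the original file): the table above can be
 * consulted for any per-profile attribute the same way get_raid_name() does.
 * example_profile_tolerance() is a hypothetical helper shown only to make the
 * table's intended use concrete.
 */
static int example_profile_tolerance(enum btrfs_raid_types type)
{
	if (type >= BTRFS_NR_RAID_TYPES)
		return -EINVAL;

	/* e.g. BTRFS_RAID_RAID6 -> 2, BTRFS_RAID_RAID0 -> 0 */
	return btrfs_raid_array[type].tolerated_failures;
}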
/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer.
	 */
out_overflow:;
}
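/*
 * Hypothetical caller sketch (not in the original file): format a block
 * group's flags for a log line. The buffer size is an assumption; typical
 * flag combinations fit comfortably, and longer ones are trimmed as
 * described above.
 */
static void example_log_bg_flags(struct btrfs_fs_info *fs_info, u64 bg_flags)
{
	char buf[128];

	btrfs_describe_block_groups(bg_flags, buf, sizeof(buf));
	btrfs_info(fs_info, "block group flags: %s", buf);
}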
static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map);
/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list!
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 */
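/*
 * Sketch (not part of the original file) of the RCU read pattern the comment
 * above permits for fs_devices::devices: a plain read-only traversal needs
 * only rcu_read_lock(), while add/delete must hold device_list_mutex.
 * example_count_devices() is a hypothetical helper.
 */
static int example_count_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	int count = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_devices->devices, dev_list)
		count++;
	rcu_read_unlock();

	return count;
}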
/*
 * Exclusive operations, BTRFS_FS_EXCL_OP
 * ======================================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
 * completed.
 */
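/*
 * Sketch (assumed pattern, not a verbatim copy of any one caller) of how an
 * exclusive operation is typically claimed and released with the flag
 * described above; the ioctl handlers follow this test_and_set_bit() shape.
 */
static int example_claim_excl_op(struct btrfs_fs_info *fs_info)
{
	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
		return -EINPROGRESS;	/* another device operation is running */

	/* ... run the balance/add/remove/replace work here ... */

	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
	return 0;
}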
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}
/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:		if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:	if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}
void btrfs_free_device(struct btrfs_device *device)
{
	WARN_ON(!list_empty(&device->post_commit_list));
	rcu_string_free(device->name);
	extent_io_tree_release(&device->alloc_state);
	bio_put(device->flush_bio);
	kfree(device);
}
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}
static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action, kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}
void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}
/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->post_commit_list);

	spin_lock_init(&dev->io_lock);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	extent_io_tree_init(NULL, &dev->alloc_state, 0, NULL);

	return dev;
}
static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	if (metadata_fsid) {
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by first scanning
		 * a device which didn't have its fsid/metadata_uuid changed
		 * at all and the CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(metadata_fsid, fs_devices->fsid,
				   BTRFS_FSID_SIZE) == 0 &&
			    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by a device that
		 * has an outdated pair of fsid/metadata_uuid and
		 * CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(fs_devices->metadata_uuid,
				   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
			    memcmp(metadata_fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
	}

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device. This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block. The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = device->bdev->bd_bdi;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {
		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		     pending_bios != &device->pending_sync_bios &&
		     device->pending_sync_bios.head) ||
		    (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		     device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we submit
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}
static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	int found;

	rcu_read_lock();
	found = strcmp(rcu_str_deref(device->name), path);
	rcu_read_unlock();

	return found == 0;
}
/*
 *  Search and remove all stale (devices which are not mounted) devices.
 *  When both inputs are NULL, it will search and release all stale devices.
 *  path:	Optional. When provided, it will release all unmounted devices
 *		matching this path only.
 *  skip_dev:	Optional. Will skip this device when searching for the stale
 *		devices.
 *  Return:	0 for success or if @path is NULL.
 *		-EBUSY if @path is a mounted device.
 *		-ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
				     struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
			if (fs_devices->num_devices == 0)
				break;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &bh);
	if (ret)
		return ret;

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_brelse;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_brelse;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_brelse;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = 1;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	brelse(bh);

	return 0;

error_brelse:
	brelse(bh);
	blkdev_put(bdev, flags);

	return -EINVAL;
}
/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
			return fs_devices;
		}
	}

	return NULL;
}
static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where scanned device is part of an fs that had
	 * multiple successful changes of FSID but currently device didn't
	 * observe it. Meaning our fsid will be different than theirs.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) != 0 &&
		    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
			   BTRFS_FSID_SIZE) == 0 &&
		    memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0) {
			return fs_devices;
		}
	}

	return NULL;
}
/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
) {
929 if (!has_metadata_uuid
) {
931 * When we have an image which has CHANGING_FSID_V2 set
932 * it might belong to either a filesystem which has
933 * disks with completed fsid change or it might belong
934 * to fs with no UUID changes in effect, handle both.
936 fs_devices
= find_fsid_inprogress(disk_super
);
938 fs_devices
= find_fsid(disk_super
->fsid
, NULL
);
940 fs_devices
= find_fsid_changed(disk_super
);
942 } else if (has_metadata_uuid
) {
943 fs_devices
= find_fsid(disk_super
->fsid
,
944 disk_super
->metadata_uuid
);
946 fs_devices
= find_fsid(disk_super
->fsid
, NULL
);
951 if (has_metadata_uuid
)
952 fs_devices
= alloc_fs_devices(disk_super
->fsid
,
953 disk_super
->metadata_uuid
);
955 fs_devices
= alloc_fs_devices(disk_super
->fsid
, NULL
);
957 if (IS_ERR(fs_devices
))
958 return ERR_CAST(fs_devices
);
960 fs_devices
->fsid_change
= fsid_change_in_progress
;
962 mutex_lock(&fs_devices
->device_list_mutex
);
963 list_add(&fs_devices
->fs_list
, &fs_uuids
);
967 mutex_lock(&fs_devices
->device_list_mutex
);
968 device
= btrfs_find_device(fs_devices
, devid
,
969 disk_super
->dev_item
.uuid
, NULL
, false);
		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (has_metadata_uuid && fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
					disk_super->metadata_uuid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}
	if (!device) {
		if (fs_devices->opened) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
				disk_super->label, devid, found_transid, path);
		else
			pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
				disk_super->fsid, devid, found_transid, path);
->name
|| strcmp(device
->name
->str
, path
)) {
1024 * When FS is already mounted.
1025 * 1. If you are here and if the device->name is NULL that
1026 * means this device was missing at time of FS mount.
1027 * 2. If you are here and if the device->name is different
1028 * from 'path' that means either
1029 * a. The same device disappeared and reappeared with
1030 * different name. or
1031 * b. The missing-disk-which-was-replaced, has
1034 * We must allow 1 and 2a above. But 2b would be a spurious
1035 * and unintentional.
1037 * Further in case of 1 and 2a above, the disk at 'path'
1038 * would have missed some transaction when it was away and
1039 * in case of 2a the stale bdev has to be updated as well.
1040 * 2b must not be allowed at all time.
1044 * For now, we do allow update to btrfs_fs_device through the
1045 * btrfs dev scan cli after FS has been mounted. We're still
1046 * tracking a problem where systems fail mount by subvolume id
1047 * when we reject replacement on a mounted FS.
1049 if (!fs_devices
->opened
&& found_transid
< device
->generation
) {
1051 * That is if the FS is _not_ mounted and if you
1052 * are here, that means there is more than one
1053 * disk with same uuid and devid.We keep the one
1054 * with larger generation number or the last-in if
1055 * generation are equal.
1057 mutex_unlock(&fs_devices
->device_list_mutex
);
1058 return ERR_PTR(-EEXIST
);
		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(device->fs_info,
			"duplicate device fsid:devid for %pU:%llu old:%s new:%s",
					disk_super->fsid, devid,
					rcu_str_deref(device->name), path);
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
				"device fsid %pU devid %llu moved old:%s new:%s",
				disk_super->fsid, devid,
				rcu_str_deref(device->name), path);
		}
		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}
	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
/*
 * After we have read the system tree and know devids belonging to
 * this filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
							&device->dev_state)) {
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			    (!latest_dev ||
			     device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
						  &device->dev_state)) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state))
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}
static void btrfs_close_bdev(struct btrfs_device *device)
{
	if (!device->bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(device->bdev);
		invalidate_bdev(device->bdev);
	}

	blkdev_put(device->bdev, device->mode);
}
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);

	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	synchronize_rcu();
	btrfs_free_device(device);
}
static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* Just open everything we can; ignore failures here */
		if (btrfs_open_one_device(fs_devices, device, flags, holder))
			continue;

		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;
	}
	if (fs_devices->open_devices == 0) {
		return -EINVAL;
	}
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;

	return 0;
}
static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *dev1, *dev2;

	dev1 = list_entry(a, struct btrfs_device, dev_list);
	dev2 = list_entry(b, struct btrfs_device, dev_list);

	if (dev1->devid < dev2->devid)
		return -1;
	else if (dev1->devid > dev2->devid)
		return 1;
	return 0;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
static void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}

static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
				 struct page **page,
				 struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				    index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + offset_in_page(bytenr);

	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}
int btrfs_forget_devices(const char *path)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
	mutex_unlock(&uuid_mutex);

	return ret;
}
/*
 * Look for a btrfs signature on a device. This may be called out of the mount
 * path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	struct page *page;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
		device = ERR_PTR(-EINVAL);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}
/*
 * Try to find a chunk that intersects [start, start + len] range and when one
 * such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
				    u64 len)
{
	u64 physical_start, physical_end;

	lockdep_assert_held(&device->fs_info->chunk_mutex);

	if (!find_first_extent_bit(&device->alloc_state, *start,
				   &physical_start, &physical_end,
				   CHUNK_ALLOCATED, NULL)) {

		if (in_range(physical_start, *start, len) ||
		    in_range(*start, physical_start,
			     physical_end - physical_start)) {
			*start = physical_end + 1;
			return true;
		}
	}
	return false;
}
/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:	  the device which we search the free space in
 * @num_bytes:	  the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:	  store the start of the free space.
 * @len:	  the size of the free space that we find, or the size
 *		  of the max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent_start(struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	search_start = max_t(u64, search_start, SZ_1M);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(device, &search_start,
						    hole_size)) {
				if (key.offset >= search_start)
					hole_size = key.offset - search_start;
				else
					hole_size = 0;
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(device, &search_start, hole_size)) {
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct rb_node *n;
	u64 ret = 0;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	n = rb_last(&em_tree->map.rb_root);
	if (n) {
		em = rb_entry(n, struct extent_map, rb_node);
		ret = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return ret;
}
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			    struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 */
static void update_dev_time(const char *path_name)
{
	struct file *filp;

	filp = filp_open(path_name, O_RDWR, 0);
	if (IS_ERR(filp))
		return;
	file_update_time(filp);
	filp_close(filp, NULL);
}
static int btrfs_rm_dev_item(struct btrfs_device *device)
{
	struct btrfs_root *root = device->fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}
/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
		u64 num_devices)
{
	u64 all_avail;
	unsigned seq;
	int i;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		all_avail = fs_info->avail_data_alloc_bits |
			    fs_info->avail_system_alloc_bits |
			    fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(all_avail & btrfs_raid_array[i].bg_flag))
			continue;

		if (num_devices < btrfs_raid_array[i].devs_min) {
			int ret = btrfs_raid_array[i].mindev_error;

			if (ret)
				return ret;
		}
	}

	return 0;
}
static struct btrfs_device * btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *next_device;

	list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
		if (next_device != device &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
		    && next_device->bdev)
			return next_device;
	}

	return NULL;
}
/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_device *next_device;

	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
			(fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}
/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 num_devices = fs_info->fs_devices->num_devices;

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		ASSERT(num_devices > 1);
		num_devices--;
	}
	up_read(&fs_info->dev_replace.rwsem);

	return num_devices;
}
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	mutex_lock(&uuid_mutex);

	num_devices = btrfs_num_devices(fs_info);

	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;

	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);

	if (IS_ERR(device)) {
		if (PTR_ERR(device) == -ENOENT &&
		    strcmp(device_path, "missing") == 0)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = PTR_ERR(device);
		goto out;
	}

	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
	  "cannot remove device %s (devid %llu) due to active swapfile",
				  rcu_str_deref(device->name), device->devid);
		ret = -ETXTBSY;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(device);
	if (ret)
		goto error_undo;

	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	btrfs_scrub_cancel_dev(device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	/*
	 * In normal cases the cur_devices == fs_devices. But in case
	 * of deleting a seed device, the cur_devices should point to
	 * its own fs_devices listed under the fs_devices->seed.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	cur_devices->num_devices--;
	cur_devices->total_devices--;
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		cur_devices->missing_devices--;

	btrfs_assign_next_active_device(device, NULL);

	if (device->bdev) {
		cur_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(fs_devices, device);
	}

	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * at this point, the device is zero sized and detached from
	 * the devices list.  All that's left is to zero out the old
	 * supers and free the device.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		btrfs_scratch_superblocks(device->bdev, device->name->str);

	btrfs_close_bdev(device);
	synchronize_rcu();
	btrfs_free_device(device);

	if (cur_devices->open_devices == 0) {
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		close_fs_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

out:
	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_add(&device->dev_alloc_list,
			 &fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	goto out;
}
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);

	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to fs_devices of fs_info. However when the dev being replaced is
	 * a seed dev it will point to the seed's local fs_devices. In short
	 * srcdev will have its correct fs_devices in both the cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_info *fs_info = srcdev->fs_info;
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}

	btrfs_close_bdev(srcdev);
	btrfs_free_device(srcdev);

	/* If there are no devices left, delete the fs_devices as well */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target is added to the sprout FS, so there will be no more
		 * devices left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}
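/*
 * Tear down the dev-replace target device: drop it from the device list,
 * wipe its superblocks, then close and free it.
 */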
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_devices, tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	btrfs_assign_next_active_device(tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * The update_dev_time() within btrfs_scratch_superblocks() may
	 * lead to a call to btrfs_show_devname() which will try to hold
	 * device_list_mutex. And here this device is already out of the
	 * device list, so we don't have to hold the device_list_mutex
	 * lock.
	 */
	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);

	btrfs_close_bdev(tgtdev);
	btrfs_free_device(tgtdev);
}
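/*
 * Read the superblock of the device at @device_path and look up the
 * corresponding btrfs_device in the currently mounted filesystem.
 */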
static struct btrfs_device *btrfs_find_device_by_path(
		struct btrfs_fs_info *fs_info, const char *device_path)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_device *device;

	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ERR_PTR(ret);
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   disk_super->metadata_uuid, true);
	else
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   disk_super->fsid, true);

	brelse(bh);
	if (!device)
		device = ERR_PTR(-ENOENT);
	blkdev_put(bdev, FMODE_READ);
	return device;
}
/*
 * Lookup a device given by device id, or the path if the id is 0.
 */
struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
{
	struct btrfs_device *device;

	if (devid) {
		device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
					   NULL, true);
		if (!device)
			return ERR_PTR(-ENOENT);
		return device;
	}

	if (!device_path || !device_path[0])
		return ERR_PTR(-EINVAL);

	if (strcmp(device_path, "missing") == 0) {
		/* Find first missing device */
		list_for_each_entry(device, &fs_info->fs_devices->devices,
				    dev_list) {
			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				     &device->dev_state) && !device->bdev)
				return device;
		}
		return ERR_PTR(-ENOENT);
	}

	return btrfs_find_device_by_path(fs_info, device_path);
}
/*
 * Does all the dirty work required for changing file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	lockdep_assert_held(&uuid_mutex);
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = alloc_fs_devices(NULL, NULL);
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->fs_list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	mutex_lock(&fs_info->chunk_mutex);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	mutex_unlock(&fs_info->chunk_mutex);

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * Store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_FSID_SIZE);
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   fs_uuid, true);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
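/*
 * Add a new device at @device_path to the mounted filesystem. If the fs is
 * currently a seed filesystem, this sprouts a new writable filesystem on top
 * of it (see btrfs_prepare_sprout() and btrfs_finish_sprout() above).
 */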
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct super_block *sb = fs_info->sb;
	struct rcu_string *name;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 orig_super_total_bytes;
	u64 orig_super_num_devices;
	int seeding_dev = 0;
	int ret = 0;
	bool unlocked = false;

	if (sb_rdonly(sb) && !fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	device = btrfs_alloc_device(fs_info, NULL, NULL);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		ret = -ENOMEM;
		goto error_free_device;
	}
	rcu_assign_pointer(device->name, name);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_free_device;
	}

	q = bdev_get_queue(bdev);
	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = trans->transid;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
					 fs_info->sectorsize);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	device->fs_info = fs_info;
	device->bdev = bdev;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);

	if (seeding_dev) {
		sb->s_flags &= ~SB_RDONLY;
		ret = btrfs_prepare_sprout(fs_info);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_trans;
		}
	}

	device->fs_devices = fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_add_rcu(&device->dev_list, &fs_devices->devices);
	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	fs_devices->rw_devices++;
	fs_devices->total_devices++;
	fs_devices->total_rw_bytes += device->total_bytes;

	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);

	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	btrfs_set_super_total_bytes(fs_info->super_copy,
		round_down(orig_super_total_bytes + device->total_bytes,
			   fs_info->sectorsize));

	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices + 1);

	/* add sysfs device entry */
	btrfs_sysfs_add_device_link(fs_devices, device);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(fs_info);

	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (seeding_dev) {
		mutex_lock(&fs_info->chunk_mutex);
		ret = init_first_rw_device(trans);
		mutex_unlock(&fs_info->chunk_mutex);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}
	}

	ret = btrfs_add_dev_item(trans, device);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_sysfs;
	}

	if (seeding_dev) {
		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];

		ret = btrfs_finish_sprout(trans);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}

		/* Sprouting would change fsid of the mounted root,
		 * so rename the fsid on the sysfs
		 */
		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
			 fs_info->fs_devices->fsid);
		if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
			btrfs_warn(fs_info,
				   "sysfs: failed to create fsid for sprout");
	}

	ret = btrfs_commit_transaction(trans);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
		unlocked = true;

		if (ret) /* transaction commit */
			return ret;

		ret = btrfs_relocate_sys_chunks(fs_info);
		if (ret < 0)
			btrfs_handle_fs_error(fs_info, ret,
				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			ret = PTR_ERR(trans);
			trans = NULL;
			goto error_sysfs;
		}
		ret = btrfs_commit_transaction(trans);
	}

	/* Update ctime/mtime for libblkid */
	update_dev_time(device_path);
	return ret;

error_sysfs:
	btrfs_sysfs_rm_device_link(fs_devices, device);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_del_rcu(&device->dev_list);
	list_del(&device->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	fs_info->fs_devices->open_devices--;
	fs_info->fs_devices->rw_devices--;
	fs_info->fs_devices->total_devices--;
	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
	btrfs_set_super_total_bytes(fs_info->super_copy,
				    orig_super_total_bytes);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices);
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
	if (seeding_dev)
		sb->s_flags |= SB_RDONLY;
	if (trans)
		btrfs_end_transaction(trans);
error_free_device:
	btrfs_free_device(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev && !unlocked) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}
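/*
 * Write the in-memory state of @device back to its dev item in the chunk
 * tree.
 */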
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->fs_info->chunk_root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
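/*
 * Grow @device to @new_size, updating the superblock total size and the
 * on-disk device item within the given transaction.
 */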
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total;
	u64 diff;

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return -EACCES;

	new_size = round_down(new_size, fs_info->sectorsize);

	mutex_lock(&fs_info->chunk_mutex);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);

	if (new_size <= device->total_bytes ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EINVAL;
	}

	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total + diff, fs_info->sectorsize));
	device->fs_devices->total_rw_bytes += diff;

	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
	btrfs_clear_space_info_full(device->fs_info);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);
	mutex_unlock(&fs_info->chunk_mutex);

	return btrfs_update_device(trans, device);
}
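/*
 * Delete the chunk item at @chunk_offset from the chunk tree.
 */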
static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_handle_fs_error(fs_info, -ENOENT,
				      "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}
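/*
 * Remove the chunk at @chunk_offset from the sys_chunk_array in the
 * superblock copy, shifting the remaining entries down.
 */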
static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	mutex_unlock(&fs_info->chunk_mutex);
	return ret;
}
/*
 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 * @logical: Logical block offset in bytes.
 * @length: Length of extent in bytes.
 *
 * Return: Chunk mapping or ERR_PTR.
 */
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
			   logical, length);
		return ERR_PTR(-EINVAL);
	}

	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info,
			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
			   logical, length, em->start, em->start + em->len);
		free_extent_map(em);
		return ERR_PTR(-EINVAL);
	}

	/* callers are responsible for dropping em's ref. */
	return em;
}
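/*
 * Remove the chunk at @chunk_offset: free all of its device extents, delete
 * the chunk item (and the sys_chunk_array entry for system chunks), then
 * remove the block group itself.
 */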
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_map *em;
	struct map_lookup *map;
	u64 dev_extent_len = 0;
	int i, ret = 0;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em)) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
		return PTR_ERR(em);
	}
	map = em->map_lookup;
	mutex_lock(&fs_info->chunk_mutex);
	check_system_chunk(trans, map->type);
	mutex_unlock(&fs_info->chunk_mutex);

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		if (device->bytes_used > 0) {
			mutex_lock(&fs_info->chunk_mutex);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
			btrfs_clear_space_info_full(fs_info);
			mutex_unlock(&fs_info->chunk_mutex);
		}

		ret = btrfs_update_device(trans, device);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	ret = btrfs_free_chunk(trans, chunk_offset);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_remove_block_group(trans, chunk_offset, em);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

out:
	free_extent_map(em);
	return ret;
}
static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	int ret;

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);

	ret = btrfs_can_relocate(fs_info, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(fs_info);
	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
	btrfs_scrub_continue(fs_info);
	if (ret)
		return ret;

	/*
	 * We add the kobjects here (and after forcing data chunk creation)
	 * since relocation is the only place we'll create chunks of a new
	 * type at runtime. The only place where we'll remove the last
	 * chunk of a type is the call immediately below this one. Even
	 * so, we're protected against races with the cleaner thread since
	 * we're covered by the delete_unused_bgs_mutex.
	 */
	btrfs_add_raid_kobjects(fs_info);

	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, chunk_offset);
	btrfs_end_transaction(trans);
	return ret;
}
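/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk, retrying
 * the whole pass once if some relocations failed with ENOSPC.
 */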
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}
		BUG_ON(ret == 0); /* Corruption */

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * return 1 : allocate a data chunk successfully,
 * return <0: errors during allocating a data chunk,
 * return 0 : no need to allocate a data chunk.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
{
	struct btrfs_block_group_cache *cache;
	u64 bytes_used;
	u64 chunk_type;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	ASSERT(cache);
	chunk_type = cache->flags;
	btrfs_put_block_group(cache);

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
		spin_lock(&fs_info->data_sinfo->lock);
		bytes_used = fs_info->data_sinfo->bytes_used;
		spin_unlock(&fs_info->data_sinfo->lock);

		if (!bytes_used) {
			struct btrfs_trans_handle *trans;
			int ret;

			trans = btrfs_join_transaction(fs_info->tree_root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = btrfs_force_chunk_alloc(trans,
						      BTRFS_BLOCK_GROUP_DATA);
			btrfs_end_transaction(trans);
			if (ret < 0)
				return ret;

			btrfs_add_raid_kobjects(fs_info);

			return 1;
		}
	}
	return 0;
}
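/*
 * Persist the balance state: insert_balance_item() stores the current
 * balance args as a temporary item in the tree root; del_balance_item()
 * removes it again once the balance finishes or is canceled.
 */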
static int insert_balance_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));

	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}
static int del_balance_item(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}
/*
 * This is a heuristic used to reduce the number of chunks balanced on
 * resume after balance was interrupted.
 */
static void update_balance_args(struct btrfs_balance_control *bctl)
{
	/*
	 * Turn on soft mode for chunk types that were being converted.
	 */
	if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
	if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;

	/*
	 * Turn on usage filter if it is not already used. The idea is
	 * that chunks that we have already balanced should be
	 * reasonably full. Don't do it for chunks that are being
	 * converted - that will keep us from relocating unconverted
	 * (albeit full) chunks.
	 */
	if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->data.usage = 90;
	}
	if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->sys.usage = 90;
	}
	if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
		bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
		bctl->meta.usage = 90;
	}
}
/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	int ret;

	BUG_ON(!fs_info->balance_ctl);

	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
	ret = del_balance_item(fs_info);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);
}
/*
 * Balance filters. Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->profiles & chunk_type)
		return 0;

	return 1;
}
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
				    struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used;
	u64 user_thresh_min;
	u64 user_thresh_max;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh_min = 0;
	else
		user_thresh_min = div_factor_fine(cache->key.offset,
						  bargs->usage_min);

	if (bargs->usage_max == 0)
		user_thresh_max = 1;
	else if (bargs->usage_max > 100)
		user_thresh_max = cache->key.offset;
	else
		user_thresh_max = div_factor_fine(cache->key.offset,
						  bargs->usage_max);

	if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
			      u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 chunk_used, user_thresh;
	int ret = 1;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);

	if (bargs->usage_min == 0)
		user_thresh = 1;
	else if (bargs->usage > 100)
		user_thresh = cache->key.offset;
	else
		user_thresh = div_factor_fine(cache->key.offset,
					      bargs->usage);

	if (chunk_used < user_thresh)
		ret = 0;

	btrfs_put_block_group(cache);
	return ret;
}
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int i;

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
			return 0;
	}

	return 1;
}
/* [pstart, pend) */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
		factor = num_stripes / 2;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
		factor = num_stripes - 1;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
		factor = num_stripes - 2;
	} else {
		factor = num_stripes;
	}

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}
/* [vstart, vend) */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	if (chunk_offset < bargs->vend &&
	    chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
		/* at least part of the chunk is inside this vrange */
		return 0;

	return 1;
}
static int chunk_stripes_range_filter(struct extent_buffer *leaf,
				      struct btrfs_chunk *chunk,
				      struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (bargs->stripes_min <= num_stripes
			&& num_stripes <= bargs->stripes_max)
		return 0;

	return 1;
}
static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	chunk_type = chunk_to_extended(chunk_type) &
				BTRFS_EXTENDED_PROFILE_MASK;

	if (bargs->target == chunk_type)
		return 1;

	return 0;
}
static int should_balance_chunk(struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* soft profile changing mode */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
	    chunk_soft_convert_filter(chunk_type, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of all chunks that satisfy the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}
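/*
 * Main balance loop. The first pass only counts the chunks that match the
 * filters (counting == true); the second pass actually relocates them,
 * walking the chunk tree from the highest offset downwards.
 */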
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/* The single value limit and min/max limits use the same bytes in the args */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * The single value limit and min/max limits use the same bytes
		 * in the args
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			break;
		}

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(leaf, chunk, found_key.offset);

		btrfs_release_path(path);
		if (!ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (counting) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
					count_data < bctl->data.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
					count_meta < bctl->meta.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
					count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (!chunk_reserved) {
			/*
			 * We may be relocating the only data chunk we have,
			 * which could potentially end up with losing data's
			 * raid profile, so lets allocate an empty one in
			 * advance.
			 */
			ret = btrfs_may_alloc_data_chunk(fs_info,
							 found_key.offset);
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			} else if (ret == 1) {
				chunk_reserved = 1;
			}
		}

		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else if (ret == -ETXTBSY) {
			btrfs_info(fs_info,
	   "skipping relocation of block group %llu due to active swapfile",
				   found_key.offset);
			ret = 0;
		} else if (ret) {
			goto error;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
			   enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}
/*
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
			       BTRFS_BLOCK_GROUP_PROFILE_MASK);

	flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) check that all other bits are zeroed */
	if (flags & ~mask)
		return 0;

	/* 2) see if profile is reduced */
	if (flags == 0)
		return !extended; /* "0" is valid for usual profiles */

	/* true if exactly one bit set */
	return is_power_of_2(flags);
}
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel requested || normal exit path */
	return atomic_read(&fs_info->balance_cancel_req) ||
		(atomic_read(&fs_info->balance_pause_req) == 0 &&
		 atomic_read(&fs_info->balance_cancel_req) == 0);
}
/* Non-zero return value signifies invalidity */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
					   u64 allowed)
{
	return ((bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		(!alloc_profile_is_valid(bctl_arg->target, 1) ||
		 (bctl_arg->target & ~allowed)));
}
/*
 * Fill @buf with textual description of balance filter flags @bargs, up to
 * @size_buf including the terminating null. The output may be trimmed if it
 * does not fit into the provided buffer.
 */
static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
				  u32 size_buf)
{
	int ret;
	u32 size_bp = size_buf;
	char *bp = buf;
	u64 flags = bargs->flags;
	char tmp_buf[128] = {'\0'};

	if (!flags)
		return;

#define CHECK_APPEND_NOARG(a)						\
	do {								\
		ret = snprintf(bp, size_bp, (a));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_2ARG(a, v1, v2)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (flags & BTRFS_BALANCE_ARGS_CONVERT) {
		int index = btrfs_bg_flags_to_raid_index(bargs->target);

		CHECK_APPEND_1ARG("convert=%s,", get_raid_name(index));
	}

	if (flags & BTRFS_BALANCE_ARGS_SOFT)
		CHECK_APPEND_NOARG("soft,");

	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
					    sizeof(tmp_buf));
		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
	}

	if (flags & BTRFS_BALANCE_ARGS_USAGE)
		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);

	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
		CHECK_APPEND_2ARG("usage=%u..%u,",
				  bargs->usage_min, bargs->usage_max);

	if (flags & BTRFS_BALANCE_ARGS_DEVID)
		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);

	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
		CHECK_APPEND_2ARG("drange=%llu..%llu,",
				  bargs->pstart, bargs->pend);

	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
				  bargs->vstart, bargs->vend);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
		CHECK_APPEND_2ARG("limit=%u..%u,",
				  bargs->limit_min, bargs->limit_max);

	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
		CHECK_APPEND_2ARG("stripes=%u..%u,",
				  bargs->stripes_min, bargs->stripes_max);

#undef CHECK_APPEND_2ARG
#undef CHECK_APPEND_1ARG
#undef CHECK_APPEND_NOARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
}
static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{
	u32 size_buf = 1024;
	char tmp_buf[192] = {'\0'};
	char *buf;
	char *bp;
	u32 size_bp = size_buf;
	int ret;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	buf = kzalloc(size_buf, GFP_KERNEL);
	if (!buf)
		return;

	bp = buf;

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (bctl->flags & BTRFS_BALANCE_FORCE)
		CHECK_APPEND_1ARG("%s", "-f ");

	if (bctl->flags & BTRFS_BALANCE_DATA) {
		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_METADATA) {
		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
	}

#undef CHECK_APPEND_1ARG

out_overflow:

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
	btrfs_info(fs_info, "balance: %s %s",
		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
		   "resume" : "start", buf);

	kfree(buf);
}
/*
 * Should be called with balance mutex held
 */
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	u64 meta_target, data_target;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;
	bool reducing_integrity;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info,
	  "balance: mixed groups data and metadata options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	num_devices = btrfs_num_devices(fs_info);

	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
	if (num_devices > 1)
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
	if (num_devices > 2)
		allowed |= BTRFS_BLOCK_GROUP_RAID5;
	if (num_devices > 3)
		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
			    BTRFS_BLOCK_GROUP_RAID6);
	if (validate_convert_profile(&bctl->data, allowed)) {
		int index = btrfs_bg_flags_to_raid_index(bctl->data.target);

		btrfs_err(fs_info,
			  "balance: invalid convert data profile %s",
			  get_raid_name(index));
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->meta, allowed)) {
		int index = btrfs_bg_flags_to_raid_index(bctl->meta.target);

		btrfs_err(fs_info,
			  "balance: invalid convert metadata profile %s",
			  get_raid_name(index));
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->sys, allowed)) {
		int index = btrfs_bg_flags_to_raid_index(bctl->sys.target);

		btrfs_err(fs_info,
			  "balance: invalid convert system profile %s",
			  get_raid_name(index));
		ret = -EINVAL;
		goto out;
	}

	/* allow to reduce meta or sys integrity only if force set */
	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			BTRFS_BLOCK_GROUP_RAID10 |
			BTRFS_BLOCK_GROUP_RAID5 |
			BTRFS_BLOCK_GROUP_RAID6;
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed)))
			reducing_integrity = true;
		else
			reducing_integrity = false;

		/* if we're not converting, the target field is uninitialized */
		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->data.target : fs_info->avail_data_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (reducing_integrity) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			btrfs_info(fs_info,
				   "balance: force reducing metadata integrity");
		} else {
			btrfs_err(fs_info,
	  "balance: reduces metadata integrity, use --force if you want this");
			ret = -EINVAL;
			goto out;
		}
	}

	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
		int meta_index = btrfs_bg_flags_to_raid_index(meta_target);
		int data_index = btrfs_bg_flags_to_raid_index(data_target);

		btrfs_warn(fs_info,
	"balance: metadata profile %s has lower redundancy than data profile %s",
			   get_raid_name(meta_index), get_raid_name(data_index));
	}

	ret = insert_balance_item(fs_info, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		BUG_ON(ret == -EEXIST);
		BUG_ON(fs_info->balance_ctl);
		spin_lock(&fs_info->balance_lock);
		fs_info->balance_ctl = bctl;
		spin_unlock(&fs_info->balance_lock);
	} else {
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
	describe_balance_start_or_resume(fs_info);
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
		btrfs_info(fs_info, "balance: paused");
	else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
		btrfs_info(fs_info, "balance: canceled");
	else
		btrfs_info(fs_info, "balance: ended with status: %d", ret);

	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		btrfs_update_ioctl_balance_args(fs_info, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		reset_balance_state(fs_info);
		clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		reset_balance_state(fs_info);
	else
		kfree(bctl);
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);

	return ret;
}
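/*
 * Kthread worker that resumes an interrupted balance described by
 * fs_info->balance_ctl.
 */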
static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (fs_info->balance_ctl)
		ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
	mutex_unlock(&fs_info->balance_mutex);

	return ret;
}
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return 0;
	}
	mutex_unlock(&fs_info->balance_mutex);

	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
		btrfs_info(fs_info, "balance: resume skipped");
		return 0;
	}

	/*
	 * A ro->rw remount sequence should continue with the paused balance
	 * regardless of who pauses it, system or the user as of now, so set
	 * the resume flag.
	 */
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
	spin_unlock(&fs_info->balance_lock);

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	/*
	 * This should never happen, as the paused balance state is recovered
	 * during mount without any chance of other exclusive ops to collide.
	 *
	 * This gives the exclusive op status to balance and keeps in paused
	 * state until user intervention (cancel or umount). If the ownership
	 * cannot be assigned, show a message but do not fail. The balance
	 * is in a paused state and must have fs_info::balance_ctl properly
	 * set up.
	 */
	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
		btrfs_warn(fs_info,
	"balance: cannot set exclusive op status, resume manually");

	mutex_lock(&fs_info->balance_mutex);
	BUG_ON(fs_info->balance_ctl);
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
	mutex_unlock(&fs_info->balance_mutex);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	/*
	 * A paused balance with the item stored on disk can be resumed at
	 * mount time if the mount is read-write. Otherwise it's still paused
	 * and we must not allow cancelling as it deletes the item.
	 */
	if (sb_rdonly(fs_info->sb)) {
		mutex_unlock(&fs_info->balance_mutex);
		return -EROFS;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		mutex_lock(&fs_info->balance_mutex);
	} else {
		mutex_unlock(&fs_info->balance_mutex);
		/*
		 * Lock released to allow other waiters to continue, we'll
		 * reexamine the status again.
		 */
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl) {
			reset_balance_state(fs_info);
			clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
			btrfs_info(fs_info, "balance: canceled");
		}
	}

	BUG_ON(fs_info->balance_ctl ||
		test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}
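/*
 * Scan all subvolume roots in the tree root and add their uuid and
 * received_uuid entries to the UUID tree.
 */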
static int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
	struct btrfs_trans_handle *trans = NULL;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &key, path,
					   BTRFS_OLDEST_GENERATION);
		if (ret) {
			if (ret > 0)
				ret = 0;
			break;
		}

		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			continue;
		} else {
			goto skip;
		}
update_tree:
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans,
						  root_item.received_uuid,
						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

skip:
		if (trans) {
			ret = btrfs_end_transaction(trans);
			trans = NULL;
			if (ret)
				break;
		}

		btrfs_release_path(path);
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}
/*
 * Callback for btrfs_uuid_tree_iterate().
 * returns:
 * 0	check succeeded, the entry is not outdated.
 * < 0	if an error occurred.
 * > 0	if the check failed, which means the caller shall remove the entry.
 */
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subid)
{
	struct btrfs_key key;
	int ret = 0;
	struct btrfs_root *subvol_root;

	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		goto out;

	key.objectid = subid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	subvol_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(subvol_root)) {
		ret = PTR_ERR(subvol_root);
		if (ret == -ENOENT)
			ret = 1;
		goto out;
	}

	switch (type) {
	case BTRFS_UUID_KEY_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.uuid, BTRFS_UUID_SIZE))
			ret = 1;
		break;
	case BTRFS_UUID_KEY_RECEIVED_SUBVOL:
		if (memcmp(uuid, subvol_root->root_item.received_uuid,
			   BTRFS_UUID_SIZE))
			ret = 1;
		break;
	}

out:
	return ret;
}
static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = (struct btrfs_fs_info *)data;
	int ret;

	/*
	 * 1st step is to iterate through the existing UUID tree and
	 * to delete all entries that contain outdated data.
	 * 2nd step is to add all missing entries to the UUID tree.
	 */
	ret = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
	if (ret < 0) {
		btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret);
		up(&fs_info->uuid_tree_rescan_sem);
		return ret;
	}
	return btrfs_uuid_scan_kthread(data);
}
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;

	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;
	u64 start;

	new_size = round_down(new_size, fs_info->sectorsize);
	start = new_size;
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}

	/*
	 * Once the device's size has been set to the new size, ensure all
	 * in-memory chunks are synced to disk so that the loop below sees them
	 * and relocates them accordingly.
	 */
	if (contains_pending_extent(device, &start, diff)) {
		mutex_unlock(&fs_info->chunk_mutex);
		ret = btrfs_commit_transaction(trans);
		if (ret)
			goto done;
	} else {
		mutex_unlock(&fs_info->chunk_mutex);
		btrfs_end_transaction(trans);
	}

again:
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so lets allocate an empty one in
		 * advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);
	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->post_commit_list))
		list_add_tail(&device->post_commit_list,
			      &trans->transaction->dev_update_list);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
done:
	btrfs_free_path(path);
	if (ret) {
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
			device->fs_devices->total_rw_bytes += diff;
		atomic64_add(diff, &fs_info->free_chunk_space);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EFBIG;
	}

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	mutex_unlock(&fs_info->chunk_mutex);

	return 0;
}
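
/*
 * Layout note (illustrative sketch, derived from the copies above): the
 * superblock's sys_chunk_array holds back-to-back pairs of
 *
 *   [struct btrfs_disk_key][chunk item][disk_key][chunk item]...
 *
 * btrfs_add_system_chunk() appends one such pair at array_size and bumps
 * sys_array_size, refusing the insert with -EFBIG once the fixed
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE buffer would overflow.
 */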
/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *di_a = a;
	const struct btrfs_device_info *di_b = b;

	if (di_a->max_avail > di_b->max_avail)
		return -1;
	if (di_a->max_avail < di_b->max_avail)
		return 1;
	if (di_a->total_avail > di_b->total_avail)
		return -1;
	if (di_a->total_avail < di_b->total_avail)
		return 1;
	return 0;
}
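
/*
 * Example (hypothetical numbers): devices with (max_avail, total_avail)
 * of (1G, 4G), (2G, 2G) and (2G, 3G) sort to (2G, 3G), (2G, 2G),
 * (1G, 4G) -- biggest hole first, total_avail only breaking ties.
 */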
static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
		return;

	btrfs_set_fs_incompat(info, RAID56);
}
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device *device;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies to data has */
	int nparity;		/* number of stripes worth of bytes to
				   store parity information */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 chunk_size;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return -ENOSPC;
	}

	index = btrfs_bg_flags_to_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;
	nparity = btrfs_raid_array[index].nparity;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = SZ_1G;
		max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info);
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			max_stripe_size = SZ_1G;
		else
			max_stripe_size = SZ_256M;
		max_chunk_size = max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info);
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = SZ_32M;
		max_chunk_size = 2 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
	} else {
		btrfs_err(info, "invalid chunk type 0x%llx requested",
			  type);
		BUG();
	}

	/* We don't want a chunk larger than 10% of writable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 max_avail;
		u64 dev_offset;

		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
					&device->dev_state) ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) {
			if (btrfs_test_opt(info, ENOSPC_DEBUG))
				btrfs_debug(info,
			"%s: devid %llu has no free space, have=%llu want=%u",
					    __func__, device->devid, max_avail,
					    BTRFS_STRIPE_LEN * dev_stripes);
			continue;
		}

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs = round_down(ndevs, devs_increment);

	if (ndevs < devs_min) {
		ret = -ENOSPC;
		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
			btrfs_debug(info,
	"%s: not enough devices with free space: have=%d minimum required=%d",
				    __func__, ndevs, devs_min);
		}
		goto error;
	}

	ndevs = min(ndevs, devs_max);

	/*
	 * The primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
	 *
	 * The DUP profile stores more than one stripe per device, the
	 * max_avail is the total size so we have to adjust.
	 */
	stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = (num_stripes - nparity) / ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size. If it's higher,
	 * we try to reduce stripe_size.
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		stripe_size = min(round_up(div_u64(max_chunk_size,
						   data_stripes), SZ_16M),
				  stripe_size);
	}

	/* align to BTRFS_STRIPE_LEN */
	stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	chunk_size = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info, map, start, chunk_size);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = chunk_size;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret) {
		write_unlock(&em_tree->lock);
		free_extent_map(em);
		goto error;
	}
	write_unlock(&em_tree->lock);

	ret = btrfs_make_block_group(trans, 0, type, start, chunk_size);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *dev = map->stripes[i].dev;

		btrfs_device_set_bytes_used(dev, dev->bytes_used + stripe_size);
		if (list_empty(&dev->post_commit_list))
			list_add_tail(&dev->post_commit_list,
				      &trans->transaction->dev_update_list);
	}

	atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);

	free_extent_map(em);
	check_raid56_incompat_flag(info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}
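
/*
 * Sizing sketch (illustrative, assuming 4 equally sized rw devices and a
 * RAID10 data chunk): dev_stripes=1, devs_increment=2, ncopies=2 and
 * nparity=0, so ndevs=4, num_stripes=4 and data_stripes=(4-0)/2=2.
 * stripe_size starts from the smallest hole among the chosen devices and
 * is clamped so stripe_size * data_stripes stays within max_chunk_size,
 * then rounded down to BTRFS_STRIPE_LEN.
 */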
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
					     dev_offset, stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}
/*
 * Chunk allocation falls into two parts. The first part does work
 * that makes the new allocated chunk usable, but does not do any operation
 * that modifies the chunk tree. The second part does the work that
 * requires modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	u64 chunk_offset;

	lockdep_assert_held(&trans->fs_info->chunk_mutex);
	chunk_offset = find_next_chunk(trans->fs_info);
	return __btrfs_alloc_chunk(trans, chunk_offset, type);
}
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 alloc_profile;
	int ret;

	chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_metadata_alloc_profile(fs_info);
	ret = __btrfs_alloc_chunk(trans, chunk_offset, alloc_profile);
	if (ret)
		return ret;

	sys_chunk_offset = find_next_chunk(fs_info);
	alloc_profile = btrfs_system_alloc_profile(fs_info);
	ret = __btrfs_alloc_chunk(trans, sys_chunk_offset, alloc_profile);
	return ret;
}
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	int max_errors;

	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		max_errors = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
		max_errors = 2;
	} else {
		max_errors = 0;
	}

	return max_errors;
}
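
/*
 * Summary of the mapping above: RAID1, RAID10, RAID5 and DUP tolerate one
 * failed stripe, RAID6 two, single/RAID0 none. Note that DUP reports 1
 * here even though btrfs_raid_array's tolerated_failures for DUP is 0 --
 * both copies live on the same device, so a stripe error is survivable
 * but a device loss is not.
 */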
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em))
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
					&map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
					&map->stripes[i].dev->dev_state)) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret;

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing which is just not do
		 * anything else and exit, so return 1 so the callers don't try
		 * to use other copies.
		 */
		return 1;

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		/*
		 * There could be two corrupted data stripes, we need
		 * to loop retry in order to rebuild the correct data.
		 *
		 * Fail a stripe at a time on every retry except the
		 * stripe under reconstruction.
		 */
		ret = map->num_stripes;
	else
		ret = 1;
	free_extent_map(em);

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
	    fs_info->dev_replace.tgtdev)
		ret++;
	up_read(&fs_info->dev_replace.rwsem);

	return ret;
}
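
/*
 * Example (illustrative): on a 4-device RAID10 chunk this returns
 * sub_stripes (2); on RAID1 or DUP, num_stripes; RAID5 reads can fall
 * back to parity, hence 2; and an ongoing dev-replace with a target
 * device adds one extra mirror on top of any of these.
 */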
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long len = fs_info->sectorsize;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			len = map->stripe_len * nr_data_stripes(map);
		free_extent_map(em);
	}
	return len;
}
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);

	if (!WARN_ON(IS_ERR(em))) {
		map = em->map_lookup;
		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
			ret = 1;
		free_extent_map(em);
	}
	return ret;
}
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	preferred_mirror = first + current->pid % num_stripes;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail. Just return something
	 * and the io error handling code will clean up eventually
	 */
	return preferred_mirror;
}
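
/*
 * Selection sketch (hypothetical numbers): with num_stripes=2 and
 * current->pid=1234 the preferred mirror is first + (1234 % 2), which
 * spreads reads across mirrors by caller. The two-pass tolerance loop
 * first refuses the dev-replace source drive entirely, then accepts it
 * if no other mirror has a usable bdev.
 */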
static inline int parity_smaller(u64 a, u64 b)
{
	return a > b;
}

/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;

				again = 1;
			}
		}
	}
}
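
/*
 * Example (hypothetical addresses): a raid_map of {P, 0, 64K} and its
 * parallel stripes array bubble-sort into {0, 64K, P}, so data stripes
 * come first and the RAID5_P_STRIPE/RAID6_Q_STRIPE markers (huge u64
 * sentinel values) sink to the tail.
 */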
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		 /* the size of the btrfs_bio */
		sizeof(struct btrfs_bio) +
		/* plus the variable array for the stripes */
		sizeof(struct btrfs_bio_stripe) * (total_stripes) +
		/* plus the variable array for the tgt dev */
		sizeof(int) * (real_stripes) +
		/*
		 * plus the raid_map, which includes both the tgt dev
		 * and the stripes
		 */
		sizeof(u64) * (total_stripes),
		GFP_NOFS|__GFP_NOFAIL);

	atomic_set(&bbio->error, 0);
	refcount_set(&bbio->refs, 1);

	return bbio;
}

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!refcount_read(&bbio->refs));
	refcount_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (refcount_dec_and_test(&bbio->refs))
		kfree(bbio);
}
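
/*
 * Memory layout of the single allocation made by alloc_btrfs_bio()
 * above (total_stripes = T, real_stripes = R):
 *
 *   struct btrfs_bio | T x btrfs_bio_stripe | R x int (tgtdev_map)
 *                    | T x u64 (raid_map)
 *
 * __btrfs_map_block() later points bbio->tgtdev_map and bbio->raid_map
 * into these trailing arrays instead of allocating them separately.
 */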
/* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
/*
 * Please note that, discard won't be sent to target device of device
 * replace.
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 struct btrfs_bio **bbio_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_bio *bbio;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;

	/* discard always return a bbio */
	ASSERT(bbio_ret);

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	/* we don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	offset = logical - em->start;
	length = min_t(u64, em->len - offset, length);

	stripe_len = map->stripe_len;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(offset, stripe_len);

	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_nr * stripe_len;

	stripe_nr_end = round_up(offset + length, map->stripe_len);
	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
	stripe_cnt = stripe_nr_end - stripe_nr;
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + length);
	/*
	 * after this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;

		factor = map->num_stripes / sub_stripes;
		num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= sub_stripes;
		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
					      &remaining_stripes);
		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
		last_stripe *= sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = map->num_stripes;
	} else {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
	}

	bbio = alloc_btrfs_bio(num_stripes, 0);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;

		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			bbio->stripes[i].length = stripes_per_dev *
				map->stripe_len;

			if (i / sub_stripes < remaining_stripes)
				bbio->stripes[i].length +=
					map->stripe_len;

			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off     end_off
			 */
			if (i < sub_stripes)
				bbio->stripes[i].length -=
					stripe_offset;

			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				bbio->stripes[i].length -=
					stripe_end_offset;

			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			bbio->stripes[i].length = length;
		}

		stripe_index++;
		if (stripe_index == map->num_stripes) {
			stripe_index = 0;
			stripe_nr++;
		}
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
out:
	free_extent_map(em);
	return ret;
}
/*
 * In dev-replace case, for repair case (that's the only case where the mirror
 * is selected explicitly when calling btrfs_map_block), blocks left of the
 * left cursor can also be read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_bio *bbio = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;

	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bbio, 0, 0);
	if (ret) {
		ASSERT(bbio == NULL);
		return ret;
	}

	num_stripes = bbio->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * that means that the requested area is not left of the left
		 * cursor
		 */
		btrfs_put_bbio(bbio);
		return -EIO;
	}

	/*
	 * process the rest of the function using the mirror_num of the source
	 * drive. Therefore look it up first. At the end, patch the device
	 * pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bbio->stripes[i].dev->devid != srcdev_devid)
			continue;

		/*
		 * In case of DUP, in order to keep it simple, only add the
		 * mirror with the lowest physical address
		 */
		if (found &&
		    physical_of_found <= bbio->stripes[i].physical)
			continue;

		index_srcdev = i;
		found = 1;
		physical_of_found = bbio->stripes[i].physical;
	}

	btrfs_put_bbio(bbio);

	ASSERT(found);
	if (!found)
		return -EIO;

	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}
static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_bio **bbio_ret,
				      struct btrfs_dev_replace *dev_replace,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_bio *bbio = *bbio_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;

	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;

		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk to
		 * the new disk takes place at run time while the filesystem is
		 * mounted writable, the regular write operations to the old
		 * disk have to be duplicated to go to the new disk as well.
		 *
		 * Note that device->missing is handled by the caller, and that
		 * the write to the old disk is already set up in the stripes
		 * array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;

				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;

		/*
		 * During the dev-replace procedure, the target drive can also
		 * be used to read data in case it is needed to repair a corrupt
		 * block elsewhere. This is possible if the requested area is
		 * left of the left cursor. In this area, the target drive is a
		 * full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it simple,
				 * only add the mirror with the lowest physical
				 * address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			struct btrfs_bio_stripe *tgtdev_stripe =
				bbio->stripes + num_stripes;

			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;

			tgtdev_indexes++;
			num_stripes++;
		}
	}

	*num_stripes_ret = num_stripes;
	*max_errors_ret = max_errors;
	bbio->num_tgtdevs = tgtdev_indexes;
	*bbio_ret = bbio;
}
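
/*
 * Write-duplication sketch (hypothetical stripes): if stripes[0] sits on
 * the replace source device, a WRITE bbio of {src, devB} grows to
 * {src, devB, tgt} with tgtdev_map[0] = 2, so the copy to the new disk
 * follows the original write, and max_errors is bumped for the extra
 * stripe.
 */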
static bool need_full_stripe(enum btrfs_map_op op)
{
	return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
}
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u32 stripe_index;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;

	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     *length, bbio_ret);

	em = btrfs_get_chunk_map(fs_info, logical, *length);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	offset = logical - em->start;

	stripe_len = map->stripe_len;
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(stripe_nr, stripe_len);

	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info,
			   "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
			   stripe_offset, offset, em->start, logical,
			   stripe_len);
		free_extent_map(em);
		return -EINVAL;
	}

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;

	/* if we're here for raid56, we need to know the stripe aligned start */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
		raid56_full_stripe_start = offset;

		/* allow a write of a full stripe, but make sure we don't
		 * allow straddling of stripes
		 */
		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
				full_stripe_len);
		raid56_full_stripe_start *= full_stripe_len;
	}

	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len;
		/* For writes to RAID[56], allow a full stripeset across all disks.
		   For other RAID types and for RAID[56] reads, just allow a single
		   stripe (on a single disk). */
		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
		    (op == BTRFS_MAP_WRITE)) {
			max_len = stripe_len * nr_data_stripes(map) -
				(offset - raid56_full_stripe_start);
		} else {
			/* we limit the length of each bio to what fits in a stripe */
			max_len = stripe_len - stripe_offset;
		}
		*length = min_t(u64, em->len - offset, max_len);
	} else {
		*length = em->len - offset;
	}

	/*
	 * This is for when we're called from btrfs_bio_fits_in_stripe and all
	 * it cares about is the length
	 */
	if (!bbio_ret)
		goto out;

	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing)
		up_read(&dev_replace->rwsem);

	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
						    dev_replace->srcdev->devid,
						    &mirror_num,
					    &physical_to_patch_in_first_stripe);
		if (ret)
			goto out;
		else
			patch_the_first_stripe_for_dev_replace = 1;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		if (!need_full_stripe(op))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (need_full_stripe(op))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (need_full_stripe(op)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;

		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;

		if (need_full_stripe(op))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div64_u64(raid56_full_stripe_start,
					stripe_len * nr_data_stripes(map));

			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);

			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					nr_data_stripes(map), &stripe_index);
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) +
						mirror_num - 2;

			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!need_full_stripe(op) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}

	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
		if (op == BTRFS_MAP_WRITE)
			num_alloc_stripes <<= 1;
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}

	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);

	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
		u64 tmp;
		unsigned rot;

		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);

		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);

		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * nr_data_stripes(map);
		for (i = 0; i < nr_data_stripes(map); i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;

		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}

	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset +
			stripe_nr * map->stripe_len;
		bbio->stripes[i].dev =
			map->stripes[stripe_index].dev;
		stripe_index++;
	}

	if (need_full_stripe(op))
		max_errors = btrfs_chunk_max_errors(map);

	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);

	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    need_full_stripe(op)) {
		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
					  &max_errors);
	}

	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;

	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	free_extent_map(em);
	return ret;
}
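
/*
 * Worked example (illustrative, RAID0 over 2 devices, 64K stripe_len):
 * for offset = 192K within the chunk, stripe_nr = 192K / 64K = 3 and
 * stripe_offset = 0; div_u64_rem(3, 2, &stripe_index) leaves
 * stripe_nr = 1 and stripe_index = 1, so the physical address is
 * stripes[1].physical + 1 * 64K.
 */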
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		      u64 logical, u64 *length,
		      struct btrfs_bio **bbio_ret, int mirror_num)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
				 mirror_num, 0);
}

/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret)
{
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
}
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
	if (IS_ERR(em))
		return -EIO;

	map = em->map_lookup;
	length = em->len;
	rmap_len = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = div_u64(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = div_u64(length, map->num_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		length = div_u64(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	BUG_ON(!buf); /* -ENOMEM */

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div64_u64(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */

		bytenr = chunk_start + stripe_nr * rmap_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;

	free_extent_map(em);
	return 0;
}
static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_private = bbio->private;
	bio->bi_end_io = bbio->end_io;
	bio_endio(bio);

	btrfs_put_bbio(bbio);
}
static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;

	if (bio->bi_status) {
		atomic_inc(&bbio->error);
		if (bio->bi_status == BLK_STS_IOERR ||
		    bio->bi_status == BLK_STS_TARGET) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;
			struct btrfs_device *dev;

			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				if (bio_op(bio) == REQ_OP_WRITE)
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else if (!(bio->bi_opf & REQ_RAHEAD))
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if (bio->bi_opf & REQ_PREFLUSH)
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
			}
		}
	}

	if (bio == bbio->orig_bio)
		is_orig_bio = 1;

	btrfs_bio_counter_dec(bbio->fs_info);

	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			bio->bi_status = BLK_STS_IOERR;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			bio->bi_status = BLK_STS_OK;
		}

		btrfs_end_bbio(bbio, bio);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void btrfs_schedule_bio(struct btrfs_device *device,
					struct bio *bio)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;

	/* don't bother with additional async steps for reads, right now */
	if (bio_op(bio) == REQ_OP_READ) {
		btrfsic_submit_bio(bio);
		return;
	}

	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;

	spin_lock(&device->io_lock);
	if (op_is_sync(bio->bi_opf))
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;

	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;

	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_work(fs_info->submit_workers, &device->work);
}
static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
			      u64 physical, int dev_nr, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
	struct btrfs_fs_info *fs_info = bbio->fs_info;

	bio->bi_private = bbio;
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
	btrfs_debug_in_rcu(fs_info,
	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
		(u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
		bio->bi_iter.bi_size);
	bio_set_dev(bio, dev->bdev);

	btrfs_bio_counter_inc_noblocked(fs_info);

	if (async)
		btrfs_schedule_bio(dev, bio);
	else
		btrfsic_submit_bio(bio);
}
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);

		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		if (atomic_read(&bbio->error) > bbio->max_errors)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_status = BLK_STS_OK;
		btrfs_end_bbio(bbio, bio);
	}
}
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;

	length = bio->bi_iter.bi_size;
	map_length = length;

	btrfs_bio_counter_inc_blocked(fs_info);
	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
				&map_length, &bbio, mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	total_devs = bbio->num_stripes;
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);

	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (bio_op(bio) == REQ_OP_WRITE) {
			ret = raid56_parity_write(fs_info, bio, bbio,
						  map_length);
		} else {
			ret = raid56_parity_recover(fs_info, bio, bbio,
						    map_length, mirror_num, 1);
		}

		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}

	if (map_length < length) {
		btrfs_crit(fs_info,
			   "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
		BUG();
	}

	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
						   &dev->dev_state) ||
		    (bio_op(first_bio) == REQ_OP_WRITE &&
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}

		if (dev_nr < total_devs - 1)
			bio = btrfs_bio_clone(first_bio);
		else
			bio = first_bio;

		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
				  dev_nr, async_submit);
	}
	btrfs_bio_counter_dec(fs_info);
	return BLK_STS_OK;
}
/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 *
 * If @seed is true, traverse through the seed devices.
 */
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid,
				       bool seed)
{
	struct btrfs_device *device;

	while (fs_devices) {
		if (!fsid ||
		    !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
			list_for_each_entry(device, &fs_devices->devices,
					    dev_list) {
				if (device->devid == devid &&
				    (!uuid || memcmp(device->uuid, uuid,
						     BTRFS_UUID_SIZE) == 0))
					return device;
			}
		}
		if (seed)
			fs_devices = fs_devices->seed;
		else
			return NULL;
	}
	return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;

	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return device;

	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;

	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;

	return device;
}
/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 tmp;

	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (devid)
		tmp = *devid;
	else {
		int ret;

		ret = find_next_devid(fs_info, &tmp);
		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	}
	dev->devid = tmp;

	if (uuid)
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
	else
		generate_random_uuid(dev->uuid);

	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);

	return dev;
}
static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (error)
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
	else
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
}
static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
{
	int index = btrfs_bg_flags_to_raid_index(type);
	int ncopies = btrfs_raid_array[index].ncopies;
	int data_stripes;

	switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case BTRFS_BLOCK_GROUP_RAID5:
		data_stripes = num_stripes - 1;
		break;
	case BTRFS_BLOCK_GROUP_RAID6:
		data_stripes = num_stripes - 2;
		break;
	default:
		data_stripes = num_stripes / ncopies;
		break;
	}
	return div_u64(chunk_len, data_stripes);
}
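
/*
 * Example (hypothetical chunk): a 6G RAID6 chunk over 6 stripes has
 * data_stripes = 6 - 2 = 4, so each device extent is 6G / 4 = 1.5G.
 * For RAID1 (ncopies=2) over 2 stripes, a 1G chunk likewise maps to
 * 1G device extents.
 */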
static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	/*
	 * Only need to verify chunk item if we're reading from sys chunk array,
	 * as chunk item in tree block is already verified by tree-checker.
	 */
	if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
		ret = btrfs_check_chunk_valid(leaf, chunk, logical);
		if (ret)
			return ret;
	}

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	map->verified_stripes = 0;
	em->orig_block_len = calc_stripe_length(map->type, em->len,
						map->num_stripes);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
							devid, uuid, NULL, true);
		if (!map->stripes[i].dev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			free_extent_map(em);
			btrfs_report_missing_device(fs_info, devid, uuid, true);
			return -ENOENT;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(fs_info->fs_devices, devid,
						uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				free_extent_map(em);
				btrfs_err(fs_info,
					"failed to init missing dev %llu: %ld",
					devid, PTR_ERR(map->stripes[i].dev));
				return PTR_ERR(map->stripes[i].dev);
			}
			btrfs_report_missing_device(fs_info, devid, uuid, false);
		}
		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				&(map->stripes[i].dev->dev_state));
	}

	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
	write_unlock(&map_tree->map_tree.lock);
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  em->start, em->len, ret);
	}
	free_extent_map(em);

	return ret;
}
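/*
 * Illustrative walk-through (not from the original source): for a RAID1
 * chunk item with two stripes at logical offset L, read_one_chunk() builds
 * one extent_map with em->start = L and em->len = chunk length, plus a
 * map_lookup whose stripes[] point at the two btrfs_device structures
 * resolved via btrfs_find_device(). With -o degraded, an unresolvable
 * devid is stood in for by add_missing_dev() rather than failing the
 * mount outright.
 */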
static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	lockdep_assert_held(&uuid_mutex);
	ASSERT(fsid);

	fs_devices = fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid, NULL);
	if (!fs_devices) {
		if (!btrfs_test_opt(fs_info, DEGRADED))
			return ERR_PTR(-ENOENT);

		fs_devices = alloc_fs_devices(fsid, NULL);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(ret);
		goto out;
	}

	if (!fs_devices->seeding) {
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

	fs_devices->seed = fs_info->fs_devices->seed;
	fs_info->fs_devices->seed = fs_devices;
out:
	return fs_devices;
}
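/*
 * Sketch of the resulting linkage (illustrative): after a sprout is
 * mounted, the just-opened seed fs_devices is spliced into a singly
 * linked chain hanging off the sprout:
 *
 *	fs_info->fs_devices->seed -> seed fs_devices
 *	                                 ->seed -> older seed, or NULL
 *
 * which is why the lookup loop above walks ->seed before falling back to
 * find_fsid().
 */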
static int read_one_dev(struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);

	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
				   fs_uuid, true);
	if (!device) {
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
							dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				"failed to add missing dev %llu: %ld",
				devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
							dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * This happens when a device that was properly set up
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
							&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
				&fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system array.
	 * set_extent_buffer_uptodate() call does not properly mark all its
	 * pages up-to-date when the page is larger: extent does not cover the
	 * whole page and consequently check_page_uptodate does not find all
	 * the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
	 * but sb spans only this function. Add an explicit SetPageUptodate call
	 * to silence the warning eg. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			type = btrfs_chunk_type(sb, chunk);
			if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
				btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
					type, cur_offset);
				ret = -EIO;
				break;
			}

			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(&key, sb, chunk);
			if (ret)
				break;
		} else {
			btrfs_err(fs_info,
			    "unexpected item type %u in sys_array at offset %u",
				  (u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
			len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}
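/*
 * Layout being parsed above (illustrative): sys_chunk_array is a packed
 * sequence of (btrfs_disk_key, btrfs_chunk) pairs, each chunk followed
 * inline by its btrfs_stripe entries:
 *
 *	| disk_key | chunk hdr + N stripes | disk_key | chunk ... |
 *
 * hence the two-step length check: first btrfs_chunk_item_size(1) so that
 * num_stripes can be read safely, then btrfs_chunk_item_size(num_stripes)
 * for the full item.
 */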
 * Check if all chunks in the fs are OK for read-write degraded mount
 *
 * If the @failing_dev is specified, it's accounted as missing.
 *
 * Return true if all chunks meet the minimal RW mount requirements.
 * Return false if any chunk doesn't meet the minimal RW mount requirements.
 */
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
					struct btrfs_device *failing_dev)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, 0, (u64)-1);
	read_unlock(&map_tree->map_tree.lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
				   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->map_tree.lock);
		em = lookup_extent_mapping(&map_tree->map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->map_tree.lock);
	}
out:
	return ret;
}
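/*
 * Worked example (illustrative): a RAID1 chunk tolerates one failure, so
 * with one of its two stripes on a missing device the chunk still passes
 * (missing == 1 == max_tolerated). A RAID0 or SINGLE chunk tolerates
 * none, so a single missing stripe device makes this function return
 * false and the read-write degraded mount is refused.
 */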
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(&found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		btrfs_err(fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}
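/*
 * Key-ordering example (illustrative): device items live at objectid
 * BTRFS_DEV_ITEMS_OBJECTID (1) while chunk items start at
 * BTRFS_FIRST_CHUNK_TREE_OBJECTID (256), so the single forward scan above
 * starting from key (1, 0, 0) visits every device item before the first
 * chunk item, and read_one_chunk() can rely on all devices being known.
 */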
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	while (fs_devices) {
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list)
			device->fs_info = fs_info;
		mutex_unlock(&fs_devices->device_list_mutex);

		fs_devices = fs_devices->seed;
	}
}
static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_dev_stat_reset(dev, i);
}
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
 * Called from commit_transaction(). Writes all changed device stats to disk.
 */
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barrier pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);
	return ret;
}
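/*
 * Ordering sketch (illustrative, producer side abbreviated): the
 * increment path in btrfs_dev_stat_inc() is roughly
 *
 *	atomic_inc(value slot);
 *	smp_mb__before_atomic();
 *	atomic_inc(&dev->dev_stats_ccnt);
 *
 * so once the loop above reads a non-zero dev_stats_ccnt, the smp_rmb()
 * guarantees the btrfs_dev_stat_read() calls inside update_dev_stat_item()
 * observe at least the counter updates that were counted.
 */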
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
	int i;

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		if (btrfs_dev_stat_read(dev, i) != 0)
			break;
	if (i == BTRFS_DEV_STAT_VALUES_MAX)
		return; /* all values == 0, suppress message */

	btrfs_info_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
	       rcu_str_deref(dev->name),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
				true);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
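/*
 * User-space usage sketch (illustrative, assuming the
 * BTRFS_IOC_GET_DEV_STATS definitions from linux/btrfs.h):
 *
 *	struct btrfs_ioctl_get_dev_stats args = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	ioctl(fs_fd, BTRFS_IOC_GET_DEV_STATS, &args);
 *
 * Passing BTRFS_DEV_STATS_RESET in args.flags returns the values and
 * zeroes them in one step, which is what "btrfs device stats -z" relies
 * on.
 */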
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
		copy_num++) {

		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}
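/*
 * Note (illustrative): the superblock copies cleared above sit at fixed
 * offsets returned by btrfs_sb_offset(): 64KiB for the primary and, when
 * the device is large enough, mirrors at 64MiB and 256GiB. Only the magic
 * is zeroed, which is enough for blkid and "btrfs device scan" to stop
 * recognizing the device.
 */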
 * Update the size and bytes used for each device where it changed. This is
 * delayed since we would otherwise get errors while writing out the
 * superblocks.
 *
 * Must be invoked during transaction commit.
 */
void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
{
	struct btrfs_device *curr, *next;

	ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);

	if (list_empty(&trans->dev_update_list))
		return;

	/*
	 * We don't need the device_list_mutex here. This list is owned by the
	 * transaction and the transaction must complete before the device is
	 * released.
	 */
	mutex_lock(&trans->fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &trans->dev_update_list,
				 post_commit_list) {
		list_del_init(&curr->post_commit_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
		curr->commit_bytes_used = curr->bytes_used;
	}
	mutex_unlock(&trans->fs_info->chunk_mutex);
}
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = fs_info;
		fs_devices = fs_devices->seed;
	}
}

void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	while (fs_devices) {
		fs_devices->fs_info = NULL;
		fs_devices = fs_devices->seed;
	}
}
 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		return 2;
	return 1;
}
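/*
 * Usage example (illustrative): the factor converts raw device bytes to
 * logical block group bytes, e.g. a 1GiB RAID1 block group consumes 2GiB
 * of raw space, so raw_used / btrfs_bg_type_to_factor(flags) yields the
 * logical usage. RAID5/6 are deliberately not covered here because their
 * ratio depends on the stripe count, not on the profile alone.
 */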
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* It's possible this device is a dummy for seed device */
	if (dev->disk_total_bytes == 0) {
		dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL,
					NULL, false);
		if (!dev) {
			btrfs_err(fs_info, "failed to find seed devid %llu",
				  devid);
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}
out:
	free_extent_map(em);
	return ret;
}
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}
 * Ensure that all dev extents are mapped to correct chunk, otherwise
 * later chunk allocation/free would cause unexpected behavior.
 *
 * NOTE: This will iterate through the whole device tree, which should be of
 * the same size level as the chunk tree. This slightly increases mount time.
 */
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
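/*
 * Worked overlap example (illustrative): with dev extents on devid 1 at
 * physical [0, 1GiB) and [512MiB, 1.5GiB), the second item has
 * physical_offset (512MiB) < prev_dev_ext_end (1GiB), so the scan above
 * reports the overlap and fails the mount with -EUCLEAN before any chunk
 * allocation can act on the corrupted layout.
 */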
 * Check whether the given block group or device is pinned by any inode being
 * used as a swapfile.
 */
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = fs_info->swapfile_pins.rb_node;
	while (node) {
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (ptr < sp->ptr)
			node = node->rb_left;
		else if (ptr > sp->ptr)
			node = node->rb_right;
		else
			break;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
	return node != NULL;
}