1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2007 Oracle. All rights reserved.
6 #include <linux/sched.h>
7 #include <linux/sched/mm.h>
9 #include <linux/slab.h>
10 #include <linux/buffer_head.h>
11 #include <linux/blkdev.h>
12 #include <linux/ratelimit.h>
13 #include <linux/kthread.h>
14 #include <linux/raid/pq.h>
15 #include <linux/semaphore.h>
16 #include <linux/uuid.h>
17 #include <linux/list_sort.h>
20 #include "extent_map.h"
22 #include "transaction.h"
23 #include "print-tree.h"
26 #include "async-thread.h"
27 #include "check-integrity.h"
28 #include "rcu-string.h"
29 #include "dev-replace.h"
31 #include "tree-checker.h"
32 #include "space-info.h"
33 #include "block-group.h"
35 const struct btrfs_raid_attr btrfs_raid_array
[BTRFS_NR_RAID_TYPES
] = {
36 [BTRFS_RAID_RAID10
] = {
39 .devs_max
= 0, /* 0 == as many as possible */
41 .tolerated_failures
= 1,
45 .raid_name
= "raid10",
46 .bg_flag
= BTRFS_BLOCK_GROUP_RAID10
,
47 .mindev_error
= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET
,
49 [BTRFS_RAID_RAID1
] = {
54 .tolerated_failures
= 1,
59 .bg_flag
= BTRFS_BLOCK_GROUP_RAID1
,
60 .mindev_error
= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET
,
67 .tolerated_failures
= 0,
72 .bg_flag
= BTRFS_BLOCK_GROUP_DUP
,
75 [BTRFS_RAID_RAID0
] = {
80 .tolerated_failures
= 0,
85 .bg_flag
= BTRFS_BLOCK_GROUP_RAID0
,
88 [BTRFS_RAID_SINGLE
] = {
93 .tolerated_failures
= 0,
97 .raid_name
= "single",
101 [BTRFS_RAID_RAID5
] = {
106 .tolerated_failures
= 1,
110 .raid_name
= "raid5",
111 .bg_flag
= BTRFS_BLOCK_GROUP_RAID5
,
112 .mindev_error
= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET
,
114 [BTRFS_RAID_RAID6
] = {
119 .tolerated_failures
= 2,
123 .raid_name
= "raid6",
124 .bg_flag
= BTRFS_BLOCK_GROUP_RAID6
,
125 .mindev_error
= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET
,
129 const char *btrfs_bg_type_to_raid_name(u64 flags
)
131 const int index
= btrfs_bg_flags_to_raid_index(flags
);
133 if (index
>= BTRFS_NR_RAID_TYPES
)
136 return btrfs_raid_array
[index
].raid_name
;
140 * Fill @buf with textual description of @bg_flags, no more than @size_buf
141 * bytes including terminating null byte.
143 void btrfs_describe_block_groups(u64 bg_flags
, char *buf
, u32 size_buf
)
148 u64 flags
= bg_flags
;
149 u32 size_bp
= size_buf
;
156 #define DESCRIBE_FLAG(flag, desc) \
158 if (flags & (flag)) { \
159 ret = snprintf(bp, size_bp, "%s|", (desc)); \
160 if (ret < 0 || ret >= size_bp) \
168 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA
, "data");
169 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM
, "system");
170 DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA
, "metadata");
172 DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE
, "single");
173 for (i
= 0; i
< BTRFS_NR_RAID_TYPES
; i
++)
174 DESCRIBE_FLAG(btrfs_raid_array
[i
].bg_flag
,
175 btrfs_raid_array
[i
].raid_name
);
179 ret
= snprintf(bp
, size_bp
, "0x%llx|", flags
);
183 if (size_bp
< size_buf
)
184 buf
[size_buf
- size_bp
- 1] = '\0'; /* remove last | */
187 * The text is trimmed, it's up to the caller to provide sufficiently
193 static int init_first_rw_device(struct btrfs_trans_handle
*trans
);
194 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info
*fs_info
);
195 static void btrfs_dev_stat_print_on_error(struct btrfs_device
*dev
);
196 static void btrfs_dev_stat_print_on_load(struct btrfs_device
*device
);
197 static int __btrfs_map_block(struct btrfs_fs_info
*fs_info
,
198 enum btrfs_map_op op
,
199 u64 logical
, u64
*length
,
200 struct btrfs_bio
**bbio_ret
,
201 int mirror_num
, int need_raid_map
);
207 * There are several mutexes that protect manipulation of devices and low-level
208 * structures like chunks but not block groups, extents or files
210 * uuid_mutex (global lock)
211 * ------------------------
212 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
213 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
214 * device) or requested by the device= mount option
216 * the mutex can be very coarse and can cover long-running operations
218 * protects: updates to fs_devices counters like missing devices, rw devices,
219 * seeding, structure cloning, opening/closing devices at mount/umount time
221 * global::fs_devs - add, remove, updates to the global list
223 * does not protect: manipulation of the fs_devices::devices list in general
224 * but in mount context it could be used to exclude list modifications by eg.
227 * btrfs_device::name - renames (write side), read is RCU
229 * fs_devices::device_list_mutex (per-fs, with RCU)
230 * ------------------------------------------------
231 * protects updates to fs_devices::devices, ie. adding and deleting
233 * simple list traversal with read-only actions can be done with RCU protection
235 * may be used to exclude some operations from running concurrently without any
236 * modifications to the list (see write_all_supers)
238 * Is not required at mount and close times, because our device list is
239 * protected by the uuid_mutex at that point.
243 * protects balance structures (status, state) and context accessed from
244 * several places (internally, ioctl)
248 * protects chunks, adding or removing during allocation, trim or when a new
249 * device is added/removed. Additionally it also protects post_commit_list of
250 * individual devices, since they can be added to the transaction's
251 * post_commit_list only with chunk_mutex held.
255 * a big lock that is held by the cleaner thread and prevents running subvolume
256 * cleaning together with relocation or delayed iputs
269 * Exclusive operations, BTRFS_FS_EXCL_OP
270 * ======================================
272 * Maintains the exclusivity of the following operations that apply to the
273 * whole filesystem and cannot run in parallel.
278 * - Device replace (*)
281 * The device operations (as above) can be in one of the following states:
287 * Only device operations marked with (*) can go into the Paused state for the
290 * - ioctl (only Balance can be Paused through ioctl)
291 * - filesystem remounted as read-only
292 * - filesystem unmounted and mounted as read-only
293 * - system power-cycle and filesystem mounted as read-only
294 * - filesystem or device errors leading to forced read-only
296 * BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
297 * During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
298 * A device operation in Paused or Running state can be canceled or resumed
299 * either by ioctl (Balance only) or when remounted as read-write.
300 * BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
304 DEFINE_MUTEX(uuid_mutex
);
305 static LIST_HEAD(fs_uuids
);
306 struct list_head
*btrfs_get_fs_uuids(void)
312 * alloc_fs_devices - allocate struct btrfs_fs_devices
313 * @fsid: if not NULL, copy the UUID to fs_devices::fsid
314 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
316 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
317 * The returned struct is not linked onto any lists and can be destroyed with
318 * kfree() right away.
320 static struct btrfs_fs_devices
*alloc_fs_devices(const u8
*fsid
,
321 const u8
*metadata_fsid
)
323 struct btrfs_fs_devices
*fs_devs
;
325 fs_devs
= kzalloc(sizeof(*fs_devs
), GFP_KERNEL
);
327 return ERR_PTR(-ENOMEM
);
329 mutex_init(&fs_devs
->device_list_mutex
);
331 INIT_LIST_HEAD(&fs_devs
->devices
);
332 INIT_LIST_HEAD(&fs_devs
->alloc_list
);
333 INIT_LIST_HEAD(&fs_devs
->fs_list
);
335 memcpy(fs_devs
->fsid
, fsid
, BTRFS_FSID_SIZE
);
338 memcpy(fs_devs
->metadata_uuid
, metadata_fsid
, BTRFS_FSID_SIZE
);
340 memcpy(fs_devs
->metadata_uuid
, fsid
, BTRFS_FSID_SIZE
);
345 void btrfs_free_device(struct btrfs_device
*device
)
347 WARN_ON(!list_empty(&device
->post_commit_list
));
348 rcu_string_free(device
->name
);
349 extent_io_tree_release(&device
->alloc_state
);
350 bio_put(device
->flush_bio
);
354 static void free_fs_devices(struct btrfs_fs_devices
*fs_devices
)
356 struct btrfs_device
*device
;
357 WARN_ON(fs_devices
->opened
);
358 while (!list_empty(&fs_devices
->devices
)) {
359 device
= list_entry(fs_devices
->devices
.next
,
360 struct btrfs_device
, dev_list
);
361 list_del(&device
->dev_list
);
362 btrfs_free_device(device
);
367 void __exit
btrfs_cleanup_fs_uuids(void)
369 struct btrfs_fs_devices
*fs_devices
;
371 while (!list_empty(&fs_uuids
)) {
372 fs_devices
= list_entry(fs_uuids
.next
,
373 struct btrfs_fs_devices
, fs_list
);
374 list_del(&fs_devices
->fs_list
);
375 free_fs_devices(fs_devices
);
380 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
381 * Returned struct is not linked onto any lists and must be destroyed using
384 static struct btrfs_device
*__alloc_device(void)
386 struct btrfs_device
*dev
;
388 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
390 return ERR_PTR(-ENOMEM
);
393 * Preallocate a bio that's always going to be used for flushing device
394 * barriers and matches the device lifespan
396 dev
->flush_bio
= bio_alloc_bioset(GFP_KERNEL
, 0, NULL
);
397 if (!dev
->flush_bio
) {
399 return ERR_PTR(-ENOMEM
);
402 INIT_LIST_HEAD(&dev
->dev_list
);
403 INIT_LIST_HEAD(&dev
->dev_alloc_list
);
404 INIT_LIST_HEAD(&dev
->post_commit_list
);
406 spin_lock_init(&dev
->io_lock
);
408 atomic_set(&dev
->reada_in_flight
, 0);
409 atomic_set(&dev
->dev_stats_ccnt
, 0);
410 btrfs_device_data_ordered_init(dev
);
411 INIT_RADIX_TREE(&dev
->reada_zones
, GFP_NOFS
& ~__GFP_DIRECT_RECLAIM
);
412 INIT_RADIX_TREE(&dev
->reada_extents
, GFP_NOFS
& ~__GFP_DIRECT_RECLAIM
);
413 extent_io_tree_init(NULL
, &dev
->alloc_state
, 0, NULL
);
418 static noinline
struct btrfs_fs_devices
*find_fsid(
419 const u8
*fsid
, const u8
*metadata_fsid
)
421 struct btrfs_fs_devices
*fs_devices
;
427 * Handle scanned device having completed its fsid change but
428 * belonging to a fs_devices that was created by first scanning
429 * a device which didn't have its fsid/metadata_uuid changed
430 * at all and the CHANGING_FSID_V2 flag set.
432 list_for_each_entry(fs_devices
, &fs_uuids
, fs_list
) {
433 if (fs_devices
->fsid_change
&&
434 memcmp(metadata_fsid
, fs_devices
->fsid
,
435 BTRFS_FSID_SIZE
) == 0 &&
436 memcmp(fs_devices
->fsid
, fs_devices
->metadata_uuid
,
437 BTRFS_FSID_SIZE
) == 0) {
442 * Handle scanned device having completed its fsid change but
443 * belonging to a fs_devices that was created by a device that
444 * has an outdated pair of fsid/metadata_uuid and
445 * CHANGING_FSID_V2 flag set.
447 list_for_each_entry(fs_devices
, &fs_uuids
, fs_list
) {
448 if (fs_devices
->fsid_change
&&
449 memcmp(fs_devices
->metadata_uuid
,
450 fs_devices
->fsid
, BTRFS_FSID_SIZE
) != 0 &&
451 memcmp(metadata_fsid
, fs_devices
->metadata_uuid
,
452 BTRFS_FSID_SIZE
) == 0) {
458 /* Handle non-split brain cases */
459 list_for_each_entry(fs_devices
, &fs_uuids
, fs_list
) {
461 if (memcmp(fsid
, fs_devices
->fsid
, BTRFS_FSID_SIZE
) == 0
462 && memcmp(metadata_fsid
, fs_devices
->metadata_uuid
,
463 BTRFS_FSID_SIZE
) == 0)
466 if (memcmp(fsid
, fs_devices
->fsid
, BTRFS_FSID_SIZE
) == 0)
474 btrfs_get_bdev_and_sb(const char *device_path
, fmode_t flags
, void *holder
,
475 int flush
, struct block_device
**bdev
,
476 struct buffer_head
**bh
)
480 *bdev
= blkdev_get_by_path(device_path
, flags
, holder
);
483 ret
= PTR_ERR(*bdev
);
488 filemap_write_and_wait((*bdev
)->bd_inode
->i_mapping
);
489 ret
= set_blocksize(*bdev
, BTRFS_BDEV_BLOCKSIZE
);
491 blkdev_put(*bdev
, flags
);
494 invalidate_bdev(*bdev
);
495 *bh
= btrfs_read_dev_super(*bdev
);
498 blkdev_put(*bdev
, flags
);
510 static void requeue_list(struct btrfs_pending_bios
*pending_bios
,
511 struct bio
*head
, struct bio
*tail
)
514 struct bio
*old_head
;
516 old_head
= pending_bios
->head
;
517 pending_bios
->head
= head
;
518 if (pending_bios
->tail
)
519 tail
->bi_next
= old_head
;
521 pending_bios
->tail
= tail
;
525 * we try to collect pending bios for a device so we don't get a large
526 * number of procs sending bios down to the same device. This greatly
527 * improves the schedulers ability to collect and merge the bios.
529 * But, it also turns into a long list of bios to process and that is sure
530 * to eventually make the worker thread block. The solution here is to
531 * make some progress and then put this work struct back at the end of
532 * the list if the block device is congested. This way, multiple devices
533 * can make progress from a single worker thread.
535 static noinline
void run_scheduled_bios(struct btrfs_device
*device
)
537 struct btrfs_fs_info
*fs_info
= device
->fs_info
;
539 struct backing_dev_info
*bdi
;
540 struct btrfs_pending_bios
*pending_bios
;
544 unsigned long num_run
;
545 unsigned long batch_run
= 0;
546 unsigned long last_waited
= 0;
548 int sync_pending
= 0;
549 struct blk_plug plug
;
552 * this function runs all the bios we've collected for
553 * a particular device. We don't want to wander off to
554 * another device without first sending all of these down.
555 * So, setup a plug here and finish it off before we return
557 blk_start_plug(&plug
);
559 bdi
= device
->bdev
->bd_bdi
;
562 spin_lock(&device
->io_lock
);
567 /* take all the bios off the list at once and process them
568 * later on (without the lock held). But, remember the
569 * tail and other pointers so the bios can be properly reinserted
570 * into the list if we hit congestion
572 if (!force_reg
&& device
->pending_sync_bios
.head
) {
573 pending_bios
= &device
->pending_sync_bios
;
576 pending_bios
= &device
->pending_bios
;
580 pending
= pending_bios
->head
;
581 tail
= pending_bios
->tail
;
582 WARN_ON(pending
&& !tail
);
585 * if pending was null this time around, no bios need processing
586 * at all and we can stop. Otherwise it'll loop back up again
587 * and do an additional check so no bios are missed.
589 * device->running_pending is used to synchronize with the
592 if (device
->pending_sync_bios
.head
== NULL
&&
593 device
->pending_bios
.head
== NULL
) {
595 device
->running_pending
= 0;
598 device
->running_pending
= 1;
601 pending_bios
->head
= NULL
;
602 pending_bios
->tail
= NULL
;
604 spin_unlock(&device
->io_lock
);
609 /* we want to work on both lists, but do more bios on the
610 * sync list than the regular list
613 pending_bios
!= &device
->pending_sync_bios
&&
614 device
->pending_sync_bios
.head
) ||
615 (num_run
> 64 && pending_bios
== &device
->pending_sync_bios
&&
616 device
->pending_bios
.head
)) {
617 spin_lock(&device
->io_lock
);
618 requeue_list(pending_bios
, pending
, tail
);
623 pending
= pending
->bi_next
;
626 BUG_ON(atomic_read(&cur
->__bi_cnt
) == 0);
629 * if we're doing the sync list, record that our
630 * plug has some sync requests on it
632 * If we're doing the regular list and there are
633 * sync requests sitting around, unplug before
636 if (pending_bios
== &device
->pending_sync_bios
) {
638 } else if (sync_pending
) {
639 blk_finish_plug(&plug
);
640 blk_start_plug(&plug
);
644 btrfsic_submit_bio(cur
);
651 * we made progress, there is more work to do and the bdi
652 * is now congested. Back off and let other work structs
655 if (pending
&& bdi_write_congested(bdi
) && batch_run
> 8 &&
656 fs_info
->fs_devices
->open_devices
> 1) {
657 struct io_context
*ioc
;
659 ioc
= current
->io_context
;
662 * the main goal here is that we don't want to
663 * block if we're going to be able to submit
664 * more requests without blocking.
666 * This code does two great things, it pokes into
667 * the elevator code from a filesystem _and_
668 * it makes assumptions about how batching works.
670 if (ioc
&& ioc
->nr_batch_requests
> 0 &&
671 time_before(jiffies
, ioc
->last_waited
+ HZ
/50UL) &&
673 ioc
->last_waited
== last_waited
)) {
675 * we want to go through our batch of
676 * requests and stop. So, we copy out
677 * the ioc->last_waited time and test
678 * against it before looping
680 last_waited
= ioc
->last_waited
;
684 spin_lock(&device
->io_lock
);
685 requeue_list(pending_bios
, pending
, tail
);
686 device
->running_pending
= 1;
688 spin_unlock(&device
->io_lock
);
689 btrfs_queue_work(fs_info
->submit_workers
,
699 spin_lock(&device
->io_lock
);
700 if (device
->pending_bios
.head
|| device
->pending_sync_bios
.head
)
702 spin_unlock(&device
->io_lock
);
705 blk_finish_plug(&plug
);
708 static void pending_bios_fn(struct btrfs_work
*work
)
710 struct btrfs_device
*device
;
712 device
= container_of(work
, struct btrfs_device
, work
);
713 run_scheduled_bios(device
);
716 static bool device_path_matched(const char *path
, struct btrfs_device
*device
)
721 found
= strcmp(rcu_str_deref(device
->name
), path
);
728 * Search and remove all stale (devices which are not mounted) devices.
729 * When both inputs are NULL, it will search and release all stale devices.
730 * path: Optional. When provided will it release all unmounted devices
731 * matching this path only.
732 * skip_dev: Optional. Will skip this device when searching for the stale
734 * Return: 0 for success or if @path is NULL.
735 * -EBUSY if @path is a mounted device.
736 * -ENOENT if @path does not match any device in the list.
738 static int btrfs_free_stale_devices(const char *path
,
739 struct btrfs_device
*skip_device
)
741 struct btrfs_fs_devices
*fs_devices
, *tmp_fs_devices
;
742 struct btrfs_device
*device
, *tmp_device
;
748 list_for_each_entry_safe(fs_devices
, tmp_fs_devices
, &fs_uuids
, fs_list
) {
750 mutex_lock(&fs_devices
->device_list_mutex
);
751 list_for_each_entry_safe(device
, tmp_device
,
752 &fs_devices
->devices
, dev_list
) {
753 if (skip_device
&& skip_device
== device
)
755 if (path
&& !device
->name
)
757 if (path
&& !device_path_matched(path
, device
))
759 if (fs_devices
->opened
) {
760 /* for an already deleted device return 0 */
761 if (path
&& ret
!= 0)
766 /* delete the stale device */
767 fs_devices
->num_devices
--;
768 list_del(&device
->dev_list
);
769 btrfs_free_device(device
);
772 if (fs_devices
->num_devices
== 0)
775 mutex_unlock(&fs_devices
->device_list_mutex
);
777 if (fs_devices
->num_devices
== 0) {
778 btrfs_sysfs_remove_fsid(fs_devices
);
779 list_del(&fs_devices
->fs_list
);
780 free_fs_devices(fs_devices
);
788 * This is only used on mount, and we are protected from competing things
789 * messing with our fs_devices by the uuid_mutex, thus we do not need the
790 * fs_devices->device_list_mutex here.
792 static int btrfs_open_one_device(struct btrfs_fs_devices
*fs_devices
,
793 struct btrfs_device
*device
, fmode_t flags
,
796 struct request_queue
*q
;
797 struct block_device
*bdev
;
798 struct buffer_head
*bh
;
799 struct btrfs_super_block
*disk_super
;
808 ret
= btrfs_get_bdev_and_sb(device
->name
->str
, flags
, holder
, 1,
813 disk_super
= (struct btrfs_super_block
*)bh
->b_data
;
814 devid
= btrfs_stack_device_id(&disk_super
->dev_item
);
815 if (devid
!= device
->devid
)
818 if (memcmp(device
->uuid
, disk_super
->dev_item
.uuid
, BTRFS_UUID_SIZE
))
821 device
->generation
= btrfs_super_generation(disk_super
);
823 if (btrfs_super_flags(disk_super
) & BTRFS_SUPER_FLAG_SEEDING
) {
824 if (btrfs_super_incompat_flags(disk_super
) &
825 BTRFS_FEATURE_INCOMPAT_METADATA_UUID
) {
827 "BTRFS: Invalid seeding and uuid-changed device detected\n");
831 clear_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
);
832 fs_devices
->seeding
= 1;
834 if (bdev_read_only(bdev
))
835 clear_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
);
837 set_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
);
840 q
= bdev_get_queue(bdev
);
841 if (!blk_queue_nonrot(q
))
842 fs_devices
->rotating
= 1;
845 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA
, &device
->dev_state
);
846 device
->mode
= flags
;
848 fs_devices
->open_devices
++;
849 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
) &&
850 device
->devid
!= BTRFS_DEV_REPLACE_DEVID
) {
851 fs_devices
->rw_devices
++;
852 list_add_tail(&device
->dev_alloc_list
, &fs_devices
->alloc_list
);
860 blkdev_put(bdev
, flags
);
866 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
867 * being created with a disk that has already completed its fsid change.
869 static struct btrfs_fs_devices
*find_fsid_inprogress(
870 struct btrfs_super_block
*disk_super
)
872 struct btrfs_fs_devices
*fs_devices
;
874 list_for_each_entry(fs_devices
, &fs_uuids
, fs_list
) {
875 if (memcmp(fs_devices
->metadata_uuid
, fs_devices
->fsid
,
876 BTRFS_FSID_SIZE
) != 0 &&
877 memcmp(fs_devices
->metadata_uuid
, disk_super
->fsid
,
878 BTRFS_FSID_SIZE
) == 0 && !fs_devices
->fsid_change
) {
887 static struct btrfs_fs_devices
*find_fsid_changed(
888 struct btrfs_super_block
*disk_super
)
890 struct btrfs_fs_devices
*fs_devices
;
893 * Handles the case where scanned device is part of an fs that had
894 * multiple successful changes of FSID but curently device didn't
895 * observe it. Meaning our fsid will be different than theirs. We need
896 * to handle two subcases :
897 * 1 - The fs still continues to have different METADATA/FSID uuids.
898 * 2 - The fs is switched back to its original FSID (METADATA/FSID
901 list_for_each_entry(fs_devices
, &fs_uuids
, fs_list
) {
903 if (memcmp(fs_devices
->metadata_uuid
, fs_devices
->fsid
,
904 BTRFS_FSID_SIZE
) != 0 &&
905 memcmp(fs_devices
->metadata_uuid
, disk_super
->metadata_uuid
,
906 BTRFS_FSID_SIZE
) == 0 &&
907 memcmp(fs_devices
->fsid
, disk_super
->fsid
,
908 BTRFS_FSID_SIZE
) != 0)
911 /* Unchanged UUIDs */
912 if (memcmp(fs_devices
->metadata_uuid
, fs_devices
->fsid
,
913 BTRFS_FSID_SIZE
) == 0 &&
914 memcmp(fs_devices
->fsid
, disk_super
->metadata_uuid
,
915 BTRFS_FSID_SIZE
) == 0)
922 static struct btrfs_fs_devices
*find_fsid_reverted_metadata(
923 struct btrfs_super_block
*disk_super
)
925 struct btrfs_fs_devices
*fs_devices
;
928 * Handle the case where the scanned device is part of an fs whose last
929 * metadata UUID change reverted it to the original FSID. At the same
930 * time * fs_devices was first created by another constitutent device
931 * which didn't fully observe the operation. This results in an
932 * btrfs_fs_devices created with metadata/fsid different AND
933 * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
934 * fs_devices equal to the FSID of the disk.
936 list_for_each_entry(fs_devices
, &fs_uuids
, fs_list
) {
937 if (memcmp(fs_devices
->fsid
, fs_devices
->metadata_uuid
,
938 BTRFS_FSID_SIZE
) != 0 &&
939 memcmp(fs_devices
->metadata_uuid
, disk_super
->fsid
,
940 BTRFS_FSID_SIZE
) == 0 &&
941 fs_devices
->fsid_change
)
948 * Add new device to list of registered devices
951 * device pointer which was just added or updated when successful
952 * error pointer when failed
954 static noinline
struct btrfs_device
*device_list_add(const char *path
,
955 struct btrfs_super_block
*disk_super
,
956 bool *new_device_added
)
958 struct btrfs_device
*device
;
959 struct btrfs_fs_devices
*fs_devices
= NULL
;
960 struct rcu_string
*name
;
961 u64 found_transid
= btrfs_super_generation(disk_super
);
962 u64 devid
= btrfs_stack_device_id(&disk_super
->dev_item
);
963 bool has_metadata_uuid
= (btrfs_super_incompat_flags(disk_super
) &
964 BTRFS_FEATURE_INCOMPAT_METADATA_UUID
);
965 bool fsid_change_in_progress
= (btrfs_super_flags(disk_super
) &
966 BTRFS_SUPER_FLAG_CHANGING_FSID_V2
);
968 if (fsid_change_in_progress
) {
969 if (!has_metadata_uuid
) {
971 * When we have an image which has CHANGING_FSID_V2 set
972 * it might belong to either a filesystem which has
973 * disks with completed fsid change or it might belong
974 * to fs with no UUID changes in effect, handle both.
976 fs_devices
= find_fsid_inprogress(disk_super
);
978 fs_devices
= find_fsid(disk_super
->fsid
, NULL
);
980 fs_devices
= find_fsid_changed(disk_super
);
982 } else if (has_metadata_uuid
) {
983 fs_devices
= find_fsid(disk_super
->fsid
,
984 disk_super
->metadata_uuid
);
986 fs_devices
= find_fsid_reverted_metadata(disk_super
);
988 fs_devices
= find_fsid(disk_super
->fsid
, NULL
);
993 if (has_metadata_uuid
)
994 fs_devices
= alloc_fs_devices(disk_super
->fsid
,
995 disk_super
->metadata_uuid
);
997 fs_devices
= alloc_fs_devices(disk_super
->fsid
, NULL
);
999 if (IS_ERR(fs_devices
))
1000 return ERR_CAST(fs_devices
);
1002 fs_devices
->fsid_change
= fsid_change_in_progress
;
1004 mutex_lock(&fs_devices
->device_list_mutex
);
1005 list_add(&fs_devices
->fs_list
, &fs_uuids
);
1009 mutex_lock(&fs_devices
->device_list_mutex
);
1010 device
= btrfs_find_device(fs_devices
, devid
,
1011 disk_super
->dev_item
.uuid
, NULL
, false);
1014 * If this disk has been pulled into an fs devices created by
1015 * a device which had the CHANGING_FSID_V2 flag then replace the
1016 * metadata_uuid/fsid values of the fs_devices.
1018 if (fs_devices
->fsid_change
&&
1019 found_transid
> fs_devices
->latest_generation
) {
1020 memcpy(fs_devices
->fsid
, disk_super
->fsid
,
1023 if (has_metadata_uuid
)
1024 memcpy(fs_devices
->metadata_uuid
,
1025 disk_super
->metadata_uuid
,
1028 memcpy(fs_devices
->metadata_uuid
,
1029 disk_super
->fsid
, BTRFS_FSID_SIZE
);
1031 fs_devices
->fsid_change
= false;
1036 if (fs_devices
->opened
) {
1037 mutex_unlock(&fs_devices
->device_list_mutex
);
1038 return ERR_PTR(-EBUSY
);
1041 device
= btrfs_alloc_device(NULL
, &devid
,
1042 disk_super
->dev_item
.uuid
);
1043 if (IS_ERR(device
)) {
1044 mutex_unlock(&fs_devices
->device_list_mutex
);
1045 /* we can safely leave the fs_devices entry around */
1049 name
= rcu_string_strdup(path
, GFP_NOFS
);
1051 btrfs_free_device(device
);
1052 mutex_unlock(&fs_devices
->device_list_mutex
);
1053 return ERR_PTR(-ENOMEM
);
1055 rcu_assign_pointer(device
->name
, name
);
1057 list_add_rcu(&device
->dev_list
, &fs_devices
->devices
);
1058 fs_devices
->num_devices
++;
1060 device
->fs_devices
= fs_devices
;
1061 *new_device_added
= true;
1063 if (disk_super
->label
[0])
1064 pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
1065 disk_super
->label
, devid
, found_transid
, path
);
1067 pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
1068 disk_super
->fsid
, devid
, found_transid
, path
);
1070 } else if (!device
->name
|| strcmp(device
->name
->str
, path
)) {
1072 * When FS is already mounted.
1073 * 1. If you are here and if the device->name is NULL that
1074 * means this device was missing at time of FS mount.
1075 * 2. If you are here and if the device->name is different
1076 * from 'path' that means either
1077 * a. The same device disappeared and reappeared with
1078 * different name. or
1079 * b. The missing-disk-which-was-replaced, has
1082 * We must allow 1 and 2a above. But 2b would be a spurious
1083 * and unintentional.
1085 * Further in case of 1 and 2a above, the disk at 'path'
1086 * would have missed some transaction when it was away and
1087 * in case of 2a the stale bdev has to be updated as well.
1088 * 2b must not be allowed at all time.
1092 * For now, we do allow update to btrfs_fs_device through the
1093 * btrfs dev scan cli after FS has been mounted. We're still
1094 * tracking a problem where systems fail mount by subvolume id
1095 * when we reject replacement on a mounted FS.
1097 if (!fs_devices
->opened
&& found_transid
< device
->generation
) {
1099 * That is if the FS is _not_ mounted and if you
1100 * are here, that means there is more than one
1101 * disk with same uuid and devid.We keep the one
1102 * with larger generation number or the last-in if
1103 * generation are equal.
1105 mutex_unlock(&fs_devices
->device_list_mutex
);
1106 return ERR_PTR(-EEXIST
);
1110 * We are going to replace the device path for a given devid,
1111 * make sure it's the same device if the device is mounted
1114 struct block_device
*path_bdev
;
1116 path_bdev
= lookup_bdev(path
);
1117 if (IS_ERR(path_bdev
)) {
1118 mutex_unlock(&fs_devices
->device_list_mutex
);
1119 return ERR_CAST(path_bdev
);
1122 if (device
->bdev
!= path_bdev
) {
1124 mutex_unlock(&fs_devices
->device_list_mutex
);
1126 * device->fs_info may not be reliable here, so
1127 * pass in a NULL instead. This avoids a
1128 * possible use-after-free when the fs_info and
1129 * fs_info->sb are already torn down.
1131 btrfs_warn_in_rcu(NULL
,
1132 "duplicate device %s devid %llu generation %llu scanned by %s (%d)",
1133 path
, devid
, found_transid
,
1135 task_pid_nr(current
));
1136 return ERR_PTR(-EEXIST
);
1139 btrfs_info_in_rcu(device
->fs_info
,
1140 "devid %llu device path %s changed to %s scanned by %s (%d)",
1141 devid
, rcu_str_deref(device
->name
),
1142 path
, current
->comm
,
1143 task_pid_nr(current
));
1146 name
= rcu_string_strdup(path
, GFP_NOFS
);
1148 mutex_unlock(&fs_devices
->device_list_mutex
);
1149 return ERR_PTR(-ENOMEM
);
1151 rcu_string_free(device
->name
);
1152 rcu_assign_pointer(device
->name
, name
);
1153 if (test_bit(BTRFS_DEV_STATE_MISSING
, &device
->dev_state
)) {
1154 fs_devices
->missing_devices
--;
1155 clear_bit(BTRFS_DEV_STATE_MISSING
, &device
->dev_state
);
1160 * Unmount does not free the btrfs_device struct but would zero
1161 * generation along with most of the other members. So just update
1162 * it back. We need it to pick the disk with largest generation
1165 if (!fs_devices
->opened
) {
1166 device
->generation
= found_transid
;
1167 fs_devices
->latest_generation
= max_t(u64
, found_transid
,
1168 fs_devices
->latest_generation
);
1171 fs_devices
->total_devices
= btrfs_super_num_devices(disk_super
);
1173 mutex_unlock(&fs_devices
->device_list_mutex
);
1177 static struct btrfs_fs_devices
*clone_fs_devices(struct btrfs_fs_devices
*orig
)
1179 struct btrfs_fs_devices
*fs_devices
;
1180 struct btrfs_device
*device
;
1181 struct btrfs_device
*orig_dev
;
1184 fs_devices
= alloc_fs_devices(orig
->fsid
, NULL
);
1185 if (IS_ERR(fs_devices
))
1188 mutex_lock(&orig
->device_list_mutex
);
1189 fs_devices
->total_devices
= orig
->total_devices
;
1191 list_for_each_entry(orig_dev
, &orig
->devices
, dev_list
) {
1192 struct rcu_string
*name
;
1194 device
= btrfs_alloc_device(NULL
, &orig_dev
->devid
,
1196 if (IS_ERR(device
)) {
1197 ret
= PTR_ERR(device
);
1202 * This is ok to do without rcu read locked because we hold the
1203 * uuid mutex so nothing we touch in here is going to disappear.
1205 if (orig_dev
->name
) {
1206 name
= rcu_string_strdup(orig_dev
->name
->str
,
1209 btrfs_free_device(device
);
1213 rcu_assign_pointer(device
->name
, name
);
1216 list_add(&device
->dev_list
, &fs_devices
->devices
);
1217 device
->fs_devices
= fs_devices
;
1218 fs_devices
->num_devices
++;
1220 mutex_unlock(&orig
->device_list_mutex
);
1223 mutex_unlock(&orig
->device_list_mutex
);
1224 free_fs_devices(fs_devices
);
1225 return ERR_PTR(ret
);
1229 * After we have read the system tree and know devids belonging to
1230 * this filesystem, remove the device which does not belong there.
1232 void btrfs_free_extra_devids(struct btrfs_fs_devices
*fs_devices
, int step
)
1234 struct btrfs_device
*device
, *next
;
1235 struct btrfs_device
*latest_dev
= NULL
;
1237 mutex_lock(&uuid_mutex
);
1239 /* This is the initialized path, it is safe to release the devices. */
1240 list_for_each_entry_safe(device
, next
, &fs_devices
->devices
, dev_list
) {
1241 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA
,
1242 &device
->dev_state
)) {
1243 if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT
,
1244 &device
->dev_state
) &&
1245 !test_bit(BTRFS_DEV_STATE_MISSING
,
1246 &device
->dev_state
) &&
1248 device
->generation
> latest_dev
->generation
)) {
1249 latest_dev
= device
;
1255 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID,
1256 * in btrfs_init_dev_replace() so just continue.
1258 if (device
->devid
== BTRFS_DEV_REPLACE_DEVID
)
1262 blkdev_put(device
->bdev
, device
->mode
);
1263 device
->bdev
= NULL
;
1264 fs_devices
->open_devices
--;
1266 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
1267 list_del_init(&device
->dev_alloc_list
);
1268 clear_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
);
1270 list_del_init(&device
->dev_list
);
1271 fs_devices
->num_devices
--;
1272 btrfs_free_device(device
);
1275 if (fs_devices
->seed
) {
1276 fs_devices
= fs_devices
->seed
;
1280 fs_devices
->latest_bdev
= latest_dev
->bdev
;
1282 mutex_unlock(&uuid_mutex
);
1285 static void btrfs_close_bdev(struct btrfs_device
*device
)
1290 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
1291 sync_blockdev(device
->bdev
);
1292 invalidate_bdev(device
->bdev
);
1295 blkdev_put(device
->bdev
, device
->mode
);
1298 static void btrfs_close_one_device(struct btrfs_device
*device
)
1300 struct btrfs_fs_devices
*fs_devices
= device
->fs_devices
;
1301 struct btrfs_device
*new_device
;
1302 struct rcu_string
*name
;
1305 fs_devices
->open_devices
--;
1307 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
) &&
1308 device
->devid
!= BTRFS_DEV_REPLACE_DEVID
) {
1309 list_del_init(&device
->dev_alloc_list
);
1310 fs_devices
->rw_devices
--;
1313 if (test_bit(BTRFS_DEV_STATE_MISSING
, &device
->dev_state
))
1314 fs_devices
->missing_devices
--;
1316 btrfs_close_bdev(device
);
1318 new_device
= btrfs_alloc_device(NULL
, &device
->devid
,
1320 BUG_ON(IS_ERR(new_device
)); /* -ENOMEM */
1322 /* Safe because we are under uuid_mutex */
1324 name
= rcu_string_strdup(device
->name
->str
, GFP_NOFS
);
1325 BUG_ON(!name
); /* -ENOMEM */
1326 rcu_assign_pointer(new_device
->name
, name
);
1329 list_replace_rcu(&device
->dev_list
, &new_device
->dev_list
);
1330 new_device
->fs_devices
= device
->fs_devices
;
1333 btrfs_free_device(device
);
1336 static int close_fs_devices(struct btrfs_fs_devices
*fs_devices
)
1338 struct btrfs_device
*device
, *tmp
;
1340 if (--fs_devices
->opened
> 0)
1343 mutex_lock(&fs_devices
->device_list_mutex
);
1344 list_for_each_entry_safe(device
, tmp
, &fs_devices
->devices
, dev_list
) {
1345 btrfs_close_one_device(device
);
1347 mutex_unlock(&fs_devices
->device_list_mutex
);
1349 WARN_ON(fs_devices
->open_devices
);
1350 WARN_ON(fs_devices
->rw_devices
);
1351 fs_devices
->opened
= 0;
1352 fs_devices
->seeding
= 0;
1357 int btrfs_close_devices(struct btrfs_fs_devices
*fs_devices
)
1359 struct btrfs_fs_devices
*seed_devices
= NULL
;
1362 mutex_lock(&uuid_mutex
);
1363 ret
= close_fs_devices(fs_devices
);
1364 if (!fs_devices
->opened
) {
1365 seed_devices
= fs_devices
->seed
;
1366 fs_devices
->seed
= NULL
;
1368 mutex_unlock(&uuid_mutex
);
1370 while (seed_devices
) {
1371 fs_devices
= seed_devices
;
1372 seed_devices
= fs_devices
->seed
;
1373 close_fs_devices(fs_devices
);
1374 free_fs_devices(fs_devices
);
1379 static int open_fs_devices(struct btrfs_fs_devices
*fs_devices
,
1380 fmode_t flags
, void *holder
)
1382 struct btrfs_device
*device
;
1383 struct btrfs_device
*latest_dev
= NULL
;
1386 flags
|= FMODE_EXCL
;
1388 list_for_each_entry(device
, &fs_devices
->devices
, dev_list
) {
1389 /* Just open everything we can; ignore failures here */
1390 if (btrfs_open_one_device(fs_devices
, device
, flags
, holder
))
1394 device
->generation
> latest_dev
->generation
)
1395 latest_dev
= device
;
1397 if (fs_devices
->open_devices
== 0) {
1401 fs_devices
->opened
= 1;
1402 fs_devices
->latest_bdev
= latest_dev
->bdev
;
1403 fs_devices
->total_rw_bytes
= 0;
1408 static int devid_cmp(void *priv
, struct list_head
*a
, struct list_head
*b
)
1410 struct btrfs_device
*dev1
, *dev2
;
1412 dev1
= list_entry(a
, struct btrfs_device
, dev_list
);
1413 dev2
= list_entry(b
, struct btrfs_device
, dev_list
);
1415 if (dev1
->devid
< dev2
->devid
)
1417 else if (dev1
->devid
> dev2
->devid
)
1422 int btrfs_open_devices(struct btrfs_fs_devices
*fs_devices
,
1423 fmode_t flags
, void *holder
)
1427 lockdep_assert_held(&uuid_mutex
);
1429 * The device_list_mutex cannot be taken here in case opening the
1430 * underlying device takes further locks like bd_mutex.
1432 * We also don't need the lock here as this is called during mount and
1433 * exclusion is provided by uuid_mutex
1436 if (fs_devices
->opened
) {
1437 fs_devices
->opened
++;
1440 list_sort(NULL
, &fs_devices
->devices
, devid_cmp
);
1441 ret
= open_fs_devices(fs_devices
, flags
, holder
);
1447 static void btrfs_release_disk_super(struct page
*page
)
1453 static int btrfs_read_disk_super(struct block_device
*bdev
, u64 bytenr
,
1455 struct btrfs_super_block
**disk_super
)
1460 /* make sure our super fits in the device */
1461 if (bytenr
+ PAGE_SIZE
>= i_size_read(bdev
->bd_inode
))
1464 /* make sure our super fits in the page */
1465 if (sizeof(**disk_super
) > PAGE_SIZE
)
1468 /* make sure our super doesn't straddle pages on disk */
1469 index
= bytenr
>> PAGE_SHIFT
;
1470 if ((bytenr
+ sizeof(**disk_super
) - 1) >> PAGE_SHIFT
!= index
)
1473 /* pull in the page with our super */
1474 *page
= read_cache_page_gfp(bdev
->bd_inode
->i_mapping
,
1477 if (IS_ERR_OR_NULL(*page
))
1482 /* align our pointer to the offset of the super block */
1483 *disk_super
= p
+ offset_in_page(bytenr
);
1485 if (btrfs_super_bytenr(*disk_super
) != bytenr
||
1486 btrfs_super_magic(*disk_super
) != BTRFS_MAGIC
) {
1487 btrfs_release_disk_super(*page
);
1491 if ((*disk_super
)->label
[0] &&
1492 (*disk_super
)->label
[BTRFS_LABEL_SIZE
- 1])
1493 (*disk_super
)->label
[BTRFS_LABEL_SIZE
- 1] = '\0';
1498 int btrfs_forget_devices(const char *path
)
1502 mutex_lock(&uuid_mutex
);
1503 ret
= btrfs_free_stale_devices(strlen(path
) ? path
: NULL
, NULL
);
1504 mutex_unlock(&uuid_mutex
);
1510 * Look for a btrfs signature on a device. This may be called out of the mount path
1511 * and we are not allowed to call set_blocksize during the scan. The superblock
1512 * is read via pagecache
1514 struct btrfs_device
*btrfs_scan_one_device(const char *path
, fmode_t flags
,
1517 struct btrfs_super_block
*disk_super
;
1518 bool new_device_added
= false;
1519 struct btrfs_device
*device
= NULL
;
1520 struct block_device
*bdev
;
1524 lockdep_assert_held(&uuid_mutex
);
1527 * we would like to check all the supers, but that would make
1528 * a btrfs mount succeed after a mkfs from a different FS.
1529 * So, we need to add a special mount option to scan for
1530 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1532 bytenr
= btrfs_sb_offset(0);
1533 flags
|= FMODE_EXCL
;
1535 bdev
= blkdev_get_by_path(path
, flags
, holder
);
1537 return ERR_CAST(bdev
);
1539 if (btrfs_read_disk_super(bdev
, bytenr
, &page
, &disk_super
)) {
1540 device
= ERR_PTR(-EINVAL
);
1541 goto error_bdev_put
;
1544 device
= device_list_add(path
, disk_super
, &new_device_added
);
1545 if (!IS_ERR(device
)) {
1546 if (new_device_added
)
1547 btrfs_free_stale_devices(path
, device
);
1550 btrfs_release_disk_super(page
);
1553 blkdev_put(bdev
, flags
);
1559 * Try to find a chunk that intersects [start, start + len] range and when one
1560 * such is found, record the end of it in *start
1562 static bool contains_pending_extent(struct btrfs_device
*device
, u64
*start
,
1565 u64 physical_start
, physical_end
;
1567 lockdep_assert_held(&device
->fs_info
->chunk_mutex
);
1569 if (!find_first_extent_bit(&device
->alloc_state
, *start
,
1570 &physical_start
, &physical_end
,
1571 CHUNK_ALLOCATED
, NULL
)) {
1573 if (in_range(physical_start
, *start
, len
) ||
1574 in_range(*start
, physical_start
,
1575 physical_end
- physical_start
)) {
1576 *start
= physical_end
+ 1;
1585 * find_free_dev_extent_start - find free space in the specified device
1586 * @device: the device which we search the free space in
1587 * @num_bytes: the size of the free space that we need
1588 * @search_start: the position from which to begin the search
1589 * @start: store the start of the free space.
1590 * @len: the size of the free space. that we find, or the size
1591 * of the max free space if we don't find suitable free space
1593 * this uses a pretty simple search, the expectation is that it is
1594 * called very infrequently and that a given device has a small number
1597 * @start is used to store the start of the free space if we find. But if we
1598 * don't find suitable free space, it will be used to store the start position
1599 * of the max free space.
1601 * @len is used to store the size of the free space that we find.
1602 * But if we don't find suitable free space, it is used to store the size of
1603 * the max free space.
1605 * NOTE: This function will search *commit* root of device tree, and does extra
1606 * check to ensure dev extents are not double allocated.
1607 * This makes the function safe to allocate dev extents but may not report
1608 * correct usable device space, as device extent freed in current transaction
1609 * is not reported as avaiable.
1611 static int find_free_dev_extent_start(struct btrfs_device
*device
,
1612 u64 num_bytes
, u64 search_start
, u64
*start
,
1615 struct btrfs_fs_info
*fs_info
= device
->fs_info
;
1616 struct btrfs_root
*root
= fs_info
->dev_root
;
1617 struct btrfs_key key
;
1618 struct btrfs_dev_extent
*dev_extent
;
1619 struct btrfs_path
*path
;
1624 u64 search_end
= device
->total_bytes
;
1627 struct extent_buffer
*l
;
1630 * We don't want to overwrite the superblock on the drive nor any area
1631 * used by the boot loader (grub for example), so we make sure to start
1632 * at an offset of at least 1MB.
1634 search_start
= max_t(u64
, search_start
, SZ_1M
);
1636 path
= btrfs_alloc_path();
1640 max_hole_start
= search_start
;
1644 if (search_start
>= search_end
||
1645 test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
)) {
1650 path
->reada
= READA_FORWARD
;
1651 path
->search_commit_root
= 1;
1652 path
->skip_locking
= 1;
1654 key
.objectid
= device
->devid
;
1655 key
.offset
= search_start
;
1656 key
.type
= BTRFS_DEV_EXTENT_KEY
;
1658 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
1662 ret
= btrfs_previous_item(root
, path
, key
.objectid
, key
.type
);
1669 slot
= path
->slots
[0];
1670 if (slot
>= btrfs_header_nritems(l
)) {
1671 ret
= btrfs_next_leaf(root
, path
);
1679 btrfs_item_key_to_cpu(l
, &key
, slot
);
1681 if (key
.objectid
< device
->devid
)
1684 if (key
.objectid
> device
->devid
)
1687 if (key
.type
!= BTRFS_DEV_EXTENT_KEY
)
1690 if (key
.offset
> search_start
) {
1691 hole_size
= key
.offset
- search_start
;
1694 * Have to check before we set max_hole_start, otherwise
1695 * we could end up sending back this offset anyway.
1697 if (contains_pending_extent(device
, &search_start
,
1699 if (key
.offset
>= search_start
)
1700 hole_size
= key
.offset
- search_start
;
1705 if (hole_size
> max_hole_size
) {
1706 max_hole_start
= search_start
;
1707 max_hole_size
= hole_size
;
1711 * If this free space is greater than which we need,
1712 * it must be the max free space that we have found
1713 * until now, so max_hole_start must point to the start
1714 * of this free space and the length of this free space
1715 * is stored in max_hole_size. Thus, we return
1716 * max_hole_start and max_hole_size and go back to the
1719 if (hole_size
>= num_bytes
) {
1725 dev_extent
= btrfs_item_ptr(l
, slot
, struct btrfs_dev_extent
);
1726 extent_end
= key
.offset
+ btrfs_dev_extent_length(l
,
1728 if (extent_end
> search_start
)
1729 search_start
= extent_end
;
1736 * At this point, search_start should be the end of
1737 * allocated dev extents, and when shrinking the device,
1738 * search_end may be smaller than search_start.
1740 if (search_end
> search_start
) {
1741 hole_size
= search_end
- search_start
;
1743 if (contains_pending_extent(device
, &search_start
, hole_size
)) {
1744 btrfs_release_path(path
);
1748 if (hole_size
> max_hole_size
) {
1749 max_hole_start
= search_start
;
1750 max_hole_size
= hole_size
;
1755 if (max_hole_size
< num_bytes
)
1761 btrfs_free_path(path
);
1762 *start
= max_hole_start
;
1764 *len
= max_hole_size
;
1768 int find_free_dev_extent(struct btrfs_device
*device
, u64 num_bytes
,
1769 u64
*start
, u64
*len
)
1771 /* FIXME use last free of some kind */
1772 return find_free_dev_extent_start(device
, num_bytes
, 0, start
, len
);
1775 static int btrfs_free_dev_extent(struct btrfs_trans_handle
*trans
,
1776 struct btrfs_device
*device
,
1777 u64 start
, u64
*dev_extent_len
)
1779 struct btrfs_fs_info
*fs_info
= device
->fs_info
;
1780 struct btrfs_root
*root
= fs_info
->dev_root
;
1782 struct btrfs_path
*path
;
1783 struct btrfs_key key
;
1784 struct btrfs_key found_key
;
1785 struct extent_buffer
*leaf
= NULL
;
1786 struct btrfs_dev_extent
*extent
= NULL
;
1788 path
= btrfs_alloc_path();
1792 key
.objectid
= device
->devid
;
1794 key
.type
= BTRFS_DEV_EXTENT_KEY
;
1796 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
1798 ret
= btrfs_previous_item(root
, path
, key
.objectid
,
1799 BTRFS_DEV_EXTENT_KEY
);
1802 leaf
= path
->nodes
[0];
1803 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
1804 extent
= btrfs_item_ptr(leaf
, path
->slots
[0],
1805 struct btrfs_dev_extent
);
1806 BUG_ON(found_key
.offset
> start
|| found_key
.offset
+
1807 btrfs_dev_extent_length(leaf
, extent
) < start
);
1809 btrfs_release_path(path
);
1811 } else if (ret
== 0) {
1812 leaf
= path
->nodes
[0];
1813 extent
= btrfs_item_ptr(leaf
, path
->slots
[0],
1814 struct btrfs_dev_extent
);
1816 btrfs_handle_fs_error(fs_info
, ret
, "Slot search failed");
1820 *dev_extent_len
= btrfs_dev_extent_length(leaf
, extent
);
1822 ret
= btrfs_del_item(trans
, root
, path
);
1824 btrfs_handle_fs_error(fs_info
, ret
,
1825 "Failed to remove dev extent item");
1827 set_bit(BTRFS_TRANS_HAVE_FREE_BGS
, &trans
->transaction
->flags
);
1830 btrfs_free_path(path
);
1834 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle
*trans
,
1835 struct btrfs_device
*device
,
1836 u64 chunk_offset
, u64 start
, u64 num_bytes
)
1839 struct btrfs_path
*path
;
1840 struct btrfs_fs_info
*fs_info
= device
->fs_info
;
1841 struct btrfs_root
*root
= fs_info
->dev_root
;
1842 struct btrfs_dev_extent
*extent
;
1843 struct extent_buffer
*leaf
;
1844 struct btrfs_key key
;
1846 WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA
, &device
->dev_state
));
1847 WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
));
1848 path
= btrfs_alloc_path();
1852 key
.objectid
= device
->devid
;
1854 key
.type
= BTRFS_DEV_EXTENT_KEY
;
1855 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
,
1860 leaf
= path
->nodes
[0];
1861 extent
= btrfs_item_ptr(leaf
, path
->slots
[0],
1862 struct btrfs_dev_extent
);
1863 btrfs_set_dev_extent_chunk_tree(leaf
, extent
,
1864 BTRFS_CHUNK_TREE_OBJECTID
);
1865 btrfs_set_dev_extent_chunk_objectid(leaf
, extent
,
1866 BTRFS_FIRST_CHUNK_TREE_OBJECTID
);
1867 btrfs_set_dev_extent_chunk_offset(leaf
, extent
, chunk_offset
);
1869 btrfs_set_dev_extent_length(leaf
, extent
, num_bytes
);
1870 btrfs_mark_buffer_dirty(leaf
);
1872 btrfs_free_path(path
);
1876 static u64
find_next_chunk(struct btrfs_fs_info
*fs_info
)
1878 struct extent_map_tree
*em_tree
;
1879 struct extent_map
*em
;
1883 em_tree
= &fs_info
->mapping_tree
;
1884 read_lock(&em_tree
->lock
);
1885 n
= rb_last(&em_tree
->map
.rb_root
);
1887 em
= rb_entry(n
, struct extent_map
, rb_node
);
1888 ret
= em
->start
+ em
->len
;
1890 read_unlock(&em_tree
->lock
);
1895 static noinline
int find_next_devid(struct btrfs_fs_info
*fs_info
,
1899 struct btrfs_key key
;
1900 struct btrfs_key found_key
;
1901 struct btrfs_path
*path
;
1903 path
= btrfs_alloc_path();
1907 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
1908 key
.type
= BTRFS_DEV_ITEM_KEY
;
1909 key
.offset
= (u64
)-1;
1911 ret
= btrfs_search_slot(NULL
, fs_info
->chunk_root
, &key
, path
, 0, 0);
1917 btrfs_err(fs_info
, "corrupted chunk tree devid -1 matched");
1922 ret
= btrfs_previous_item(fs_info
->chunk_root
, path
,
1923 BTRFS_DEV_ITEMS_OBJECTID
,
1924 BTRFS_DEV_ITEM_KEY
);
1928 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
1930 *devid_ret
= found_key
.offset
+ 1;
1934 btrfs_free_path(path
);
1939 * the device information is stored in the chunk root
1940 * the btrfs_device struct should be fully filled in
1942 static int btrfs_add_dev_item(struct btrfs_trans_handle
*trans
,
1943 struct btrfs_device
*device
)
1946 struct btrfs_path
*path
;
1947 struct btrfs_dev_item
*dev_item
;
1948 struct extent_buffer
*leaf
;
1949 struct btrfs_key key
;
1952 path
= btrfs_alloc_path();
1956 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
1957 key
.type
= BTRFS_DEV_ITEM_KEY
;
1958 key
.offset
= device
->devid
;
1960 ret
= btrfs_insert_empty_item(trans
, trans
->fs_info
->chunk_root
, path
,
1961 &key
, sizeof(*dev_item
));
1965 leaf
= path
->nodes
[0];
1966 dev_item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_dev_item
);
1968 btrfs_set_device_id(leaf
, dev_item
, device
->devid
);
1969 btrfs_set_device_generation(leaf
, dev_item
, 0);
1970 btrfs_set_device_type(leaf
, dev_item
, device
->type
);
1971 btrfs_set_device_io_align(leaf
, dev_item
, device
->io_align
);
1972 btrfs_set_device_io_width(leaf
, dev_item
, device
->io_width
);
1973 btrfs_set_device_sector_size(leaf
, dev_item
, device
->sector_size
);
1974 btrfs_set_device_total_bytes(leaf
, dev_item
,
1975 btrfs_device_get_disk_total_bytes(device
));
1976 btrfs_set_device_bytes_used(leaf
, dev_item
,
1977 btrfs_device_get_bytes_used(device
));
1978 btrfs_set_device_group(leaf
, dev_item
, 0);
1979 btrfs_set_device_seek_speed(leaf
, dev_item
, 0);
1980 btrfs_set_device_bandwidth(leaf
, dev_item
, 0);
1981 btrfs_set_device_start_offset(leaf
, dev_item
, 0);
1983 ptr
= btrfs_device_uuid(dev_item
);
1984 write_extent_buffer(leaf
, device
->uuid
, ptr
, BTRFS_UUID_SIZE
);
1985 ptr
= btrfs_device_fsid(dev_item
);
1986 write_extent_buffer(leaf
, trans
->fs_info
->fs_devices
->metadata_uuid
,
1987 ptr
, BTRFS_FSID_SIZE
);
1988 btrfs_mark_buffer_dirty(leaf
);
1992 btrfs_free_path(path
);
1997 * Function to update ctime/mtime for a given device path.
1998 * Mainly used for ctime/mtime based probe like libblkid.
2000 static void update_dev_time(const char *path_name
)
2004 filp
= filp_open(path_name
, O_RDWR
, 0);
2007 file_update_time(filp
);
2008 filp_close(filp
, NULL
);
2011 static int btrfs_rm_dev_item(struct btrfs_device
*device
)
2013 struct btrfs_root
*root
= device
->fs_info
->chunk_root
;
2015 struct btrfs_path
*path
;
2016 struct btrfs_key key
;
2017 struct btrfs_trans_handle
*trans
;
2019 path
= btrfs_alloc_path();
2023 trans
= btrfs_start_transaction(root
, 0);
2024 if (IS_ERR(trans
)) {
2025 btrfs_free_path(path
);
2026 return PTR_ERR(trans
);
2028 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
2029 key
.type
= BTRFS_DEV_ITEM_KEY
;
2030 key
.offset
= device
->devid
;
2032 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
2036 btrfs_abort_transaction(trans
, ret
);
2037 btrfs_end_transaction(trans
);
2041 ret
= btrfs_del_item(trans
, root
, path
);
2043 btrfs_abort_transaction(trans
, ret
);
2044 btrfs_end_transaction(trans
);
2048 btrfs_free_path(path
);
2050 ret
= btrfs_commit_transaction(trans
);
2055 * Verify that @num_devices satisfies the RAID profile constraints in the whole
2056 * filesystem. It's up to the caller to adjust that number regarding eg. device
2059 static int btrfs_check_raid_min_devices(struct btrfs_fs_info
*fs_info
,
2067 seq
= read_seqbegin(&fs_info
->profiles_lock
);
2069 all_avail
= fs_info
->avail_data_alloc_bits
|
2070 fs_info
->avail_system_alloc_bits
|
2071 fs_info
->avail_metadata_alloc_bits
;
2072 } while (read_seqretry(&fs_info
->profiles_lock
, seq
));
2074 for (i
= 0; i
< BTRFS_NR_RAID_TYPES
; i
++) {
2075 if (!(all_avail
& btrfs_raid_array
[i
].bg_flag
))
2078 if (num_devices
< btrfs_raid_array
[i
].devs_min
) {
2079 int ret
= btrfs_raid_array
[i
].mindev_error
;
2089 static struct btrfs_device
* btrfs_find_next_active_device(
2090 struct btrfs_fs_devices
*fs_devs
, struct btrfs_device
*device
)
2092 struct btrfs_device
*next_device
;
2094 list_for_each_entry(next_device
, &fs_devs
->devices
, dev_list
) {
2095 if (next_device
!= device
&&
2096 !test_bit(BTRFS_DEV_STATE_MISSING
, &next_device
->dev_state
)
2097 && next_device
->bdev
)
2105 * Helper function to check if the given device is part of s_bdev / latest_bdev
2106 * and replace it with the provided or the next active device, in the context
2107 * where this function called, there should be always be another device (or
2108 * this_dev) which is active.
2110 void btrfs_assign_next_active_device(struct btrfs_device
*device
,
2111 struct btrfs_device
*this_dev
)
2113 struct btrfs_fs_info
*fs_info
= device
->fs_info
;
2114 struct btrfs_device
*next_device
;
2117 next_device
= this_dev
;
2119 next_device
= btrfs_find_next_active_device(fs_info
->fs_devices
,
2121 ASSERT(next_device
);
2123 if (fs_info
->sb
->s_bdev
&&
2124 (fs_info
->sb
->s_bdev
== device
->bdev
))
2125 fs_info
->sb
->s_bdev
= next_device
->bdev
;
2127 if (fs_info
->fs_devices
->latest_bdev
== device
->bdev
)
2128 fs_info
->fs_devices
->latest_bdev
= next_device
->bdev
;
2132 * Return btrfs_fs_devices::num_devices excluding the device that's being
2133 * currently replaced.
2135 static u64
btrfs_num_devices(struct btrfs_fs_info
*fs_info
)
2137 u64 num_devices
= fs_info
->fs_devices
->num_devices
;
2139 down_read(&fs_info
->dev_replace
.rwsem
);
2140 if (btrfs_dev_replace_is_ongoing(&fs_info
->dev_replace
)) {
2141 ASSERT(num_devices
> 1);
2144 up_read(&fs_info
->dev_replace
.rwsem
);
2149 int btrfs_rm_device(struct btrfs_fs_info
*fs_info
, const char *device_path
,
2152 struct btrfs_device
*device
;
2153 struct btrfs_fs_devices
*cur_devices
;
2154 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
2158 mutex_lock(&uuid_mutex
);
2160 num_devices
= btrfs_num_devices(fs_info
);
2162 ret
= btrfs_check_raid_min_devices(fs_info
, num_devices
- 1);
2166 device
= btrfs_find_device_by_devspec(fs_info
, devid
, device_path
);
2168 if (IS_ERR(device
)) {
2169 if (PTR_ERR(device
) == -ENOENT
&&
2170 strcmp(device_path
, "missing") == 0)
2171 ret
= BTRFS_ERROR_DEV_MISSING_NOT_FOUND
;
2173 ret
= PTR_ERR(device
);
2177 if (btrfs_pinned_by_swapfile(fs_info
, device
)) {
2178 btrfs_warn_in_rcu(fs_info
,
2179 "cannot remove device %s (devid %llu) due to active swapfile",
2180 rcu_str_deref(device
->name
), device
->devid
);
2185 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
)) {
2186 ret
= BTRFS_ERROR_DEV_TGT_REPLACE
;
2190 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
) &&
2191 fs_info
->fs_devices
->rw_devices
== 1) {
2192 ret
= BTRFS_ERROR_DEV_ONLY_WRITABLE
;
2196 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
2197 mutex_lock(&fs_info
->chunk_mutex
);
2198 list_del_init(&device
->dev_alloc_list
);
2199 device
->fs_devices
->rw_devices
--;
2200 mutex_unlock(&fs_info
->chunk_mutex
);
2203 mutex_unlock(&uuid_mutex
);
2204 ret
= btrfs_shrink_device(device
, 0);
2206 btrfs_reada_remove_dev(device
);
2207 mutex_lock(&uuid_mutex
);
2212 * TODO: the superblock still includes this device in its num_devices
2213 * counter although write_all_supers() is not locked out. This
2214 * could give a filesystem state which requires a degraded mount.
2216 ret
= btrfs_rm_dev_item(device
);
2220 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA
, &device
->dev_state
);
2221 btrfs_scrub_cancel_dev(device
);
2224 * the device list mutex makes sure that we don't change
2225 * the device list while someone else is writing out all
2226 * the device supers. Whoever is writing all supers, should
2227 * lock the device list mutex before getting the number of
2228 * devices in the super block (super_copy). Conversely,
2229 * whoever updates the number of devices in the super block
2230 * (super_copy) should hold the device list mutex.
2234 * In normal cases the cur_devices == fs_devices. But in case
2235 * of deleting a seed device, the cur_devices should point to
2236 * its own fs_devices listed under the fs_devices->seed.
2238 cur_devices
= device
->fs_devices
;
2239 mutex_lock(&fs_devices
->device_list_mutex
);
2240 list_del_rcu(&device
->dev_list
);
2242 cur_devices
->num_devices
--;
2243 cur_devices
->total_devices
--;
2244 /* Update total_devices of the parent fs_devices if it's seed */
2245 if (cur_devices
!= fs_devices
)
2246 fs_devices
->total_devices
--;
2248 if (test_bit(BTRFS_DEV_STATE_MISSING
, &device
->dev_state
))
2249 cur_devices
->missing_devices
--;
2251 btrfs_assign_next_active_device(device
, NULL
);
2254 cur_devices
->open_devices
--;
2255 /* remove sysfs entry */
2256 btrfs_sysfs_rm_device_link(fs_devices
, device
);
2259 num_devices
= btrfs_super_num_devices(fs_info
->super_copy
) - 1;
2260 btrfs_set_super_num_devices(fs_info
->super_copy
, num_devices
);
2261 mutex_unlock(&fs_devices
->device_list_mutex
);
2264 * at this point, the device is zero sized and detached from
2265 * the devices list. All that's left is to zero out the old
2266 * supers and free the device.
2268 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
))
2269 btrfs_scratch_superblocks(device
->bdev
, device
->name
->str
);
2271 btrfs_close_bdev(device
);
2273 btrfs_free_device(device
);
2275 if (cur_devices
->open_devices
== 0) {
2276 while (fs_devices
) {
2277 if (fs_devices
->seed
== cur_devices
) {
2278 fs_devices
->seed
= cur_devices
->seed
;
2281 fs_devices
= fs_devices
->seed
;
2283 cur_devices
->seed
= NULL
;
2284 close_fs_devices(cur_devices
);
2285 free_fs_devices(cur_devices
);
2289 mutex_unlock(&uuid_mutex
);
2293 btrfs_reada_undo_remove_dev(device
);
2294 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
2295 mutex_lock(&fs_info
->chunk_mutex
);
2296 list_add(&device
->dev_alloc_list
,
2297 &fs_devices
->alloc_list
);
2298 device
->fs_devices
->rw_devices
++;
2299 mutex_unlock(&fs_info
->chunk_mutex
);
2304 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device
*srcdev
)
2306 struct btrfs_fs_devices
*fs_devices
;
2308 lockdep_assert_held(&srcdev
->fs_info
->fs_devices
->device_list_mutex
);
2311 * in case of fs with no seed, srcdev->fs_devices will point
2312 * to fs_devices of fs_info. However when the dev being replaced is
2313 * a seed dev it will point to the seed's local fs_devices. In short
2314 * srcdev will have its correct fs_devices in both the cases.
2316 fs_devices
= srcdev
->fs_devices
;
2318 list_del_rcu(&srcdev
->dev_list
);
2319 list_del(&srcdev
->dev_alloc_list
);
2320 fs_devices
->num_devices
--;
2321 if (test_bit(BTRFS_DEV_STATE_MISSING
, &srcdev
->dev_state
))
2322 fs_devices
->missing_devices
--;
2324 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &srcdev
->dev_state
))
2325 fs_devices
->rw_devices
--;
2328 fs_devices
->open_devices
--;
2331 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device
*srcdev
)
2333 struct btrfs_fs_info
*fs_info
= srcdev
->fs_info
;
2334 struct btrfs_fs_devices
*fs_devices
= srcdev
->fs_devices
;
2336 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &srcdev
->dev_state
)) {
2337 /* zero out the old super if it is writable */
2338 btrfs_scratch_superblocks(srcdev
->bdev
, srcdev
->name
->str
);
2341 btrfs_close_bdev(srcdev
);
2343 btrfs_free_device(srcdev
);
2345 /* if this is no devs we rather delete the fs_devices */
2346 if (!fs_devices
->num_devices
) {
2347 struct btrfs_fs_devices
*tmp_fs_devices
;
2350 * On a mounted FS, num_devices can't be zero unless it's a
2351 * seed. In case of a seed device being replaced, the replace
2352 * target added to the sprout FS, so there will be no more
2353 * device left under the seed FS.
2355 ASSERT(fs_devices
->seeding
);
2357 tmp_fs_devices
= fs_info
->fs_devices
;
2358 while (tmp_fs_devices
) {
2359 if (tmp_fs_devices
->seed
== fs_devices
) {
2360 tmp_fs_devices
->seed
= fs_devices
->seed
;
2363 tmp_fs_devices
= tmp_fs_devices
->seed
;
2365 fs_devices
->seed
= NULL
;
2366 close_fs_devices(fs_devices
);
2367 free_fs_devices(fs_devices
);
2371 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device
*tgtdev
)
2373 struct btrfs_fs_devices
*fs_devices
= tgtdev
->fs_info
->fs_devices
;
2376 mutex_lock(&fs_devices
->device_list_mutex
);
2378 btrfs_sysfs_rm_device_link(fs_devices
, tgtdev
);
2381 fs_devices
->open_devices
--;
2383 fs_devices
->num_devices
--;
2385 btrfs_assign_next_active_device(tgtdev
, NULL
);
2387 list_del_rcu(&tgtdev
->dev_list
);
2389 mutex_unlock(&fs_devices
->device_list_mutex
);
2392 * The update_dev_time() with in btrfs_scratch_superblocks()
2393 * may lead to a call to btrfs_show_devname() which will try
2394 * to hold device_list_mutex. And here this device
2395 * is already out of device list, so we don't have to hold
2396 * the device_list_mutex lock.
2398 btrfs_scratch_superblocks(tgtdev
->bdev
, tgtdev
->name
->str
);
2400 btrfs_close_bdev(tgtdev
);
2402 btrfs_free_device(tgtdev
);
2405 static struct btrfs_device
*btrfs_find_device_by_path(
2406 struct btrfs_fs_info
*fs_info
, const char *device_path
)
2409 struct btrfs_super_block
*disk_super
;
2412 struct block_device
*bdev
;
2413 struct buffer_head
*bh
;
2414 struct btrfs_device
*device
;
2416 ret
= btrfs_get_bdev_and_sb(device_path
, FMODE_READ
,
2417 fs_info
->bdev_holder
, 0, &bdev
, &bh
);
2419 return ERR_PTR(ret
);
2420 disk_super
= (struct btrfs_super_block
*)bh
->b_data
;
2421 devid
= btrfs_stack_device_id(&disk_super
->dev_item
);
2422 dev_uuid
= disk_super
->dev_item
.uuid
;
2423 if (btrfs_fs_incompat(fs_info
, METADATA_UUID
))
2424 device
= btrfs_find_device(fs_info
->fs_devices
, devid
, dev_uuid
,
2425 disk_super
->metadata_uuid
, true);
2427 device
= btrfs_find_device(fs_info
->fs_devices
, devid
, dev_uuid
,
2428 disk_super
->fsid
, true);
2432 device
= ERR_PTR(-ENOENT
);
2433 blkdev_put(bdev
, FMODE_READ
);
2438 * Lookup a device given by device id, or the path if the id is 0.
2440 struct btrfs_device
*btrfs_find_device_by_devspec(
2441 struct btrfs_fs_info
*fs_info
, u64 devid
,
2442 const char *device_path
)
2444 struct btrfs_device
*device
;
2447 device
= btrfs_find_device(fs_info
->fs_devices
, devid
, NULL
,
2450 return ERR_PTR(-ENOENT
);
2454 if (!device_path
|| !device_path
[0])
2455 return ERR_PTR(-EINVAL
);
2457 if (strcmp(device_path
, "missing") == 0) {
2458 /* Find first missing device */
2459 list_for_each_entry(device
, &fs_info
->fs_devices
->devices
,
2461 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA
,
2462 &device
->dev_state
) && !device
->bdev
)
2465 return ERR_PTR(-ENOENT
);
2468 return btrfs_find_device_by_path(fs_info
, device_path
);
2472 * does all the dirty work required for changing file system's UUID.
2474 static int btrfs_prepare_sprout(struct btrfs_fs_info
*fs_info
)
2476 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
2477 struct btrfs_fs_devices
*old_devices
;
2478 struct btrfs_fs_devices
*seed_devices
;
2479 struct btrfs_super_block
*disk_super
= fs_info
->super_copy
;
2480 struct btrfs_device
*device
;
2483 lockdep_assert_held(&uuid_mutex
);
2484 if (!fs_devices
->seeding
)
2487 seed_devices
= alloc_fs_devices(NULL
, NULL
);
2488 if (IS_ERR(seed_devices
))
2489 return PTR_ERR(seed_devices
);
2491 old_devices
= clone_fs_devices(fs_devices
);
2492 if (IS_ERR(old_devices
)) {
2493 kfree(seed_devices
);
2494 return PTR_ERR(old_devices
);
2497 list_add(&old_devices
->fs_list
, &fs_uuids
);
2499 memcpy(seed_devices
, fs_devices
, sizeof(*seed_devices
));
2500 seed_devices
->opened
= 1;
2501 INIT_LIST_HEAD(&seed_devices
->devices
);
2502 INIT_LIST_HEAD(&seed_devices
->alloc_list
);
2503 mutex_init(&seed_devices
->device_list_mutex
);
2505 mutex_lock(&fs_devices
->device_list_mutex
);
2506 list_splice_init_rcu(&fs_devices
->devices
, &seed_devices
->devices
,
2508 list_for_each_entry(device
, &seed_devices
->devices
, dev_list
)
2509 device
->fs_devices
= seed_devices
;
2511 mutex_lock(&fs_info
->chunk_mutex
);
2512 list_splice_init(&fs_devices
->alloc_list
, &seed_devices
->alloc_list
);
2513 mutex_unlock(&fs_info
->chunk_mutex
);
2515 fs_devices
->seeding
= 0;
2516 fs_devices
->num_devices
= 0;
2517 fs_devices
->open_devices
= 0;
2518 fs_devices
->missing_devices
= 0;
2519 fs_devices
->rotating
= 0;
2520 fs_devices
->seed
= seed_devices
;
2522 generate_random_uuid(fs_devices
->fsid
);
2523 memcpy(fs_devices
->metadata_uuid
, fs_devices
->fsid
, BTRFS_FSID_SIZE
);
2524 memcpy(disk_super
->fsid
, fs_devices
->fsid
, BTRFS_FSID_SIZE
);
2525 mutex_unlock(&fs_devices
->device_list_mutex
);
2527 super_flags
= btrfs_super_flags(disk_super
) &
2528 ~BTRFS_SUPER_FLAG_SEEDING
;
2529 btrfs_set_super_flags(disk_super
, super_flags
);
2535 * Store the expected generation for seed devices in device items.
2537 static int btrfs_finish_sprout(struct btrfs_trans_handle
*trans
)
2539 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
2540 struct btrfs_root
*root
= fs_info
->chunk_root
;
2541 struct btrfs_path
*path
;
2542 struct extent_buffer
*leaf
;
2543 struct btrfs_dev_item
*dev_item
;
2544 struct btrfs_device
*device
;
2545 struct btrfs_key key
;
2546 u8 fs_uuid
[BTRFS_FSID_SIZE
];
2547 u8 dev_uuid
[BTRFS_UUID_SIZE
];
2551 path
= btrfs_alloc_path();
2555 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
2557 key
.type
= BTRFS_DEV_ITEM_KEY
;
2560 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
2564 leaf
= path
->nodes
[0];
2566 if (path
->slots
[0] >= btrfs_header_nritems(leaf
)) {
2567 ret
= btrfs_next_leaf(root
, path
);
2572 leaf
= path
->nodes
[0];
2573 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2574 btrfs_release_path(path
);
2578 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2579 if (key
.objectid
!= BTRFS_DEV_ITEMS_OBJECTID
||
2580 key
.type
!= BTRFS_DEV_ITEM_KEY
)
2583 dev_item
= btrfs_item_ptr(leaf
, path
->slots
[0],
2584 struct btrfs_dev_item
);
2585 devid
= btrfs_device_id(leaf
, dev_item
);
2586 read_extent_buffer(leaf
, dev_uuid
, btrfs_device_uuid(dev_item
),
2588 read_extent_buffer(leaf
, fs_uuid
, btrfs_device_fsid(dev_item
),
2590 device
= btrfs_find_device(fs_info
->fs_devices
, devid
, dev_uuid
,
2592 BUG_ON(!device
); /* Logic error */
2594 if (device
->fs_devices
->seeding
) {
2595 btrfs_set_device_generation(leaf
, dev_item
,
2596 device
->generation
);
2597 btrfs_mark_buffer_dirty(leaf
);
2605 btrfs_free_path(path
);
2609 int btrfs_init_new_device(struct btrfs_fs_info
*fs_info
, const char *device_path
)
2611 struct btrfs_root
*root
= fs_info
->dev_root
;
2612 struct request_queue
*q
;
2613 struct btrfs_trans_handle
*trans
;
2614 struct btrfs_device
*device
;
2615 struct block_device
*bdev
;
2616 struct super_block
*sb
= fs_info
->sb
;
2617 struct rcu_string
*name
;
2618 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
2619 u64 orig_super_total_bytes
;
2620 u64 orig_super_num_devices
;
2621 int seeding_dev
= 0;
2623 bool unlocked
= false;
2625 if (sb_rdonly(sb
) && !fs_devices
->seeding
)
2628 bdev
= blkdev_get_by_path(device_path
, FMODE_WRITE
| FMODE_EXCL
,
2629 fs_info
->bdev_holder
);
2631 return PTR_ERR(bdev
);
2633 if (fs_devices
->seeding
) {
2635 down_write(&sb
->s_umount
);
2636 mutex_lock(&uuid_mutex
);
2639 filemap_write_and_wait(bdev
->bd_inode
->i_mapping
);
2641 mutex_lock(&fs_devices
->device_list_mutex
);
2642 list_for_each_entry(device
, &fs_devices
->devices
, dev_list
) {
2643 if (device
->bdev
== bdev
) {
2646 &fs_devices
->device_list_mutex
);
2650 mutex_unlock(&fs_devices
->device_list_mutex
);
2652 device
= btrfs_alloc_device(fs_info
, NULL
, NULL
);
2653 if (IS_ERR(device
)) {
2654 /* we can safely leave the fs_devices entry around */
2655 ret
= PTR_ERR(device
);
2659 name
= rcu_string_strdup(device_path
, GFP_KERNEL
);
2662 goto error_free_device
;
2664 rcu_assign_pointer(device
->name
, name
);
2666 trans
= btrfs_start_transaction(root
, 0);
2667 if (IS_ERR(trans
)) {
2668 ret
= PTR_ERR(trans
);
2669 goto error_free_device
;
2672 q
= bdev_get_queue(bdev
);
2673 set_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
);
2674 device
->generation
= trans
->transid
;
2675 device
->io_width
= fs_info
->sectorsize
;
2676 device
->io_align
= fs_info
->sectorsize
;
2677 device
->sector_size
= fs_info
->sectorsize
;
2678 device
->total_bytes
= round_down(i_size_read(bdev
->bd_inode
),
2679 fs_info
->sectorsize
);
2680 device
->disk_total_bytes
= device
->total_bytes
;
2681 device
->commit_total_bytes
= device
->total_bytes
;
2682 device
->fs_info
= fs_info
;
2683 device
->bdev
= bdev
;
2684 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA
, &device
->dev_state
);
2685 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
);
2686 device
->mode
= FMODE_EXCL
;
2687 device
->dev_stats_valid
= 1;
2688 set_blocksize(device
->bdev
, BTRFS_BDEV_BLOCKSIZE
);
2691 sb
->s_flags
&= ~SB_RDONLY
;
2692 ret
= btrfs_prepare_sprout(fs_info
);
2694 btrfs_abort_transaction(trans
, ret
);
2699 device
->fs_devices
= fs_devices
;
2701 mutex_lock(&fs_devices
->device_list_mutex
);
2702 mutex_lock(&fs_info
->chunk_mutex
);
2703 list_add_rcu(&device
->dev_list
, &fs_devices
->devices
);
2704 list_add(&device
->dev_alloc_list
, &fs_devices
->alloc_list
);
2705 fs_devices
->num_devices
++;
2706 fs_devices
->open_devices
++;
2707 fs_devices
->rw_devices
++;
2708 fs_devices
->total_devices
++;
2709 fs_devices
->total_rw_bytes
+= device
->total_bytes
;
2711 atomic64_add(device
->total_bytes
, &fs_info
->free_chunk_space
);
2713 if (!blk_queue_nonrot(q
))
2714 fs_devices
->rotating
= 1;
2716 orig_super_total_bytes
= btrfs_super_total_bytes(fs_info
->super_copy
);
2717 btrfs_set_super_total_bytes(fs_info
->super_copy
,
2718 round_down(orig_super_total_bytes
+ device
->total_bytes
,
2719 fs_info
->sectorsize
));
2721 orig_super_num_devices
= btrfs_super_num_devices(fs_info
->super_copy
);
2722 btrfs_set_super_num_devices(fs_info
->super_copy
,
2723 orig_super_num_devices
+ 1);
2726 * we've got more storage, clear any full flags on the space
2729 btrfs_clear_space_info_full(fs_info
);
2731 mutex_unlock(&fs_info
->chunk_mutex
);
2733 /* Add sysfs device entry */
2734 btrfs_sysfs_add_device_link(fs_devices
, device
);
2736 mutex_unlock(&fs_devices
->device_list_mutex
);
2739 mutex_lock(&fs_info
->chunk_mutex
);
2740 ret
= init_first_rw_device(trans
);
2741 mutex_unlock(&fs_info
->chunk_mutex
);
2743 btrfs_abort_transaction(trans
, ret
);
2748 ret
= btrfs_add_dev_item(trans
, device
);
2750 btrfs_abort_transaction(trans
, ret
);
2755 ret
= btrfs_finish_sprout(trans
);
2757 btrfs_abort_transaction(trans
, ret
);
2761 btrfs_sysfs_update_sprout_fsid(fs_devices
,
2762 fs_info
->fs_devices
->fsid
);
2765 ret
= btrfs_commit_transaction(trans
);
2768 mutex_unlock(&uuid_mutex
);
2769 up_write(&sb
->s_umount
);
2772 if (ret
) /* transaction commit */
2775 ret
= btrfs_relocate_sys_chunks(fs_info
);
2777 btrfs_handle_fs_error(fs_info
, ret
,
2778 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2779 trans
= btrfs_attach_transaction(root
);
2780 if (IS_ERR(trans
)) {
2781 if (PTR_ERR(trans
) == -ENOENT
)
2783 ret
= PTR_ERR(trans
);
2787 ret
= btrfs_commit_transaction(trans
);
2791 * Now that we have written a new super block to this device, check all
2792 * other fs_devices list if device_path alienates any other scanned
2794 * We can ignore the return value as it typically returns -EINVAL and
2795 * only succeeds if the device was an alien.
2797 btrfs_forget_devices(device_path
);
2799 /* Update ctime/mtime for blkid or udev */
2800 update_dev_time(device_path
);
2805 btrfs_sysfs_rm_device_link(fs_devices
, device
);
2806 mutex_lock(&fs_info
->fs_devices
->device_list_mutex
);
2807 mutex_lock(&fs_info
->chunk_mutex
);
2808 list_del_rcu(&device
->dev_list
);
2809 list_del(&device
->dev_alloc_list
);
2810 fs_info
->fs_devices
->num_devices
--;
2811 fs_info
->fs_devices
->open_devices
--;
2812 fs_info
->fs_devices
->rw_devices
--;
2813 fs_info
->fs_devices
->total_devices
--;
2814 fs_info
->fs_devices
->total_rw_bytes
-= device
->total_bytes
;
2815 atomic64_sub(device
->total_bytes
, &fs_info
->free_chunk_space
);
2816 btrfs_set_super_total_bytes(fs_info
->super_copy
,
2817 orig_super_total_bytes
);
2818 btrfs_set_super_num_devices(fs_info
->super_copy
,
2819 orig_super_num_devices
);
2820 mutex_unlock(&fs_info
->chunk_mutex
);
2821 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
2824 sb
->s_flags
|= SB_RDONLY
;
2826 btrfs_end_transaction(trans
);
2828 btrfs_free_device(device
);
2830 blkdev_put(bdev
, FMODE_EXCL
);
2831 if (seeding_dev
&& !unlocked
) {
2832 mutex_unlock(&uuid_mutex
);
2833 up_write(&sb
->s_umount
);
2838 static noinline
int btrfs_update_device(struct btrfs_trans_handle
*trans
,
2839 struct btrfs_device
*device
)
2842 struct btrfs_path
*path
;
2843 struct btrfs_root
*root
= device
->fs_info
->chunk_root
;
2844 struct btrfs_dev_item
*dev_item
;
2845 struct extent_buffer
*leaf
;
2846 struct btrfs_key key
;
2848 path
= btrfs_alloc_path();
2852 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
2853 key
.type
= BTRFS_DEV_ITEM_KEY
;
2854 key
.offset
= device
->devid
;
2856 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
2865 leaf
= path
->nodes
[0];
2866 dev_item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_dev_item
);
2868 btrfs_set_device_id(leaf
, dev_item
, device
->devid
);
2869 btrfs_set_device_type(leaf
, dev_item
, device
->type
);
2870 btrfs_set_device_io_align(leaf
, dev_item
, device
->io_align
);
2871 btrfs_set_device_io_width(leaf
, dev_item
, device
->io_width
);
2872 btrfs_set_device_sector_size(leaf
, dev_item
, device
->sector_size
);
2873 btrfs_set_device_total_bytes(leaf
, dev_item
,
2874 btrfs_device_get_disk_total_bytes(device
));
2875 btrfs_set_device_bytes_used(leaf
, dev_item
,
2876 btrfs_device_get_bytes_used(device
));
2877 btrfs_mark_buffer_dirty(leaf
);
2880 btrfs_free_path(path
);
2884 int btrfs_grow_device(struct btrfs_trans_handle
*trans
,
2885 struct btrfs_device
*device
, u64 new_size
)
2887 struct btrfs_fs_info
*fs_info
= device
->fs_info
;
2888 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
2892 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
))
2895 new_size
= round_down(new_size
, fs_info
->sectorsize
);
2897 mutex_lock(&fs_info
->chunk_mutex
);
2898 old_total
= btrfs_super_total_bytes(super_copy
);
2899 diff
= round_down(new_size
- device
->total_bytes
, fs_info
->sectorsize
);
2901 if (new_size
<= device
->total_bytes
||
2902 test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
)) {
2903 mutex_unlock(&fs_info
->chunk_mutex
);
2907 btrfs_set_super_total_bytes(super_copy
,
2908 round_down(old_total
+ diff
, fs_info
->sectorsize
));
2909 device
->fs_devices
->total_rw_bytes
+= diff
;
2911 btrfs_device_set_total_bytes(device
, new_size
);
2912 btrfs_device_set_disk_total_bytes(device
, new_size
);
2913 btrfs_clear_space_info_full(device
->fs_info
);
2914 if (list_empty(&device
->post_commit_list
))
2915 list_add_tail(&device
->post_commit_list
,
2916 &trans
->transaction
->dev_update_list
);
2917 mutex_unlock(&fs_info
->chunk_mutex
);
2919 return btrfs_update_device(trans
, device
);
2922 static int btrfs_free_chunk(struct btrfs_trans_handle
*trans
, u64 chunk_offset
)
2924 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
2925 struct btrfs_root
*root
= fs_info
->chunk_root
;
2927 struct btrfs_path
*path
;
2928 struct btrfs_key key
;
2930 path
= btrfs_alloc_path();
2934 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
2935 key
.offset
= chunk_offset
;
2936 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
2938 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
2941 else if (ret
> 0) { /* Logic error or corruption */
2942 btrfs_handle_fs_error(fs_info
, -ENOENT
,
2943 "Failed lookup while freeing chunk.");
2948 ret
= btrfs_del_item(trans
, root
, path
);
2950 btrfs_handle_fs_error(fs_info
, ret
,
2951 "Failed to delete chunk item.");
2953 btrfs_free_path(path
);
2957 static int btrfs_del_sys_chunk(struct btrfs_fs_info
*fs_info
, u64 chunk_offset
)
2959 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
2960 struct btrfs_disk_key
*disk_key
;
2961 struct btrfs_chunk
*chunk
;
2968 struct btrfs_key key
;
2970 mutex_lock(&fs_info
->chunk_mutex
);
2971 array_size
= btrfs_super_sys_array_size(super_copy
);
2973 ptr
= super_copy
->sys_chunk_array
;
2976 while (cur
< array_size
) {
2977 disk_key
= (struct btrfs_disk_key
*)ptr
;
2978 btrfs_disk_key_to_cpu(&key
, disk_key
);
2980 len
= sizeof(*disk_key
);
2982 if (key
.type
== BTRFS_CHUNK_ITEM_KEY
) {
2983 chunk
= (struct btrfs_chunk
*)(ptr
+ len
);
2984 num_stripes
= btrfs_stack_chunk_num_stripes(chunk
);
2985 len
+= btrfs_chunk_item_size(num_stripes
);
2990 if (key
.objectid
== BTRFS_FIRST_CHUNK_TREE_OBJECTID
&&
2991 key
.offset
== chunk_offset
) {
2992 memmove(ptr
, ptr
+ len
, array_size
- (cur
+ len
));
2994 btrfs_set_super_sys_array_size(super_copy
, array_size
);
3000 mutex_unlock(&fs_info
->chunk_mutex
);
3005 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
3006 * @logical: Logical block offset in bytes.
3007 * @length: Length of extent in bytes.
3009 * Return: Chunk mapping or ERR_PTR.
3011 struct extent_map
*btrfs_get_chunk_map(struct btrfs_fs_info
*fs_info
,
3012 u64 logical
, u64 length
)
3014 struct extent_map_tree
*em_tree
;
3015 struct extent_map
*em
;
3017 em_tree
= &fs_info
->mapping_tree
;
3018 read_lock(&em_tree
->lock
);
3019 em
= lookup_extent_mapping(em_tree
, logical
, length
);
3020 read_unlock(&em_tree
->lock
);
3023 btrfs_crit(fs_info
, "unable to find logical %llu length %llu",
3025 return ERR_PTR(-EINVAL
);
3028 if (em
->start
> logical
|| em
->start
+ em
->len
< logical
) {
3030 "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
3031 logical
, length
, em
->start
, em
->start
+ em
->len
);
3032 free_extent_map(em
);
3033 return ERR_PTR(-EINVAL
);
3036 /* callers are responsible for dropping em's ref. */
3040 int btrfs_remove_chunk(struct btrfs_trans_handle
*trans
, u64 chunk_offset
)
3042 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
3043 struct extent_map
*em
;
3044 struct map_lookup
*map
;
3045 u64 dev_extent_len
= 0;
3047 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
3049 em
= btrfs_get_chunk_map(fs_info
, chunk_offset
, 1);
3052 * This is a logic error, but we don't want to just rely on the
3053 * user having built with ASSERT enabled, so if ASSERT doesn't
3054 * do anything we still error out.
3059 map
= em
->map_lookup
;
3060 mutex_lock(&fs_info
->chunk_mutex
);
3061 check_system_chunk(trans
, map
->type
);
3062 mutex_unlock(&fs_info
->chunk_mutex
);
3065 * Take the device list mutex to prevent races with the final phase of
3066 * a device replace operation that replaces the device object associated
3067 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
3069 mutex_lock(&fs_devices
->device_list_mutex
);
3070 for (i
= 0; i
< map
->num_stripes
; i
++) {
3071 struct btrfs_device
*device
= map
->stripes
[i
].dev
;
3072 ret
= btrfs_free_dev_extent(trans
, device
,
3073 map
->stripes
[i
].physical
,
3076 mutex_unlock(&fs_devices
->device_list_mutex
);
3077 btrfs_abort_transaction(trans
, ret
);
3081 if (device
->bytes_used
> 0) {
3082 mutex_lock(&fs_info
->chunk_mutex
);
3083 btrfs_device_set_bytes_used(device
,
3084 device
->bytes_used
- dev_extent_len
);
3085 atomic64_add(dev_extent_len
, &fs_info
->free_chunk_space
);
3086 btrfs_clear_space_info_full(fs_info
);
3087 mutex_unlock(&fs_info
->chunk_mutex
);
3090 ret
= btrfs_update_device(trans
, device
);
3092 mutex_unlock(&fs_devices
->device_list_mutex
);
3093 btrfs_abort_transaction(trans
, ret
);
3097 mutex_unlock(&fs_devices
->device_list_mutex
);
3099 ret
= btrfs_free_chunk(trans
, chunk_offset
);
3101 btrfs_abort_transaction(trans
, ret
);
3105 trace_btrfs_chunk_free(fs_info
, map
, chunk_offset
, em
->len
);
3107 if (map
->type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
3108 ret
= btrfs_del_sys_chunk(fs_info
, chunk_offset
);
3110 btrfs_abort_transaction(trans
, ret
);
3115 ret
= btrfs_remove_block_group(trans
, chunk_offset
, em
);
3117 btrfs_abort_transaction(trans
, ret
);
3123 free_extent_map(em
);
3127 static int btrfs_relocate_chunk(struct btrfs_fs_info
*fs_info
, u64 chunk_offset
)
3129 struct btrfs_root
*root
= fs_info
->chunk_root
;
3130 struct btrfs_trans_handle
*trans
;
3134 * Prevent races with automatic removal of unused block groups.
3135 * After we relocate and before we remove the chunk with offset
3136 * chunk_offset, automatic removal of the block group can kick in,
3137 * resulting in a failure when calling btrfs_remove_chunk() below.
3139 * Make sure to acquire this mutex before doing a tree search (dev
3140 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3141 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3142 * we release the path used to search the chunk/dev tree and before
3143 * the current task acquires this mutex and calls us.
3145 lockdep_assert_held(&fs_info
->delete_unused_bgs_mutex
);
3147 /* step one, relocate all the extents inside this chunk */
3148 btrfs_scrub_pause(fs_info
);
3149 ret
= btrfs_relocate_block_group(fs_info
, chunk_offset
);
3150 btrfs_scrub_continue(fs_info
);
3154 trans
= btrfs_start_trans_remove_block_group(root
->fs_info
,
3156 if (IS_ERR(trans
)) {
3157 ret
= PTR_ERR(trans
);
3158 btrfs_handle_fs_error(root
->fs_info
, ret
, NULL
);
3163 * step two, delete the device extents and the
3164 * chunk tree entries
3166 ret
= btrfs_remove_chunk(trans
, chunk_offset
);
3167 btrfs_end_transaction(trans
);
3171 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info
*fs_info
)
3173 struct btrfs_root
*chunk_root
= fs_info
->chunk_root
;
3174 struct btrfs_path
*path
;
3175 struct extent_buffer
*leaf
;
3176 struct btrfs_chunk
*chunk
;
3177 struct btrfs_key key
;
3178 struct btrfs_key found_key
;
3180 bool retried
= false;
3184 path
= btrfs_alloc_path();
3189 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
3190 key
.offset
= (u64
)-1;
3191 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
3194 mutex_lock(&fs_info
->delete_unused_bgs_mutex
);
3195 ret
= btrfs_search_slot(NULL
, chunk_root
, &key
, path
, 0, 0);
3197 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3200 BUG_ON(ret
== 0); /* Corruption */
3202 ret
= btrfs_previous_item(chunk_root
, path
, key
.objectid
,
3205 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3211 leaf
= path
->nodes
[0];
3212 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
3214 chunk
= btrfs_item_ptr(leaf
, path
->slots
[0],
3215 struct btrfs_chunk
);
3216 chunk_type
= btrfs_chunk_type(leaf
, chunk
);
3217 btrfs_release_path(path
);
3219 if (chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
3220 ret
= btrfs_relocate_chunk(fs_info
, found_key
.offset
);
3226 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3228 if (found_key
.offset
== 0)
3230 key
.offset
= found_key
.offset
- 1;
3233 if (failed
&& !retried
) {
3237 } else if (WARN_ON(failed
&& retried
)) {
3241 btrfs_free_path(path
);
3246 * return 1 : allocate a data chunk successfully,
3247 * return <0: errors during allocating a data chunk,
3248 * return 0 : no need to allocate a data chunk.
3250 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info
*fs_info
,
3253 struct btrfs_block_group_cache
*cache
;
3257 cache
= btrfs_lookup_block_group(fs_info
, chunk_offset
);
3259 chunk_type
= cache
->flags
;
3260 btrfs_put_block_group(cache
);
3262 if (chunk_type
& BTRFS_BLOCK_GROUP_DATA
) {
3263 spin_lock(&fs_info
->data_sinfo
->lock
);
3264 bytes_used
= fs_info
->data_sinfo
->bytes_used
;
3265 spin_unlock(&fs_info
->data_sinfo
->lock
);
3268 struct btrfs_trans_handle
*trans
;
3271 trans
= btrfs_join_transaction(fs_info
->tree_root
);
3273 return PTR_ERR(trans
);
3275 ret
= btrfs_force_chunk_alloc(trans
,
3276 BTRFS_BLOCK_GROUP_DATA
);
3277 btrfs_end_transaction(trans
);
3286 static int insert_balance_item(struct btrfs_fs_info
*fs_info
,
3287 struct btrfs_balance_control
*bctl
)
3289 struct btrfs_root
*root
= fs_info
->tree_root
;
3290 struct btrfs_trans_handle
*trans
;
3291 struct btrfs_balance_item
*item
;
3292 struct btrfs_disk_balance_args disk_bargs
;
3293 struct btrfs_path
*path
;
3294 struct extent_buffer
*leaf
;
3295 struct btrfs_key key
;
3298 path
= btrfs_alloc_path();
3302 trans
= btrfs_start_transaction_fallback_global_rsv(root
, 0);
3303 if (IS_ERR(trans
)) {
3304 btrfs_free_path(path
);
3305 return PTR_ERR(trans
);
3308 key
.objectid
= BTRFS_BALANCE_OBJECTID
;
3309 key
.type
= BTRFS_TEMPORARY_ITEM_KEY
;
3312 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
,
3317 leaf
= path
->nodes
[0];
3318 item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_balance_item
);
3320 memzero_extent_buffer(leaf
, (unsigned long)item
, sizeof(*item
));
3322 btrfs_cpu_balance_args_to_disk(&disk_bargs
, &bctl
->data
);
3323 btrfs_set_balance_data(leaf
, item
, &disk_bargs
);
3324 btrfs_cpu_balance_args_to_disk(&disk_bargs
, &bctl
->meta
);
3325 btrfs_set_balance_meta(leaf
, item
, &disk_bargs
);
3326 btrfs_cpu_balance_args_to_disk(&disk_bargs
, &bctl
->sys
);
3327 btrfs_set_balance_sys(leaf
, item
, &disk_bargs
);
3329 btrfs_set_balance_flags(leaf
, item
, bctl
->flags
);
3331 btrfs_mark_buffer_dirty(leaf
);
3333 btrfs_free_path(path
);
3334 err
= btrfs_commit_transaction(trans
);
3340 static int del_balance_item(struct btrfs_fs_info
*fs_info
)
3342 struct btrfs_root
*root
= fs_info
->tree_root
;
3343 struct btrfs_trans_handle
*trans
;
3344 struct btrfs_path
*path
;
3345 struct btrfs_key key
;
3348 path
= btrfs_alloc_path();
3352 trans
= btrfs_start_transaction(root
, 0);
3353 if (IS_ERR(trans
)) {
3354 btrfs_free_path(path
);
3355 return PTR_ERR(trans
);
3358 key
.objectid
= BTRFS_BALANCE_OBJECTID
;
3359 key
.type
= BTRFS_TEMPORARY_ITEM_KEY
;
3362 ret
= btrfs_search_slot(trans
, root
, &key
, path
, -1, 1);
3370 ret
= btrfs_del_item(trans
, root
, path
);
3372 btrfs_free_path(path
);
3373 err
= btrfs_commit_transaction(trans
);
3380 * This is a heuristic used to reduce the number of chunks balanced on
3381 * resume after balance was interrupted.
3383 static void update_balance_args(struct btrfs_balance_control
*bctl
)
3386 * Turn on soft mode for chunk types that were being converted.
3388 if (bctl
->data
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)
3389 bctl
->data
.flags
|= BTRFS_BALANCE_ARGS_SOFT
;
3390 if (bctl
->sys
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)
3391 bctl
->sys
.flags
|= BTRFS_BALANCE_ARGS_SOFT
;
3392 if (bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)
3393 bctl
->meta
.flags
|= BTRFS_BALANCE_ARGS_SOFT
;
3396 * Turn on usage filter if is not already used. The idea is
3397 * that chunks that we have already balanced should be
3398 * reasonably full. Don't do it for chunks that are being
3399 * converted - that will keep us from relocating unconverted
3400 * (albeit full) chunks.
3402 if (!(bctl
->data
.flags
& BTRFS_BALANCE_ARGS_USAGE
) &&
3403 !(bctl
->data
.flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
) &&
3404 !(bctl
->data
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)) {
3405 bctl
->data
.flags
|= BTRFS_BALANCE_ARGS_USAGE
;
3406 bctl
->data
.usage
= 90;
3408 if (!(bctl
->sys
.flags
& BTRFS_BALANCE_ARGS_USAGE
) &&
3409 !(bctl
->sys
.flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
) &&
3410 !(bctl
->sys
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)) {
3411 bctl
->sys
.flags
|= BTRFS_BALANCE_ARGS_USAGE
;
3412 bctl
->sys
.usage
= 90;
3414 if (!(bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_USAGE
) &&
3415 !(bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
) &&
3416 !(bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_CONVERT
)) {
3417 bctl
->meta
.flags
|= BTRFS_BALANCE_ARGS_USAGE
;
3418 bctl
->meta
.usage
= 90;
3423 * Clear the balance status in fs_info and delete the balance item from disk.
3425 static void reset_balance_state(struct btrfs_fs_info
*fs_info
)
3427 struct btrfs_balance_control
*bctl
= fs_info
->balance_ctl
;
3430 BUG_ON(!fs_info
->balance_ctl
);
3432 spin_lock(&fs_info
->balance_lock
);
3433 fs_info
->balance_ctl
= NULL
;
3434 spin_unlock(&fs_info
->balance_lock
);
3437 ret
= del_balance_item(fs_info
);
3439 btrfs_handle_fs_error(fs_info
, ret
, NULL
);
3443 * Balance filters. Return 1 if chunk should be filtered out
3444 * (should not be balanced).
3446 static int chunk_profiles_filter(u64 chunk_type
,
3447 struct btrfs_balance_args
*bargs
)
3449 chunk_type
= chunk_to_extended(chunk_type
) &
3450 BTRFS_EXTENDED_PROFILE_MASK
;
3452 if (bargs
->profiles
& chunk_type
)
3458 static int chunk_usage_range_filter(struct btrfs_fs_info
*fs_info
, u64 chunk_offset
,
3459 struct btrfs_balance_args
*bargs
)
3461 struct btrfs_block_group_cache
*cache
;
3463 u64 user_thresh_min
;
3464 u64 user_thresh_max
;
3467 cache
= btrfs_lookup_block_group(fs_info
, chunk_offset
);
3468 chunk_used
= btrfs_block_group_used(&cache
->item
);
3470 if (bargs
->usage_min
== 0)
3471 user_thresh_min
= 0;
3473 user_thresh_min
= div_factor_fine(cache
->key
.offset
,
3476 if (bargs
->usage_max
== 0)
3477 user_thresh_max
= 1;
3478 else if (bargs
->usage_max
> 100)
3479 user_thresh_max
= cache
->key
.offset
;
3481 user_thresh_max
= div_factor_fine(cache
->key
.offset
,
3484 if (user_thresh_min
<= chunk_used
&& chunk_used
< user_thresh_max
)
3487 btrfs_put_block_group(cache
);
3491 static int chunk_usage_filter(struct btrfs_fs_info
*fs_info
,
3492 u64 chunk_offset
, struct btrfs_balance_args
*bargs
)
3494 struct btrfs_block_group_cache
*cache
;
3495 u64 chunk_used
, user_thresh
;
3498 cache
= btrfs_lookup_block_group(fs_info
, chunk_offset
);
3499 chunk_used
= btrfs_block_group_used(&cache
->item
);
3501 if (bargs
->usage_min
== 0)
3503 else if (bargs
->usage
> 100)
3504 user_thresh
= cache
->key
.offset
;
3506 user_thresh
= div_factor_fine(cache
->key
.offset
,
3509 if (chunk_used
< user_thresh
)
3512 btrfs_put_block_group(cache
);
3516 static int chunk_devid_filter(struct extent_buffer
*leaf
,
3517 struct btrfs_chunk
*chunk
,
3518 struct btrfs_balance_args
*bargs
)
3520 struct btrfs_stripe
*stripe
;
3521 int num_stripes
= btrfs_chunk_num_stripes(leaf
, chunk
);
3524 for (i
= 0; i
< num_stripes
; i
++) {
3525 stripe
= btrfs_stripe_nr(chunk
, i
);
3526 if (btrfs_stripe_devid(leaf
, stripe
) == bargs
->devid
)
3533 static u64
calc_data_stripes(u64 type
, int num_stripes
)
3535 const int index
= btrfs_bg_flags_to_raid_index(type
);
3536 const int ncopies
= btrfs_raid_array
[index
].ncopies
;
3537 const int nparity
= btrfs_raid_array
[index
].nparity
;
3540 return num_stripes
- nparity
;
3542 return num_stripes
/ ncopies
;
3545 /* [pstart, pend) */
3546 static int chunk_drange_filter(struct extent_buffer
*leaf
,
3547 struct btrfs_chunk
*chunk
,
3548 struct btrfs_balance_args
*bargs
)
3550 struct btrfs_stripe
*stripe
;
3551 int num_stripes
= btrfs_chunk_num_stripes(leaf
, chunk
);
3558 if (!(bargs
->flags
& BTRFS_BALANCE_ARGS_DEVID
))
3561 type
= btrfs_chunk_type(leaf
, chunk
);
3562 factor
= calc_data_stripes(type
, num_stripes
);
3564 for (i
= 0; i
< num_stripes
; i
++) {
3565 stripe
= btrfs_stripe_nr(chunk
, i
);
3566 if (btrfs_stripe_devid(leaf
, stripe
) != bargs
->devid
)
3569 stripe_offset
= btrfs_stripe_offset(leaf
, stripe
);
3570 stripe_length
= btrfs_chunk_length(leaf
, chunk
);
3571 stripe_length
= div_u64(stripe_length
, factor
);
3573 if (stripe_offset
< bargs
->pend
&&
3574 stripe_offset
+ stripe_length
> bargs
->pstart
)
3581 /* [vstart, vend) */
3582 static int chunk_vrange_filter(struct extent_buffer
*leaf
,
3583 struct btrfs_chunk
*chunk
,
3585 struct btrfs_balance_args
*bargs
)
3587 if (chunk_offset
< bargs
->vend
&&
3588 chunk_offset
+ btrfs_chunk_length(leaf
, chunk
) > bargs
->vstart
)
3589 /* at least part of the chunk is inside this vrange */
3595 static int chunk_stripes_range_filter(struct extent_buffer
*leaf
,
3596 struct btrfs_chunk
*chunk
,
3597 struct btrfs_balance_args
*bargs
)
3599 int num_stripes
= btrfs_chunk_num_stripes(leaf
, chunk
);
3601 if (bargs
->stripes_min
<= num_stripes
3602 && num_stripes
<= bargs
->stripes_max
)
3608 static int chunk_soft_convert_filter(u64 chunk_type
,
3609 struct btrfs_balance_args
*bargs
)
3611 if (!(bargs
->flags
& BTRFS_BALANCE_ARGS_CONVERT
))
3614 chunk_type
= chunk_to_extended(chunk_type
) &
3615 BTRFS_EXTENDED_PROFILE_MASK
;
3617 if (bargs
->target
== chunk_type
)
3623 static int should_balance_chunk(struct extent_buffer
*leaf
,
3624 struct btrfs_chunk
*chunk
, u64 chunk_offset
)
3626 struct btrfs_fs_info
*fs_info
= leaf
->fs_info
;
3627 struct btrfs_balance_control
*bctl
= fs_info
->balance_ctl
;
3628 struct btrfs_balance_args
*bargs
= NULL
;
3629 u64 chunk_type
= btrfs_chunk_type(leaf
, chunk
);
3632 if (!((chunk_type
& BTRFS_BLOCK_GROUP_TYPE_MASK
) &
3633 (bctl
->flags
& BTRFS_BALANCE_TYPE_MASK
))) {
3637 if (chunk_type
& BTRFS_BLOCK_GROUP_DATA
)
3638 bargs
= &bctl
->data
;
3639 else if (chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
)
3641 else if (chunk_type
& BTRFS_BLOCK_GROUP_METADATA
)
3642 bargs
= &bctl
->meta
;
3644 /* profiles filter */
3645 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_PROFILES
) &&
3646 chunk_profiles_filter(chunk_type
, bargs
)) {
3651 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_USAGE
) &&
3652 chunk_usage_filter(fs_info
, chunk_offset
, bargs
)) {
3654 } else if ((bargs
->flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
) &&
3655 chunk_usage_range_filter(fs_info
, chunk_offset
, bargs
)) {
3660 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_DEVID
) &&
3661 chunk_devid_filter(leaf
, chunk
, bargs
)) {
3665 /* drange filter, makes sense only with devid filter */
3666 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_DRANGE
) &&
3667 chunk_drange_filter(leaf
, chunk
, bargs
)) {
3672 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_VRANGE
) &&
3673 chunk_vrange_filter(leaf
, chunk
, chunk_offset
, bargs
)) {
3677 /* stripes filter */
3678 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_STRIPES_RANGE
) &&
3679 chunk_stripes_range_filter(leaf
, chunk
, bargs
)) {
3683 /* soft profile changing mode */
3684 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_SOFT
) &&
3685 chunk_soft_convert_filter(chunk_type
, bargs
)) {
3690 * limited by count, must be the last filter
3692 if ((bargs
->flags
& BTRFS_BALANCE_ARGS_LIMIT
)) {
3693 if (bargs
->limit
== 0)
3697 } else if ((bargs
->flags
& BTRFS_BALANCE_ARGS_LIMIT_RANGE
)) {
3699 * Same logic as the 'limit' filter; the minimum cannot be
3700 * determined here because we do not have the global information
3701 * about the count of all chunks that satisfy the filters.
3703 if (bargs
->limit_max
== 0)
3712 static int __btrfs_balance(struct btrfs_fs_info
*fs_info
)
3714 struct btrfs_balance_control
*bctl
= fs_info
->balance_ctl
;
3715 struct btrfs_root
*chunk_root
= fs_info
->chunk_root
;
3717 struct btrfs_chunk
*chunk
;
3718 struct btrfs_path
*path
= NULL
;
3719 struct btrfs_key key
;
3720 struct btrfs_key found_key
;
3721 struct extent_buffer
*leaf
;
3724 int enospc_errors
= 0;
3725 bool counting
= true;
3726 /* The single value limit and min/max limits use the same bytes in the */
3727 u64 limit_data
= bctl
->data
.limit
;
3728 u64 limit_meta
= bctl
->meta
.limit
;
3729 u64 limit_sys
= bctl
->sys
.limit
;
3733 int chunk_reserved
= 0;
3735 path
= btrfs_alloc_path();
3741 /* zero out stat counters */
3742 spin_lock(&fs_info
->balance_lock
);
3743 memset(&bctl
->stat
, 0, sizeof(bctl
->stat
));
3744 spin_unlock(&fs_info
->balance_lock
);
3748 * The single value limit and min/max limits use the same bytes
3751 bctl
->data
.limit
= limit_data
;
3752 bctl
->meta
.limit
= limit_meta
;
3753 bctl
->sys
.limit
= limit_sys
;
3755 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
3756 key
.offset
= (u64
)-1;
3757 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
3760 if ((!counting
&& atomic_read(&fs_info
->balance_pause_req
)) ||
3761 atomic_read(&fs_info
->balance_cancel_req
)) {
3766 mutex_lock(&fs_info
->delete_unused_bgs_mutex
);
3767 ret
= btrfs_search_slot(NULL
, chunk_root
, &key
, path
, 0, 0);
3769 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3774 * this shouldn't happen, it means the last relocate
3778 BUG(); /* FIXME break ? */
3780 ret
= btrfs_previous_item(chunk_root
, path
, 0,
3781 BTRFS_CHUNK_ITEM_KEY
);
3783 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3788 leaf
= path
->nodes
[0];
3789 slot
= path
->slots
[0];
3790 btrfs_item_key_to_cpu(leaf
, &found_key
, slot
);
3792 if (found_key
.objectid
!= key
.objectid
) {
3793 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3797 chunk
= btrfs_item_ptr(leaf
, slot
, struct btrfs_chunk
);
3798 chunk_type
= btrfs_chunk_type(leaf
, chunk
);
3801 spin_lock(&fs_info
->balance_lock
);
3802 bctl
->stat
.considered
++;
3803 spin_unlock(&fs_info
->balance_lock
);
3806 ret
= should_balance_chunk(leaf
, chunk
, found_key
.offset
);
3808 btrfs_release_path(path
);
3810 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3815 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3816 spin_lock(&fs_info
->balance_lock
);
3817 bctl
->stat
.expected
++;
3818 spin_unlock(&fs_info
->balance_lock
);
3820 if (chunk_type
& BTRFS_BLOCK_GROUP_DATA
)
3822 else if (chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
)
3824 else if (chunk_type
& BTRFS_BLOCK_GROUP_METADATA
)
3831 * Apply limit_min filter, no need to check if the LIMITS
3832 * filter is used, limit_min is 0 by default
3834 if (((chunk_type
& BTRFS_BLOCK_GROUP_DATA
) &&
3835 count_data
< bctl
->data
.limit_min
)
3836 || ((chunk_type
& BTRFS_BLOCK_GROUP_METADATA
) &&
3837 count_meta
< bctl
->meta
.limit_min
)
3838 || ((chunk_type
& BTRFS_BLOCK_GROUP_SYSTEM
) &&
3839 count_sys
< bctl
->sys
.limit_min
)) {
3840 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3844 if (!chunk_reserved
) {
3846 * We may be relocating the only data chunk we have,
3847 * which could potentially end up with losing data's
3848 * raid profile, so lets allocate an empty one in
3851 ret
= btrfs_may_alloc_data_chunk(fs_info
,
3854 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3856 } else if (ret
== 1) {
3861 ret
= btrfs_relocate_chunk(fs_info
, found_key
.offset
);
3862 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
3863 if (ret
== -ENOSPC
) {
3865 } else if (ret
== -ETXTBSY
) {
3867 "skipping relocation of block group %llu due to active swapfile",
3873 spin_lock(&fs_info
->balance_lock
);
3874 bctl
->stat
.completed
++;
3875 spin_unlock(&fs_info
->balance_lock
);
3878 if (found_key
.offset
== 0)
3880 key
.offset
= found_key
.offset
- 1;
3884 btrfs_release_path(path
);
3889 btrfs_free_path(path
);
3890 if (enospc_errors
) {
3891 btrfs_info(fs_info
, "%d enospc errors during balance",
3901 * alloc_profile_is_valid - see if a given profile is valid and reduced
3902 * @flags: profile to validate
3903 * @extended: if true @flags is treated as an extended profile
3905 static int alloc_profile_is_valid(u64 flags
, int extended
)
3907 u64 mask
= (extended
? BTRFS_EXTENDED_PROFILE_MASK
:
3908 BTRFS_BLOCK_GROUP_PROFILE_MASK
);
3910 flags
&= ~BTRFS_BLOCK_GROUP_TYPE_MASK
;
3912 /* 1) check that all other bits are zeroed */
3916 /* 2) see if profile is reduced */
3918 return !extended
; /* "0" is valid for usual profiles */
3920 /* true if exactly one bit set */
3922 * Don't use is_power_of_2(unsigned long) because it won't work
3923 * for the single profile (1ULL << 48) on 32-bit CPUs.
3925 return flags
!= 0 && (flags
& (flags
- 1)) == 0;
3928 static inline int balance_need_close(struct btrfs_fs_info
*fs_info
)
3930 /* cancel requested || normal exit path */
3931 return atomic_read(&fs_info
->balance_cancel_req
) ||
3932 (atomic_read(&fs_info
->balance_pause_req
) == 0 &&
3933 atomic_read(&fs_info
->balance_cancel_req
) == 0);
3936 /* Non-zero return value signifies invalidity */
3937 static inline int validate_convert_profile(struct btrfs_balance_args
*bctl_arg
,
3940 return ((bctl_arg
->flags
& BTRFS_BALANCE_ARGS_CONVERT
) &&
3941 (!alloc_profile_is_valid(bctl_arg
->target
, 1) ||
3942 (bctl_arg
->target
& ~allowed
)));
3946 * Fill @buf with textual description of balance filter flags @bargs, up to
3947 * @size_buf including the terminating null. The output may be trimmed if it
3948 * does not fit into the provided buffer.
3950 static void describe_balance_args(struct btrfs_balance_args
*bargs
, char *buf
,
3954 u32 size_bp
= size_buf
;
3956 u64 flags
= bargs
->flags
;
3957 char tmp_buf
[128] = {'\0'};
3962 #define CHECK_APPEND_NOARG(a) \
3964 ret = snprintf(bp, size_bp, (a)); \
3965 if (ret < 0 || ret >= size_bp) \
3966 goto out_overflow; \
3971 #define CHECK_APPEND_1ARG(a, v1) \
3973 ret = snprintf(bp, size_bp, (a), (v1)); \
3974 if (ret < 0 || ret >= size_bp) \
3975 goto out_overflow; \
3980 #define CHECK_APPEND_2ARG(a, v1, v2) \
3982 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
3983 if (ret < 0 || ret >= size_bp) \
3984 goto out_overflow; \
3989 if (flags
& BTRFS_BALANCE_ARGS_CONVERT
)
3990 CHECK_APPEND_1ARG("convert=%s,",
3991 btrfs_bg_type_to_raid_name(bargs
->target
));
3993 if (flags
& BTRFS_BALANCE_ARGS_SOFT
)
3994 CHECK_APPEND_NOARG("soft,");
3996 if (flags
& BTRFS_BALANCE_ARGS_PROFILES
) {
3997 btrfs_describe_block_groups(bargs
->profiles
, tmp_buf
,
3999 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf
);
4002 if (flags
& BTRFS_BALANCE_ARGS_USAGE
)
4003 CHECK_APPEND_1ARG("usage=%llu,", bargs
->usage
);
4005 if (flags
& BTRFS_BALANCE_ARGS_USAGE_RANGE
)
4006 CHECK_APPEND_2ARG("usage=%u..%u,",
4007 bargs
->usage_min
, bargs
->usage_max
);
4009 if (flags
& BTRFS_BALANCE_ARGS_DEVID
)
4010 CHECK_APPEND_1ARG("devid=%llu,", bargs
->devid
);
4012 if (flags
& BTRFS_BALANCE_ARGS_DRANGE
)
4013 CHECK_APPEND_2ARG("drange=%llu..%llu,",
4014 bargs
->pstart
, bargs
->pend
);
4016 if (flags
& BTRFS_BALANCE_ARGS_VRANGE
)
4017 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4018 bargs
->vstart
, bargs
->vend
);
4020 if (flags
& BTRFS_BALANCE_ARGS_LIMIT
)
4021 CHECK_APPEND_1ARG("limit=%llu,", bargs
->limit
);
4023 if (flags
& BTRFS_BALANCE_ARGS_LIMIT_RANGE
)
4024 CHECK_APPEND_2ARG("limit=%u..%u,",
4025 bargs
->limit_min
, bargs
->limit_max
);
4027 if (flags
& BTRFS_BALANCE_ARGS_STRIPES_RANGE
)
4028 CHECK_APPEND_2ARG("stripes=%u..%u,",
4029 bargs
->stripes_min
, bargs
->stripes_max
);
4031 #undef CHECK_APPEND_2ARG
4032 #undef CHECK_APPEND_1ARG
4033 #undef CHECK_APPEND_NOARG
4037 if (size_bp
< size_buf
)
4038 buf
[size_buf
- size_bp
- 1] = '\0'; /* remove last , */
4043 static void describe_balance_start_or_resume(struct btrfs_fs_info
*fs_info
)
4045 u32 size_buf
= 1024;
4046 char tmp_buf
[192] = {'\0'};
4049 u32 size_bp
= size_buf
;
4051 struct btrfs_balance_control
*bctl
= fs_info
->balance_ctl
;
4053 buf
= kzalloc(size_buf
, GFP_KERNEL
);
4059 #define CHECK_APPEND_1ARG(a, v1) \
4061 ret = snprintf(bp, size_bp, (a), (v1)); \
4062 if (ret < 0 || ret >= size_bp) \
4063 goto out_overflow; \
4068 if (bctl
->flags
& BTRFS_BALANCE_FORCE
)
4069 CHECK_APPEND_1ARG("%s", "-f ");
4071 if (bctl
->flags
& BTRFS_BALANCE_DATA
) {
4072 describe_balance_args(&bctl
->data
, tmp_buf
, sizeof(tmp_buf
));
4073 CHECK_APPEND_1ARG("-d%s ", tmp_buf
);
4076 if (bctl
->flags
& BTRFS_BALANCE_METADATA
) {
4077 describe_balance_args(&bctl
->meta
, tmp_buf
, sizeof(tmp_buf
));
4078 CHECK_APPEND_1ARG("-m%s ", tmp_buf
);
4081 if (bctl
->flags
& BTRFS_BALANCE_SYSTEM
) {
4082 describe_balance_args(&bctl
->sys
, tmp_buf
, sizeof(tmp_buf
));
4083 CHECK_APPEND_1ARG("-s%s ", tmp_buf
);
4086 #undef CHECK_APPEND_1ARG
4090 if (size_bp
< size_buf
)
4091 buf
[size_buf
- size_bp
- 1] = '\0'; /* remove last " " */
4092 btrfs_info(fs_info
, "balance: %s %s",
4093 (bctl
->flags
& BTRFS_BALANCE_RESUME
) ?
4094 "resume" : "start", buf
);
4100 * Should be called with balance mutexe held
4102 int btrfs_balance(struct btrfs_fs_info
*fs_info
,
4103 struct btrfs_balance_control
*bctl
,
4104 struct btrfs_ioctl_balance_args
*bargs
)
4106 u64 meta_target
, data_target
;
4112 bool reducing_integrity
;
4115 if (btrfs_fs_closing(fs_info
) ||
4116 atomic_read(&fs_info
->balance_pause_req
) ||
4117 atomic_read(&fs_info
->balance_cancel_req
)) {
4122 allowed
= btrfs_super_incompat_flags(fs_info
->super_copy
);
4123 if (allowed
& BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS
)
4127 * In case of mixed groups both data and meta should be picked,
4128 * and identical options should be given for both of them.
4130 allowed
= BTRFS_BALANCE_DATA
| BTRFS_BALANCE_METADATA
;
4131 if (mixed
&& (bctl
->flags
& allowed
)) {
4132 if (!(bctl
->flags
& BTRFS_BALANCE_DATA
) ||
4133 !(bctl
->flags
& BTRFS_BALANCE_METADATA
) ||
4134 memcmp(&bctl
->data
, &bctl
->meta
, sizeof(bctl
->data
))) {
4136 "balance: mixed groups data and metadata options must be the same");
4143 * rw_devices will not change at the moment, device add/delete/replace
4144 * are excluded by EXCL_OP
4146 num_devices
= fs_info
->fs_devices
->rw_devices
;
4149 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4150 * special bit for it, to make it easier to distinguish. Thus we need
4151 * to set it manually, or balance would refuse the profile.
4153 allowed
= BTRFS_AVAIL_ALLOC_BIT_SINGLE
;
4154 for (i
= 0; i
< ARRAY_SIZE(btrfs_raid_array
); i
++)
4155 if (num_devices
>= btrfs_raid_array
[i
].devs_min
)
4156 allowed
|= btrfs_raid_array
[i
].bg_flag
;
4158 if (validate_convert_profile(&bctl
->data
, allowed
)) {
4160 "balance: invalid convert data profile %s",
4161 btrfs_bg_type_to_raid_name(bctl
->data
.target
));
4165 if (validate_convert_profile(&bctl
->meta
, allowed
)) {
4167 "balance: invalid convert metadata profile %s",
4168 btrfs_bg_type_to_raid_name(bctl
->meta
.target
));
4172 if (validate_convert_profile(&bctl
->sys
, allowed
)) {
4174 "balance: invalid convert system profile %s",
4175 btrfs_bg_type_to_raid_name(bctl
->sys
.target
));
4181 * Allow to reduce metadata or system integrity only if force set for
4182 * profiles with redundancy (copies, parity)
4185 for (i
= 0; i
< ARRAY_SIZE(btrfs_raid_array
); i
++) {
4186 if (btrfs_raid_array
[i
].ncopies
>= 2 ||
4187 btrfs_raid_array
[i
].tolerated_failures
>= 1)
4188 allowed
|= btrfs_raid_array
[i
].bg_flag
;
4191 seq
= read_seqbegin(&fs_info
->profiles_lock
);
4193 if (((bctl
->sys
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) &&
4194 (fs_info
->avail_system_alloc_bits
& allowed
) &&
4195 !(bctl
->sys
.target
& allowed
)) ||
4196 ((bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) &&
4197 (fs_info
->avail_metadata_alloc_bits
& allowed
) &&
4198 !(bctl
->meta
.target
& allowed
)))
4199 reducing_integrity
= true;
4201 reducing_integrity
= false;
4203 /* if we're not converting, the target field is uninitialized */
4204 meta_target
= (bctl
->meta
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) ?
4205 bctl
->meta
.target
: fs_info
->avail_metadata_alloc_bits
;
4206 data_target
= (bctl
->data
.flags
& BTRFS_BALANCE_ARGS_CONVERT
) ?
4207 bctl
->data
.target
: fs_info
->avail_data_alloc_bits
;
4208 } while (read_seqretry(&fs_info
->profiles_lock
, seq
));
4210 if (reducing_integrity
) {
4211 if (bctl
->flags
& BTRFS_BALANCE_FORCE
) {
4213 "balance: force reducing metadata integrity");
4216 "balance: reduces metadata integrity, use --force if you want this");
4222 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target
) <
4223 btrfs_get_num_tolerated_disk_barrier_failures(data_target
)) {
4225 "balance: metadata profile %s has lower redundancy than data profile %s",
4226 btrfs_bg_type_to_raid_name(meta_target
),
4227 btrfs_bg_type_to_raid_name(data_target
));
4230 if (fs_info
->send_in_progress
) {
4231 btrfs_warn_rl(fs_info
,
4232 "cannot run balance while send operations are in progress (%d in progress)",
4233 fs_info
->send_in_progress
);
4238 ret
= insert_balance_item(fs_info
, bctl
);
4239 if (ret
&& ret
!= -EEXIST
)
4242 if (!(bctl
->flags
& BTRFS_BALANCE_RESUME
)) {
4243 BUG_ON(ret
== -EEXIST
);
4244 BUG_ON(fs_info
->balance_ctl
);
4245 spin_lock(&fs_info
->balance_lock
);
4246 fs_info
->balance_ctl
= bctl
;
4247 spin_unlock(&fs_info
->balance_lock
);
4249 BUG_ON(ret
!= -EEXIST
);
4250 spin_lock(&fs_info
->balance_lock
);
4251 update_balance_args(bctl
);
4252 spin_unlock(&fs_info
->balance_lock
);
4255 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
));
4256 set_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
);
4257 describe_balance_start_or_resume(fs_info
);
4258 mutex_unlock(&fs_info
->balance_mutex
);
4260 ret
= __btrfs_balance(fs_info
);
4262 mutex_lock(&fs_info
->balance_mutex
);
4263 if (ret
== -ECANCELED
&& atomic_read(&fs_info
->balance_pause_req
))
4264 btrfs_info(fs_info
, "balance: paused");
4266 * Balance can be canceled by:
4268 * - Regular cancel request
4269 * Then ret == -ECANCELED and balance_cancel_req > 0
4271 * - Fatal signal to "btrfs" process
4272 * Either the signal caught by wait_reserve_ticket() and callers
4273 * got -EINTR, or caught by btrfs_should_cancel_balance() and
4275 * Either way, in this case balance_cancel_req = 0, and
4276 * ret == -EINTR or ret == -ECANCELED.
4278 * So here we only check the return value to catch canceled balance.
4280 else if (ret
== -ECANCELED
|| ret
== -EINTR
)
4281 btrfs_info(fs_info
, "balance: canceled");
4283 btrfs_info(fs_info
, "balance: ended with status: %d", ret
);
4285 clear_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
);
4288 memset(bargs
, 0, sizeof(*bargs
));
4289 btrfs_update_ioctl_balance_args(fs_info
, bargs
);
4292 if ((ret
&& ret
!= -ECANCELED
&& ret
!= -ENOSPC
) ||
4293 balance_need_close(fs_info
)) {
4294 reset_balance_state(fs_info
);
4295 clear_bit(BTRFS_FS_EXCL_OP
, &fs_info
->flags
);
4298 wake_up(&fs_info
->balance_wait_q
);
4302 if (bctl
->flags
& BTRFS_BALANCE_RESUME
)
4303 reset_balance_state(fs_info
);
4306 clear_bit(BTRFS_FS_EXCL_OP
, &fs_info
->flags
);
4311 static int balance_kthread(void *data
)
4313 struct btrfs_fs_info
*fs_info
= data
;
4316 mutex_lock(&fs_info
->balance_mutex
);
4317 if (fs_info
->balance_ctl
)
4318 ret
= btrfs_balance(fs_info
, fs_info
->balance_ctl
, NULL
);
4319 mutex_unlock(&fs_info
->balance_mutex
);
4324 int btrfs_resume_balance_async(struct btrfs_fs_info
*fs_info
)
4326 struct task_struct
*tsk
;
4328 mutex_lock(&fs_info
->balance_mutex
);
4329 if (!fs_info
->balance_ctl
) {
4330 mutex_unlock(&fs_info
->balance_mutex
);
4333 mutex_unlock(&fs_info
->balance_mutex
);
4335 if (btrfs_test_opt(fs_info
, SKIP_BALANCE
)) {
4336 btrfs_info(fs_info
, "balance: resume skipped");
4341 * A ro->rw remount sequence should continue with the paused balance
4342 * regardless of who pauses it, system or the user as of now, so set
4345 spin_lock(&fs_info
->balance_lock
);
4346 fs_info
->balance_ctl
->flags
|= BTRFS_BALANCE_RESUME
;
4347 spin_unlock(&fs_info
->balance_lock
);
4349 tsk
= kthread_run(balance_kthread
, fs_info
, "btrfs-balance");
4350 return PTR_ERR_OR_ZERO(tsk
);
4353 int btrfs_recover_balance(struct btrfs_fs_info
*fs_info
)
4355 struct btrfs_balance_control
*bctl
;
4356 struct btrfs_balance_item
*item
;
4357 struct btrfs_disk_balance_args disk_bargs
;
4358 struct btrfs_path
*path
;
4359 struct extent_buffer
*leaf
;
4360 struct btrfs_key key
;
4363 path
= btrfs_alloc_path();
4367 key
.objectid
= BTRFS_BALANCE_OBJECTID
;
4368 key
.type
= BTRFS_TEMPORARY_ITEM_KEY
;
4371 ret
= btrfs_search_slot(NULL
, fs_info
->tree_root
, &key
, path
, 0, 0);
4374 if (ret
> 0) { /* ret = -ENOENT; */
4379 bctl
= kzalloc(sizeof(*bctl
), GFP_NOFS
);
4385 leaf
= path
->nodes
[0];
4386 item
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_balance_item
);
4388 bctl
->flags
= btrfs_balance_flags(leaf
, item
);
4389 bctl
->flags
|= BTRFS_BALANCE_RESUME
;
4391 btrfs_balance_data(leaf
, item
, &disk_bargs
);
4392 btrfs_disk_balance_args_to_cpu(&bctl
->data
, &disk_bargs
);
4393 btrfs_balance_meta(leaf
, item
, &disk_bargs
);
4394 btrfs_disk_balance_args_to_cpu(&bctl
->meta
, &disk_bargs
);
4395 btrfs_balance_sys(leaf
, item
, &disk_bargs
);
4396 btrfs_disk_balance_args_to_cpu(&bctl
->sys
, &disk_bargs
);
4399 * This should never happen, as the paused balance state is recovered
4400 * during mount without any chance of other exclusive ops to collide.
4402 * This gives the exclusive op status to balance and keeps in paused
4403 * state until user intervention (cancel or umount). If the ownership
4404 * cannot be assigned, show a message but do not fail. The balance
4405 * is in a paused state and must have fs_info::balance_ctl properly
4408 if (test_and_set_bit(BTRFS_FS_EXCL_OP
, &fs_info
->flags
))
4410 "balance: cannot set exclusive op status, resume manually");
4412 btrfs_release_path(path
);
4414 mutex_lock(&fs_info
->balance_mutex
);
4415 BUG_ON(fs_info
->balance_ctl
);
4416 spin_lock(&fs_info
->balance_lock
);
4417 fs_info
->balance_ctl
= bctl
;
4418 spin_unlock(&fs_info
->balance_lock
);
4419 mutex_unlock(&fs_info
->balance_mutex
);
4421 btrfs_free_path(path
);
4425 int btrfs_pause_balance(struct btrfs_fs_info
*fs_info
)
4429 mutex_lock(&fs_info
->balance_mutex
);
4430 if (!fs_info
->balance_ctl
) {
4431 mutex_unlock(&fs_info
->balance_mutex
);
4435 if (test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
)) {
4436 atomic_inc(&fs_info
->balance_pause_req
);
4437 mutex_unlock(&fs_info
->balance_mutex
);
4439 wait_event(fs_info
->balance_wait_q
,
4440 !test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
));
4442 mutex_lock(&fs_info
->balance_mutex
);
4443 /* we are good with balance_ctl ripped off from under us */
4444 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
));
4445 atomic_dec(&fs_info
->balance_pause_req
);
4450 mutex_unlock(&fs_info
->balance_mutex
);
4454 int btrfs_cancel_balance(struct btrfs_fs_info
*fs_info
)
4456 mutex_lock(&fs_info
->balance_mutex
);
4457 if (!fs_info
->balance_ctl
) {
4458 mutex_unlock(&fs_info
->balance_mutex
);
4463 * A paused balance with the item stored on disk can be resumed at
4464 * mount time if the mount is read-write. Otherwise it's still paused
4465 * and we must not allow cancelling as it deletes the item.
4467 if (sb_rdonly(fs_info
->sb
)) {
4468 mutex_unlock(&fs_info
->balance_mutex
);
4472 atomic_inc(&fs_info
->balance_cancel_req
);
4474 * if we are running just wait and return, balance item is
4475 * deleted in btrfs_balance in this case
4477 if (test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
)) {
4478 mutex_unlock(&fs_info
->balance_mutex
);
4479 wait_event(fs_info
->balance_wait_q
,
4480 !test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
));
4481 mutex_lock(&fs_info
->balance_mutex
);
4483 mutex_unlock(&fs_info
->balance_mutex
);
4485 * Lock released to allow other waiters to continue, we'll
4486 * reexamine the status again.
4488 mutex_lock(&fs_info
->balance_mutex
);
4490 if (fs_info
->balance_ctl
) {
4491 reset_balance_state(fs_info
);
4492 clear_bit(BTRFS_FS_EXCL_OP
, &fs_info
->flags
);
4493 btrfs_info(fs_info
, "balance: canceled");
4497 BUG_ON(fs_info
->balance_ctl
||
4498 test_bit(BTRFS_FS_BALANCE_RUNNING
, &fs_info
->flags
));
4499 atomic_dec(&fs_info
->balance_cancel_req
);
4500 mutex_unlock(&fs_info
->balance_mutex
);
4504 static int btrfs_uuid_scan_kthread(void *data
)
4506 struct btrfs_fs_info
*fs_info
= data
;
4507 struct btrfs_root
*root
= fs_info
->tree_root
;
4508 struct btrfs_key key
;
4509 struct btrfs_path
*path
= NULL
;
4511 struct extent_buffer
*eb
;
4513 struct btrfs_root_item root_item
;
4515 struct btrfs_trans_handle
*trans
= NULL
;
4517 path
= btrfs_alloc_path();
4524 key
.type
= BTRFS_ROOT_ITEM_KEY
;
4528 ret
= btrfs_search_forward(root
, &key
, path
,
4529 BTRFS_OLDEST_GENERATION
);
4536 if (key
.type
!= BTRFS_ROOT_ITEM_KEY
||
4537 (key
.objectid
< BTRFS_FIRST_FREE_OBJECTID
&&
4538 key
.objectid
!= BTRFS_FS_TREE_OBJECTID
) ||
4539 key
.objectid
> BTRFS_LAST_FREE_OBJECTID
)
4542 eb
= path
->nodes
[0];
4543 slot
= path
->slots
[0];
4544 item_size
= btrfs_item_size_nr(eb
, slot
);
4545 if (item_size
< sizeof(root_item
))
4548 read_extent_buffer(eb
, &root_item
,
4549 btrfs_item_ptr_offset(eb
, slot
),
4550 (int)sizeof(root_item
));
4551 if (btrfs_root_refs(&root_item
) == 0)
4554 if (!btrfs_is_empty_uuid(root_item
.uuid
) ||
4555 !btrfs_is_empty_uuid(root_item
.received_uuid
)) {
4559 btrfs_release_path(path
);
4561 * 1 - subvol uuid item
4562 * 1 - received_subvol uuid item
4564 trans
= btrfs_start_transaction(fs_info
->uuid_root
, 2);
4565 if (IS_ERR(trans
)) {
4566 ret
= PTR_ERR(trans
);
4574 btrfs_release_path(path
);
4575 if (!btrfs_is_empty_uuid(root_item
.uuid
)) {
4576 ret
= btrfs_uuid_tree_add(trans
, root_item
.uuid
,
4577 BTRFS_UUID_KEY_SUBVOL
,
4580 btrfs_warn(fs_info
, "uuid_tree_add failed %d",
4586 if (!btrfs_is_empty_uuid(root_item
.received_uuid
)) {
4587 ret
= btrfs_uuid_tree_add(trans
,
4588 root_item
.received_uuid
,
4589 BTRFS_UUID_KEY_RECEIVED_SUBVOL
,
4592 btrfs_warn(fs_info
, "uuid_tree_add failed %d",
4599 btrfs_release_path(path
);
4601 ret
= btrfs_end_transaction(trans
);
4607 if (key
.offset
< (u64
)-1) {
4609 } else if (key
.type
< BTRFS_ROOT_ITEM_KEY
) {
4611 key
.type
= BTRFS_ROOT_ITEM_KEY
;
4612 } else if (key
.objectid
< (u64
)-1) {
4614 key
.type
= BTRFS_ROOT_ITEM_KEY
;
4623 btrfs_free_path(path
);
4624 if (trans
&& !IS_ERR(trans
))
4625 btrfs_end_transaction(trans
);
4627 btrfs_warn(fs_info
, "btrfs_uuid_scan_kthread failed %d", ret
);
4629 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN
, &fs_info
->flags
);
4630 up(&fs_info
->uuid_tree_rescan_sem
);
4635 * Callback for btrfs_uuid_tree_iterate().
4637 * 0 check succeeded, the entry is not outdated.
4638 * < 0 if an error occurred.
4639 * > 0 if the check failed, which means the caller shall remove the entry.
4641 static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info
*fs_info
,
4642 u8
*uuid
, u8 type
, u64 subid
)
4644 struct btrfs_key key
;
4646 struct btrfs_root
*subvol_root
;
4648 if (type
!= BTRFS_UUID_KEY_SUBVOL
&&
4649 type
!= BTRFS_UUID_KEY_RECEIVED_SUBVOL
)
4652 key
.objectid
= subid
;
4653 key
.type
= BTRFS_ROOT_ITEM_KEY
;
4654 key
.offset
= (u64
)-1;
4655 subvol_root
= btrfs_read_fs_root_no_name(fs_info
, &key
);
4656 if (IS_ERR(subvol_root
)) {
4657 ret
= PTR_ERR(subvol_root
);
4664 case BTRFS_UUID_KEY_SUBVOL
:
4665 if (memcmp(uuid
, subvol_root
->root_item
.uuid
, BTRFS_UUID_SIZE
))
4668 case BTRFS_UUID_KEY_RECEIVED_SUBVOL
:
4669 if (memcmp(uuid
, subvol_root
->root_item
.received_uuid
,
4679 static int btrfs_uuid_rescan_kthread(void *data
)
4681 struct btrfs_fs_info
*fs_info
= (struct btrfs_fs_info
*)data
;
4685 * 1st step is to iterate through the existing UUID tree and
4686 * to delete all entries that contain outdated data.
4687 * 2nd step is to add all missing entries to the UUID tree.
4689 ret
= btrfs_uuid_tree_iterate(fs_info
, btrfs_check_uuid_tree_entry
);
4691 btrfs_warn(fs_info
, "iterating uuid_tree failed %d", ret
);
4692 up(&fs_info
->uuid_tree_rescan_sem
);
4695 return btrfs_uuid_scan_kthread(data
);
4698 int btrfs_create_uuid_tree(struct btrfs_fs_info
*fs_info
)
4700 struct btrfs_trans_handle
*trans
;
4701 struct btrfs_root
*tree_root
= fs_info
->tree_root
;
4702 struct btrfs_root
*uuid_root
;
4703 struct task_struct
*task
;
4710 trans
= btrfs_start_transaction(tree_root
, 2);
4712 return PTR_ERR(trans
);
4714 uuid_root
= btrfs_create_tree(trans
, BTRFS_UUID_TREE_OBJECTID
);
4715 if (IS_ERR(uuid_root
)) {
4716 ret
= PTR_ERR(uuid_root
);
4717 btrfs_abort_transaction(trans
, ret
);
4718 btrfs_end_transaction(trans
);
4722 fs_info
->uuid_root
= uuid_root
;
4724 ret
= btrfs_commit_transaction(trans
);
4728 down(&fs_info
->uuid_tree_rescan_sem
);
4729 task
= kthread_run(btrfs_uuid_scan_kthread
, fs_info
, "btrfs-uuid");
4731 /* fs_info->update_uuid_tree_gen remains 0 in all error case */
4732 btrfs_warn(fs_info
, "failed to start uuid_scan task");
4733 up(&fs_info
->uuid_tree_rescan_sem
);
4734 return PTR_ERR(task
);
4740 int btrfs_check_uuid_tree(struct btrfs_fs_info
*fs_info
)
4742 struct task_struct
*task
;
4744 down(&fs_info
->uuid_tree_rescan_sem
);
4745 task
= kthread_run(btrfs_uuid_rescan_kthread
, fs_info
, "btrfs-uuid");
4747 /* fs_info->update_uuid_tree_gen remains 0 in all error case */
4748 btrfs_warn(fs_info
, "failed to start uuid_rescan task");
4749 up(&fs_info
->uuid_tree_rescan_sem
);
4750 return PTR_ERR(task
);
4757 * shrinking a device means finding all of the device extents past
4758 * the new size, and then following the back refs to the chunks.
4759 * The chunk relocation code actually frees the device extent
4761 int btrfs_shrink_device(struct btrfs_device
*device
, u64 new_size
)
4763 struct btrfs_fs_info
*fs_info
= device
->fs_info
;
4764 struct btrfs_root
*root
= fs_info
->dev_root
;
4765 struct btrfs_trans_handle
*trans
;
4766 struct btrfs_dev_extent
*dev_extent
= NULL
;
4767 struct btrfs_path
*path
;
4773 bool retried
= false;
4774 struct extent_buffer
*l
;
4775 struct btrfs_key key
;
4776 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
4777 u64 old_total
= btrfs_super_total_bytes(super_copy
);
4778 u64 old_size
= btrfs_device_get_total_bytes(device
);
4782 new_size
= round_down(new_size
, fs_info
->sectorsize
);
4784 diff
= round_down(old_size
- new_size
, fs_info
->sectorsize
);
4786 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
))
4789 path
= btrfs_alloc_path();
4793 path
->reada
= READA_BACK
;
4795 trans
= btrfs_start_transaction(root
, 0);
4796 if (IS_ERR(trans
)) {
4797 btrfs_free_path(path
);
4798 return PTR_ERR(trans
);
4801 mutex_lock(&fs_info
->chunk_mutex
);
4803 btrfs_device_set_total_bytes(device
, new_size
);
4804 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
4805 device
->fs_devices
->total_rw_bytes
-= diff
;
4806 atomic64_sub(diff
, &fs_info
->free_chunk_space
);
4810 * Once the device's size has been set to the new size, ensure all
4811 * in-memory chunks are synced to disk so that the loop below sees them
4812 * and relocates them accordingly.
4814 if (contains_pending_extent(device
, &start
, diff
)) {
4815 mutex_unlock(&fs_info
->chunk_mutex
);
4816 ret
= btrfs_commit_transaction(trans
);
4820 mutex_unlock(&fs_info
->chunk_mutex
);
4821 btrfs_end_transaction(trans
);
4825 key
.objectid
= device
->devid
;
4826 key
.offset
= (u64
)-1;
4827 key
.type
= BTRFS_DEV_EXTENT_KEY
;
4830 mutex_lock(&fs_info
->delete_unused_bgs_mutex
);
4831 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
4833 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
4837 ret
= btrfs_previous_item(root
, path
, 0, key
.type
);
4839 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
4844 btrfs_release_path(path
);
4849 slot
= path
->slots
[0];
4850 btrfs_item_key_to_cpu(l
, &key
, path
->slots
[0]);
4852 if (key
.objectid
!= device
->devid
) {
4853 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
4854 btrfs_release_path(path
);
4858 dev_extent
= btrfs_item_ptr(l
, slot
, struct btrfs_dev_extent
);
4859 length
= btrfs_dev_extent_length(l
, dev_extent
);
4861 if (key
.offset
+ length
<= new_size
) {
4862 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
4863 btrfs_release_path(path
);
4867 chunk_offset
= btrfs_dev_extent_chunk_offset(l
, dev_extent
);
4868 btrfs_release_path(path
);
4871 * We may be relocating the only data chunk we have,
4872 * which could potentially end up with losing data's
4873 * raid profile, so lets allocate an empty one in
4876 ret
= btrfs_may_alloc_data_chunk(fs_info
, chunk_offset
);
4878 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
4882 ret
= btrfs_relocate_chunk(fs_info
, chunk_offset
);
4883 mutex_unlock(&fs_info
->delete_unused_bgs_mutex
);
4884 if (ret
== -ENOSPC
) {
4887 if (ret
== -ETXTBSY
) {
4889 "could not shrink block group %llu due to active swapfile",
4894 } while (key
.offset
-- > 0);
4896 if (failed
&& !retried
) {
4900 } else if (failed
&& retried
) {
4905 /* Shrinking succeeded, else we would be at "done". */
4906 trans
= btrfs_start_transaction(root
, 0);
4907 if (IS_ERR(trans
)) {
4908 ret
= PTR_ERR(trans
);
4912 mutex_lock(&fs_info
->chunk_mutex
);
4913 /* Clear all state bits beyond the shrunk device size */
4914 clear_extent_bits(&device
->alloc_state
, new_size
, (u64
)-1,
4917 btrfs_device_set_disk_total_bytes(device
, new_size
);
4918 if (list_empty(&device
->post_commit_list
))
4919 list_add_tail(&device
->post_commit_list
,
4920 &trans
->transaction
->dev_update_list
);
4922 WARN_ON(diff
> old_total
);
4923 btrfs_set_super_total_bytes(super_copy
,
4924 round_down(old_total
- diff
, fs_info
->sectorsize
));
4925 mutex_unlock(&fs_info
->chunk_mutex
);
4927 /* Now btrfs_update_device() will change the on-disk size. */
4928 ret
= btrfs_update_device(trans
, device
);
4930 btrfs_abort_transaction(trans
, ret
);
4931 btrfs_end_transaction(trans
);
4933 ret
= btrfs_commit_transaction(trans
);
4936 btrfs_free_path(path
);
4938 mutex_lock(&fs_info
->chunk_mutex
);
4939 btrfs_device_set_total_bytes(device
, old_size
);
4940 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
))
4941 device
->fs_devices
->total_rw_bytes
+= diff
;
4942 atomic64_add(diff
, &fs_info
->free_chunk_space
);
4943 mutex_unlock(&fs_info
->chunk_mutex
);
4948 static int btrfs_add_system_chunk(struct btrfs_fs_info
*fs_info
,
4949 struct btrfs_key
*key
,
4950 struct btrfs_chunk
*chunk
, int item_size
)
4952 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
4953 struct btrfs_disk_key disk_key
;
4957 mutex_lock(&fs_info
->chunk_mutex
);
4958 array_size
= btrfs_super_sys_array_size(super_copy
);
4959 if (array_size
+ item_size
+ sizeof(disk_key
)
4960 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE
) {
4961 mutex_unlock(&fs_info
->chunk_mutex
);
4965 ptr
= super_copy
->sys_chunk_array
+ array_size
;
4966 btrfs_cpu_key_to_disk(&disk_key
, key
);
4967 memcpy(ptr
, &disk_key
, sizeof(disk_key
));
4968 ptr
+= sizeof(disk_key
);
4969 memcpy(ptr
, chunk
, item_size
);
4970 item_size
+= sizeof(disk_key
);
4971 btrfs_set_super_sys_array_size(super_copy
, array_size
+ item_size
);
4972 mutex_unlock(&fs_info
->chunk_mutex
);
4978 * sort the devices in descending order by max_avail, total_avail
4980 static int btrfs_cmp_device_info(const void *a
, const void *b
)
4982 const struct btrfs_device_info
*di_a
= a
;
4983 const struct btrfs_device_info
*di_b
= b
;
4985 if (di_a
->max_avail
> di_b
->max_avail
)
4987 if (di_a
->max_avail
< di_b
->max_avail
)
4989 if (di_a
->total_avail
> di_b
->total_avail
)
4991 if (di_a
->total_avail
< di_b
->total_avail
)
4996 static void check_raid56_incompat_flag(struct btrfs_fs_info
*info
, u64 type
)
4998 if (!(type
& BTRFS_BLOCK_GROUP_RAID56_MASK
))
5001 btrfs_set_fs_incompat(info
, RAID56
);
5004 static int __btrfs_alloc_chunk(struct btrfs_trans_handle
*trans
,
5005 u64 start
, u64 type
)
5007 struct btrfs_fs_info
*info
= trans
->fs_info
;
5008 struct btrfs_fs_devices
*fs_devices
= info
->fs_devices
;
5009 struct btrfs_device
*device
;
5010 struct map_lookup
*map
= NULL
;
5011 struct extent_map_tree
*em_tree
;
5012 struct extent_map
*em
;
5013 struct btrfs_device_info
*devices_info
= NULL
;
5015 int num_stripes
; /* total number of stripes to allocate */
5016 int data_stripes
; /* number of stripes that count for
5018 int sub_stripes
; /* sub_stripes info for map */
5019 int dev_stripes
; /* stripes per dev */
5020 int devs_max
; /* max devs to use */
5021 int devs_min
; /* min devs needed */
5022 int devs_increment
; /* ndevs has to be a multiple of this */
5023 int ncopies
; /* how many copies to data has */
5024 int nparity
; /* number of stripes worth of bytes to
5025 store parity information */
5027 u64 max_stripe_size
;
5036 BUG_ON(!alloc_profile_is_valid(type
, 0));
5038 if (list_empty(&fs_devices
->alloc_list
)) {
5039 if (btrfs_test_opt(info
, ENOSPC_DEBUG
))
5040 btrfs_debug(info
, "%s: no writable device", __func__
);
5044 index
= btrfs_bg_flags_to_raid_index(type
);
5046 sub_stripes
= btrfs_raid_array
[index
].sub_stripes
;
5047 dev_stripes
= btrfs_raid_array
[index
].dev_stripes
;
5048 devs_max
= btrfs_raid_array
[index
].devs_max
;
5050 devs_max
= BTRFS_MAX_DEVS(info
);
5051 devs_min
= btrfs_raid_array
[index
].devs_min
;
5052 devs_increment
= btrfs_raid_array
[index
].devs_increment
;
5053 ncopies
= btrfs_raid_array
[index
].ncopies
;
5054 nparity
= btrfs_raid_array
[index
].nparity
;
5056 if (type
& BTRFS_BLOCK_GROUP_DATA
) {
5057 max_stripe_size
= SZ_1G
;
5058 max_chunk_size
= BTRFS_MAX_DATA_CHUNK_SIZE
;
5059 } else if (type
& BTRFS_BLOCK_GROUP_METADATA
) {
5060 /* for larger filesystems, use larger metadata chunks */
5061 if (fs_devices
->total_rw_bytes
> 50ULL * SZ_1G
)
5062 max_stripe_size
= SZ_1G
;
5064 max_stripe_size
= SZ_256M
;
5065 max_chunk_size
= max_stripe_size
;
5066 } else if (type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
5067 max_stripe_size
= SZ_32M
;
5068 max_chunk_size
= 2 * max_stripe_size
;
5069 devs_max
= min_t(int, devs_max
, BTRFS_MAX_DEVS_SYS_CHUNK
);
5071 btrfs_err(info
, "invalid chunk type 0x%llx requested",
5076 /* We don't want a chunk larger than 10% of writable space */
5077 max_chunk_size
= min(div_factor(fs_devices
->total_rw_bytes
, 1),
5080 devices_info
= kcalloc(fs_devices
->rw_devices
, sizeof(*devices_info
),
5086 * in the first pass through the devices list, we gather information
5087 * about the available holes on each device.
5090 list_for_each_entry(device
, &fs_devices
->alloc_list
, dev_alloc_list
) {
5094 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
)) {
5096 "BTRFS: read-only device in alloc_list\n");
5100 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA
,
5101 &device
->dev_state
) ||
5102 test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
))
5105 if (device
->total_bytes
> device
->bytes_used
)
5106 total_avail
= device
->total_bytes
- device
->bytes_used
;
5110 /* If there is no space on this device, skip it. */
5111 if (total_avail
== 0)
5114 ret
= find_free_dev_extent(device
,
5115 max_stripe_size
* dev_stripes
,
5116 &dev_offset
, &max_avail
);
5117 if (ret
&& ret
!= -ENOSPC
)
5121 max_avail
= max_stripe_size
* dev_stripes
;
5123 if (max_avail
< BTRFS_STRIPE_LEN
* dev_stripes
) {
5124 if (btrfs_test_opt(info
, ENOSPC_DEBUG
))
5126 "%s: devid %llu has no free space, have=%llu want=%u",
5127 __func__
, device
->devid
, max_avail
,
5128 BTRFS_STRIPE_LEN
* dev_stripes
);
5132 if (ndevs
== fs_devices
->rw_devices
) {
5133 WARN(1, "%s: found more than %llu devices\n",
5134 __func__
, fs_devices
->rw_devices
);
5137 devices_info
[ndevs
].dev_offset
= dev_offset
;
5138 devices_info
[ndevs
].max_avail
= max_avail
;
5139 devices_info
[ndevs
].total_avail
= total_avail
;
5140 devices_info
[ndevs
].dev
= device
;
5145 * now sort the devices by hole size / available space
5147 sort(devices_info
, ndevs
, sizeof(struct btrfs_device_info
),
5148 btrfs_cmp_device_info
, NULL
);
5150 /* round down to number of usable stripes */
5151 ndevs
= round_down(ndevs
, devs_increment
);
5153 if (ndevs
< devs_min
) {
5155 if (btrfs_test_opt(info
, ENOSPC_DEBUG
)) {
5157 "%s: not enough devices with free space: have=%d minimum required=%d",
5158 __func__
, ndevs
, devs_min
);
5163 ndevs
= min(ndevs
, devs_max
);
5166 * The primary goal is to maximize the number of stripes, so use as
5167 * many devices as possible, even if the stripes are not maximum sized.
5169 * The DUP profile stores more than one stripe per device, the
5170 * max_avail is the total size so we have to adjust.
5172 stripe_size
= div_u64(devices_info
[ndevs
- 1].max_avail
, dev_stripes
);
5173 num_stripes
= ndevs
* dev_stripes
;
5176 * this will have to be fixed for RAID1 and RAID10 over
5179 data_stripes
= (num_stripes
- nparity
) / ncopies
;
5182 * Use the number of data stripes to figure out how big this chunk
5183 * is really going to be in terms of logical address space,
5184 * and compare that answer with the max chunk size. If it's higher,
5185 * we try to reduce stripe_size.
5187 if (stripe_size
* data_stripes
> max_chunk_size
) {
5189 * Reduce stripe_size, round it up to a 16MB boundary again and
5190 * then use it, unless it ends up being even bigger than the
5191 * previous value we had already.
5193 stripe_size
= min(round_up(div_u64(max_chunk_size
,
5194 data_stripes
), SZ_16M
),
5198 /* align to BTRFS_STRIPE_LEN */
5199 stripe_size
= round_down(stripe_size
, BTRFS_STRIPE_LEN
);
5201 map
= kmalloc(map_lookup_size(num_stripes
), GFP_NOFS
);
5206 map
->num_stripes
= num_stripes
;
5208 for (i
= 0; i
< ndevs
; ++i
) {
5209 for (j
= 0; j
< dev_stripes
; ++j
) {
5210 int s
= i
* dev_stripes
+ j
;
5211 map
->stripes
[s
].dev
= devices_info
[i
].dev
;
5212 map
->stripes
[s
].physical
= devices_info
[i
].dev_offset
+
5216 map
->stripe_len
= BTRFS_STRIPE_LEN
;
5217 map
->io_align
= BTRFS_STRIPE_LEN
;
5218 map
->io_width
= BTRFS_STRIPE_LEN
;
5220 map
->sub_stripes
= sub_stripes
;
5222 chunk_size
= stripe_size
* data_stripes
;
5224 trace_btrfs_chunk_alloc(info
, map
, start
, chunk_size
);
5226 em
= alloc_extent_map();
5232 set_bit(EXTENT_FLAG_FS_MAPPING
, &em
->flags
);
5233 em
->map_lookup
= map
;
5235 em
->len
= chunk_size
;
5236 em
->block_start
= 0;
5237 em
->block_len
= em
->len
;
5238 em
->orig_block_len
= stripe_size
;
5240 em_tree
= &info
->mapping_tree
;
5241 write_lock(&em_tree
->lock
);
5242 ret
= add_extent_mapping(em_tree
, em
, 0);
5244 write_unlock(&em_tree
->lock
);
5245 free_extent_map(em
);
5248 write_unlock(&em_tree
->lock
);
5250 ret
= btrfs_make_block_group(trans
, 0, type
, start
, chunk_size
);
5252 goto error_del_extent
;
5254 for (i
= 0; i
< map
->num_stripes
; i
++) {
5255 struct btrfs_device
*dev
= map
->stripes
[i
].dev
;
5257 btrfs_device_set_bytes_used(dev
, dev
->bytes_used
+ stripe_size
);
5258 if (list_empty(&dev
->post_commit_list
))
5259 list_add_tail(&dev
->post_commit_list
,
5260 &trans
->transaction
->dev_update_list
);
5263 atomic64_sub(stripe_size
* map
->num_stripes
, &info
->free_chunk_space
);
5265 free_extent_map(em
);
5266 check_raid56_incompat_flag(info
, type
);
5268 kfree(devices_info
);
5272 write_lock(&em_tree
->lock
);
5273 remove_extent_mapping(em_tree
, em
);
5274 write_unlock(&em_tree
->lock
);
5276 /* One for our allocation */
5277 free_extent_map(em
);
5278 /* One for the tree reference */
5279 free_extent_map(em
);
5281 kfree(devices_info
);
5285 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle
*trans
,
5286 u64 chunk_offset
, u64 chunk_size
)
5288 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
5289 struct btrfs_root
*extent_root
= fs_info
->extent_root
;
5290 struct btrfs_root
*chunk_root
= fs_info
->chunk_root
;
5291 struct btrfs_key key
;
5292 struct btrfs_device
*device
;
5293 struct btrfs_chunk
*chunk
;
5294 struct btrfs_stripe
*stripe
;
5295 struct extent_map
*em
;
5296 struct map_lookup
*map
;
5303 em
= btrfs_get_chunk_map(fs_info
, chunk_offset
, chunk_size
);
5307 map
= em
->map_lookup
;
5308 item_size
= btrfs_chunk_item_size(map
->num_stripes
);
5309 stripe_size
= em
->orig_block_len
;
5311 chunk
= kzalloc(item_size
, GFP_NOFS
);
5318 * Take the device list mutex to prevent races with the final phase of
5319 * a device replace operation that replaces the device object associated
5320 * with the map's stripes, because the device object's id can change
5321 * at any time during that final phase of the device replace operation
5322 * (dev-replace.c:btrfs_dev_replace_finishing()).
5324 mutex_lock(&fs_info
->fs_devices
->device_list_mutex
);
5325 for (i
= 0; i
< map
->num_stripes
; i
++) {
5326 device
= map
->stripes
[i
].dev
;
5327 dev_offset
= map
->stripes
[i
].physical
;
5329 ret
= btrfs_update_device(trans
, device
);
5332 ret
= btrfs_alloc_dev_extent(trans
, device
, chunk_offset
,
5333 dev_offset
, stripe_size
);
5338 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
5342 stripe
= &chunk
->stripe
;
5343 for (i
= 0; i
< map
->num_stripes
; i
++) {
5344 device
= map
->stripes
[i
].dev
;
5345 dev_offset
= map
->stripes
[i
].physical
;
5347 btrfs_set_stack_stripe_devid(stripe
, device
->devid
);
5348 btrfs_set_stack_stripe_offset(stripe
, dev_offset
);
5349 memcpy(stripe
->dev_uuid
, device
->uuid
, BTRFS_UUID_SIZE
);
5352 mutex_unlock(&fs_info
->fs_devices
->device_list_mutex
);
5354 btrfs_set_stack_chunk_length(chunk
, chunk_size
);
5355 btrfs_set_stack_chunk_owner(chunk
, extent_root
->root_key
.objectid
);
5356 btrfs_set_stack_chunk_stripe_len(chunk
, map
->stripe_len
);
5357 btrfs_set_stack_chunk_type(chunk
, map
->type
);
5358 btrfs_set_stack_chunk_num_stripes(chunk
, map
->num_stripes
);
5359 btrfs_set_stack_chunk_io_align(chunk
, map
->stripe_len
);
5360 btrfs_set_stack_chunk_io_width(chunk
, map
->stripe_len
);
5361 btrfs_set_stack_chunk_sector_size(chunk
, fs_info
->sectorsize
);
5362 btrfs_set_stack_chunk_sub_stripes(chunk
, map
->sub_stripes
);
5364 key
.objectid
= BTRFS_FIRST_CHUNK_TREE_OBJECTID
;
5365 key
.type
= BTRFS_CHUNK_ITEM_KEY
;
5366 key
.offset
= chunk_offset
;
5368 ret
= btrfs_insert_item(trans
, chunk_root
, &key
, chunk
, item_size
);
5369 if (ret
== 0 && map
->type
& BTRFS_BLOCK_GROUP_SYSTEM
) {
5371 * TODO: Cleanup of inserted chunk root in case of
5374 ret
= btrfs_add_system_chunk(fs_info
, &key
, chunk
, item_size
);
5379 free_extent_map(em
);
5384 * Chunk allocation falls into two parts. The first part does work
5385 * that makes the new allocated chunk usable, but does not do any operation
5386 * that modifies the chunk tree. The second part does the work that
5387 * requires modifying the chunk tree. This division is important for the
5388 * bootstrap process of adding storage to a seed btrfs.
5390 int btrfs_alloc_chunk(struct btrfs_trans_handle
*trans
, u64 type
)
5394 lockdep_assert_held(&trans
->fs_info
->chunk_mutex
);
5395 chunk_offset
= find_next_chunk(trans
->fs_info
);
5396 return __btrfs_alloc_chunk(trans
, chunk_offset
, type
);
5399 static noinline
int init_first_rw_device(struct btrfs_trans_handle
*trans
)
5401 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
5403 u64 sys_chunk_offset
;
5407 chunk_offset
= find_next_chunk(fs_info
);
5408 alloc_profile
= btrfs_metadata_alloc_profile(fs_info
);
5409 ret
= __btrfs_alloc_chunk(trans
, chunk_offset
, alloc_profile
);
5413 sys_chunk_offset
= find_next_chunk(fs_info
);
5414 alloc_profile
= btrfs_system_alloc_profile(fs_info
);
5415 ret
= __btrfs_alloc_chunk(trans
, sys_chunk_offset
, alloc_profile
);
5419 static inline int btrfs_chunk_max_errors(struct map_lookup
*map
)
5421 const int index
= btrfs_bg_flags_to_raid_index(map
->type
);
5423 return btrfs_raid_array
[index
].tolerated_failures
;
5426 int btrfs_chunk_readonly(struct btrfs_fs_info
*fs_info
, u64 chunk_offset
)
5428 struct extent_map
*em
;
5429 struct map_lookup
*map
;
5434 em
= btrfs_get_chunk_map(fs_info
, chunk_offset
, 1);
5438 map
= em
->map_lookup
;
5439 for (i
= 0; i
< map
->num_stripes
; i
++) {
5440 if (test_bit(BTRFS_DEV_STATE_MISSING
,
5441 &map
->stripes
[i
].dev
->dev_state
)) {
5445 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE
,
5446 &map
->stripes
[i
].dev
->dev_state
)) {
5453 * If the number of missing devices is larger than max errors,
5454 * we can not write the data into that chunk successfully, so
5457 if (miss_ndevs
> btrfs_chunk_max_errors(map
))
5460 free_extent_map(em
);
5464 void btrfs_mapping_tree_free(struct extent_map_tree
*tree
)
5466 struct extent_map
*em
;
5469 write_lock(&tree
->lock
);
5470 em
= lookup_extent_mapping(tree
, 0, (u64
)-1);
5472 remove_extent_mapping(tree
, em
);
5473 write_unlock(&tree
->lock
);
5477 free_extent_map(em
);
5478 /* once for the tree */
5479 free_extent_map(em
);
5483 int btrfs_num_copies(struct btrfs_fs_info
*fs_info
, u64 logical
, u64 len
)
5485 struct extent_map
*em
;
5486 struct map_lookup
*map
;
5489 em
= btrfs_get_chunk_map(fs_info
, logical
, len
);
5492 * We could return errors for these cases, but that could get
5493 * ugly and we'd probably do the same thing which is just not do
5494 * anything else and exit, so return 1 so the callers don't try
5495 * to use other copies.
5499 map
= em
->map_lookup
;
5500 if (map
->type
& (BTRFS_BLOCK_GROUP_DUP
| BTRFS_BLOCK_GROUP_RAID1_MASK
))
5501 ret
= map
->num_stripes
;
5502 else if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
)
5503 ret
= map
->sub_stripes
;
5504 else if (map
->type
& BTRFS_BLOCK_GROUP_RAID5
)
5506 else if (map
->type
& BTRFS_BLOCK_GROUP_RAID6
)
5508 * There could be two corrupted data stripes, we need
5509 * to loop retry in order to rebuild the correct data.
5511 * Fail a stripe at a time on every retry except the
5512 * stripe under reconstruction.
5514 ret
= map
->num_stripes
;
5517 free_extent_map(em
);
5519 down_read(&fs_info
->dev_replace
.rwsem
);
5520 if (btrfs_dev_replace_is_ongoing(&fs_info
->dev_replace
) &&
5521 fs_info
->dev_replace
.tgtdev
)
5523 up_read(&fs_info
->dev_replace
.rwsem
);
5528 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info
*fs_info
,
5531 struct extent_map
*em
;
5532 struct map_lookup
*map
;
5533 unsigned long len
= fs_info
->sectorsize
;
5535 em
= btrfs_get_chunk_map(fs_info
, logical
, len
);
5537 if (!WARN_ON(IS_ERR(em
))) {
5538 map
= em
->map_lookup
;
5539 if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
)
5540 len
= map
->stripe_len
* nr_data_stripes(map
);
5541 free_extent_map(em
);
5546 int btrfs_is_parity_mirror(struct btrfs_fs_info
*fs_info
, u64 logical
, u64 len
)
5548 struct extent_map
*em
;
5549 struct map_lookup
*map
;
5552 em
= btrfs_get_chunk_map(fs_info
, logical
, len
);
5554 if(!WARN_ON(IS_ERR(em
))) {
5555 map
= em
->map_lookup
;
5556 if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
)
5558 free_extent_map(em
);
5563 static int find_live_mirror(struct btrfs_fs_info
*fs_info
,
5564 struct map_lookup
*map
, int first
,
5565 int dev_replace_is_ongoing
)
5569 int preferred_mirror
;
5571 struct btrfs_device
*srcdev
;
5574 (BTRFS_BLOCK_GROUP_RAID1_MASK
| BTRFS_BLOCK_GROUP_RAID10
)));
5576 if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
)
5577 num_stripes
= map
->sub_stripes
;
5579 num_stripes
= map
->num_stripes
;
5581 preferred_mirror
= first
+ current
->pid
% num_stripes
;
5583 if (dev_replace_is_ongoing
&&
5584 fs_info
->dev_replace
.cont_reading_from_srcdev_mode
==
5585 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID
)
5586 srcdev
= fs_info
->dev_replace
.srcdev
;
5591 * try to avoid the drive that is the source drive for a
5592 * dev-replace procedure, only choose it if no other non-missing
5593 * mirror is available
5595 for (tolerance
= 0; tolerance
< 2; tolerance
++) {
5596 if (map
->stripes
[preferred_mirror
].dev
->bdev
&&
5597 (tolerance
|| map
->stripes
[preferred_mirror
].dev
!= srcdev
))
5598 return preferred_mirror
;
5599 for (i
= first
; i
< first
+ num_stripes
; i
++) {
5600 if (map
->stripes
[i
].dev
->bdev
&&
5601 (tolerance
|| map
->stripes
[i
].dev
!= srcdev
))
5606 /* we couldn't find one that doesn't fail. Just return something
5607 * and the io error handling code will clean up eventually
5609 return preferred_mirror
;
5612 static inline int parity_smaller(u64 a
, u64 b
)
5617 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5618 static void sort_parity_stripes(struct btrfs_bio
*bbio
, int num_stripes
)
5620 struct btrfs_bio_stripe s
;
5627 for (i
= 0; i
< num_stripes
- 1; i
++) {
5628 if (parity_smaller(bbio
->raid_map
[i
],
5629 bbio
->raid_map
[i
+1])) {
5630 s
= bbio
->stripes
[i
];
5631 l
= bbio
->raid_map
[i
];
5632 bbio
->stripes
[i
] = bbio
->stripes
[i
+1];
5633 bbio
->raid_map
[i
] = bbio
->raid_map
[i
+1];
5634 bbio
->stripes
[i
+1] = s
;
5635 bbio
->raid_map
[i
+1] = l
;
5643 static struct btrfs_bio
*alloc_btrfs_bio(int total_stripes
, int real_stripes
)
5645 struct btrfs_bio
*bbio
= kzalloc(
5646 /* the size of the btrfs_bio */
5647 sizeof(struct btrfs_bio
) +
5648 /* plus the variable array for the stripes */
5649 sizeof(struct btrfs_bio_stripe
) * (total_stripes
) +
5650 /* plus the variable array for the tgt dev */
5651 sizeof(int) * (real_stripes
) +
5653 * plus the raid_map, which includes both the tgt dev
5656 sizeof(u64
) * (total_stripes
),
5657 GFP_NOFS
|__GFP_NOFAIL
);
5659 atomic_set(&bbio
->error
, 0);
5660 refcount_set(&bbio
->refs
, 1);
5665 void btrfs_get_bbio(struct btrfs_bio
*bbio
)
5667 WARN_ON(!refcount_read(&bbio
->refs
));
5668 refcount_inc(&bbio
->refs
);
5671 void btrfs_put_bbio(struct btrfs_bio
*bbio
)
5675 if (refcount_dec_and_test(&bbio
->refs
))
5679 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5681 * Please note that, discard won't be sent to target device of device
5684 static int __btrfs_map_block_for_discard(struct btrfs_fs_info
*fs_info
,
5685 u64 logical
, u64
*length_ret
,
5686 struct btrfs_bio
**bbio_ret
)
5688 struct extent_map
*em
;
5689 struct map_lookup
*map
;
5690 struct btrfs_bio
*bbio
;
5691 u64 length
= *length_ret
;
5695 u64 stripe_end_offset
;
5702 u32 sub_stripes
= 0;
5703 u64 stripes_per_dev
= 0;
5704 u32 remaining_stripes
= 0;
5705 u32 last_stripe
= 0;
5709 /* discard always return a bbio */
5712 em
= btrfs_get_chunk_map(fs_info
, logical
, length
);
5716 map
= em
->map_lookup
;
5717 /* we don't discard raid56 yet */
5718 if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
5723 offset
= logical
- em
->start
;
5724 length
= min_t(u64
, em
->start
+ em
->len
- logical
, length
);
5725 *length_ret
= length
;
5727 stripe_len
= map
->stripe_len
;
5729 * stripe_nr counts the total number of stripes we have to stride
5730 * to get to this block
5732 stripe_nr
= div64_u64(offset
, stripe_len
);
5734 /* stripe_offset is the offset of this block in its stripe */
5735 stripe_offset
= offset
- stripe_nr
* stripe_len
;
5737 stripe_nr_end
= round_up(offset
+ length
, map
->stripe_len
);
5738 stripe_nr_end
= div64_u64(stripe_nr_end
, map
->stripe_len
);
5739 stripe_cnt
= stripe_nr_end
- stripe_nr
;
5740 stripe_end_offset
= stripe_nr_end
* map
->stripe_len
-
5743 * after this, stripe_nr is the number of stripes on this
5744 * device we have to walk to find the data, and stripe_index is
5745 * the number of our device in the stripe array
5749 if (map
->type
& (BTRFS_BLOCK_GROUP_RAID0
|
5750 BTRFS_BLOCK_GROUP_RAID10
)) {
5751 if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
)
5754 sub_stripes
= map
->sub_stripes
;
5756 factor
= map
->num_stripes
/ sub_stripes
;
5757 num_stripes
= min_t(u64
, map
->num_stripes
,
5758 sub_stripes
* stripe_cnt
);
5759 stripe_nr
= div_u64_rem(stripe_nr
, factor
, &stripe_index
);
5760 stripe_index
*= sub_stripes
;
5761 stripes_per_dev
= div_u64_rem(stripe_cnt
, factor
,
5762 &remaining_stripes
);
5763 div_u64_rem(stripe_nr_end
- 1, factor
, &last_stripe
);
5764 last_stripe
*= sub_stripes
;
5765 } else if (map
->type
& (BTRFS_BLOCK_GROUP_RAID1_MASK
|
5766 BTRFS_BLOCK_GROUP_DUP
)) {
5767 num_stripes
= map
->num_stripes
;
5769 stripe_nr
= div_u64_rem(stripe_nr
, map
->num_stripes
,
5773 bbio
= alloc_btrfs_bio(num_stripes
, 0);
5779 for (i
= 0; i
< num_stripes
; i
++) {
5780 bbio
->stripes
[i
].physical
=
5781 map
->stripes
[stripe_index
].physical
+
5782 stripe_offset
+ stripe_nr
* map
->stripe_len
;
5783 bbio
->stripes
[i
].dev
= map
->stripes
[stripe_index
].dev
;
5785 if (map
->type
& (BTRFS_BLOCK_GROUP_RAID0
|
5786 BTRFS_BLOCK_GROUP_RAID10
)) {
5787 bbio
->stripes
[i
].length
= stripes_per_dev
*
5790 if (i
/ sub_stripes
< remaining_stripes
)
5791 bbio
->stripes
[i
].length
+=
5795 * Special for the first stripe and
5798 * |-------|...|-------|
5802 if (i
< sub_stripes
)
5803 bbio
->stripes
[i
].length
-=
5806 if (stripe_index
>= last_stripe
&&
5807 stripe_index
<= (last_stripe
+
5809 bbio
->stripes
[i
].length
-=
5812 if (i
== sub_stripes
- 1)
5815 bbio
->stripes
[i
].length
= length
;
5819 if (stripe_index
== map
->num_stripes
) {
5826 bbio
->map_type
= map
->type
;
5827 bbio
->num_stripes
= num_stripes
;
5829 free_extent_map(em
);
5834 * In dev-replace case, for repair case (that's the only case where the mirror
5835 * is selected explicitly when calling btrfs_map_block), blocks left of the
5836 * left cursor can also be read from the target drive.
5838 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5840 * For READ, it also needs to be supported using the same mirror number.
5842 * If the requested block is not left of the left cursor, EIO is returned. This
5843 * can happen because btrfs_num_copies() returns one more in the dev-replace
5846 static int get_extra_mirror_from_replace(struct btrfs_fs_info
*fs_info
,
5847 u64 logical
, u64 length
,
5848 u64 srcdev_devid
, int *mirror_num
,
5851 struct btrfs_bio
*bbio
= NULL
;
5853 int index_srcdev
= 0;
5855 u64 physical_of_found
= 0;
5859 ret
= __btrfs_map_block(fs_info
, BTRFS_MAP_GET_READ_MIRRORS
,
5860 logical
, &length
, &bbio
, 0, 0);
5862 ASSERT(bbio
== NULL
);
5866 num_stripes
= bbio
->num_stripes
;
5867 if (*mirror_num
> num_stripes
) {
5869 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5870 * that means that the requested area is not left of the left
5873 btrfs_put_bbio(bbio
);
5878 * process the rest of the function using the mirror_num of the source
5879 * drive. Therefore look it up first. At the end, patch the device
5880 * pointer to the one of the target drive.
5882 for (i
= 0; i
< num_stripes
; i
++) {
5883 if (bbio
->stripes
[i
].dev
->devid
!= srcdev_devid
)
5887 * In case of DUP, in order to keep it simple, only add the
5888 * mirror with the lowest physical address
5891 physical_of_found
<= bbio
->stripes
[i
].physical
)
5896 physical_of_found
= bbio
->stripes
[i
].physical
;
5899 btrfs_put_bbio(bbio
);
5905 *mirror_num
= index_srcdev
+ 1;
5906 *physical
= physical_of_found
;
5910 static void handle_ops_on_dev_replace(enum btrfs_map_op op
,
5911 struct btrfs_bio
**bbio_ret
,
5912 struct btrfs_dev_replace
*dev_replace
,
5913 int *num_stripes_ret
, int *max_errors_ret
)
5915 struct btrfs_bio
*bbio
= *bbio_ret
;
5916 u64 srcdev_devid
= dev_replace
->srcdev
->devid
;
5917 int tgtdev_indexes
= 0;
5918 int num_stripes
= *num_stripes_ret
;
5919 int max_errors
= *max_errors_ret
;
5922 if (op
== BTRFS_MAP_WRITE
) {
5923 int index_where_to_add
;
5926 * duplicate the write operations while the dev replace
5927 * procedure is running. Since the copying of the old disk to
5928 * the new disk takes place at run time while the filesystem is
5929 * mounted writable, the regular write operations to the old
5930 * disk have to be duplicated to go to the new disk as well.
5932 * Note that device->missing is handled by the caller, and that
5933 * the write to the old disk is already set up in the stripes
5936 index_where_to_add
= num_stripes
;
5937 for (i
= 0; i
< num_stripes
; i
++) {
5938 if (bbio
->stripes
[i
].dev
->devid
== srcdev_devid
) {
5939 /* write to new disk, too */
5940 struct btrfs_bio_stripe
*new =
5941 bbio
->stripes
+ index_where_to_add
;
5942 struct btrfs_bio_stripe
*old
=
5945 new->physical
= old
->physical
;
5946 new->length
= old
->length
;
5947 new->dev
= dev_replace
->tgtdev
;
5948 bbio
->tgtdev_map
[i
] = index_where_to_add
;
5949 index_where_to_add
++;
5954 num_stripes
= index_where_to_add
;
5955 } else if (op
== BTRFS_MAP_GET_READ_MIRRORS
) {
5956 int index_srcdev
= 0;
5958 u64 physical_of_found
= 0;
5961 * During the dev-replace procedure, the target drive can also
5962 * be used to read data in case it is needed to repair a corrupt
5963 * block elsewhere. This is possible if the requested area is
5964 * left of the left cursor. In this area, the target drive is a
5965 * full copy of the source drive.
5967 for (i
= 0; i
< num_stripes
; i
++) {
5968 if (bbio
->stripes
[i
].dev
->devid
== srcdev_devid
) {
5970 * In case of DUP, in order to keep it simple,
5971 * only add the mirror with the lowest physical
5975 physical_of_found
<=
5976 bbio
->stripes
[i
].physical
)
5980 physical_of_found
= bbio
->stripes
[i
].physical
;
5984 struct btrfs_bio_stripe
*tgtdev_stripe
=
5985 bbio
->stripes
+ num_stripes
;
5987 tgtdev_stripe
->physical
= physical_of_found
;
5988 tgtdev_stripe
->length
=
5989 bbio
->stripes
[index_srcdev
].length
;
5990 tgtdev_stripe
->dev
= dev_replace
->tgtdev
;
5991 bbio
->tgtdev_map
[index_srcdev
] = num_stripes
;
5998 *num_stripes_ret
= num_stripes
;
5999 *max_errors_ret
= max_errors
;
6000 bbio
->num_tgtdevs
= tgtdev_indexes
;
6004 static bool need_full_stripe(enum btrfs_map_op op
)
6006 return (op
== BTRFS_MAP_WRITE
|| op
== BTRFS_MAP_GET_READ_MIRRORS
);
6010 * btrfs_get_io_geometry - calculates the geomery of a particular (address, len)
6011 * tuple. This information is used to calculate how big a
6012 * particular bio can get before it straddles a stripe.
6014 * @fs_info - the filesystem
6015 * @logical - address that we want to figure out the geometry of
6016 * @len - the length of IO we are going to perform, starting at @logical
6017 * @op - type of operation - write or read
6018 * @io_geom - pointer used to return values
6020 * Returns < 0 in case a chunk for the given logical address cannot be found,
6021 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
6023 int btrfs_get_io_geometry(struct btrfs_fs_info
*fs_info
, enum btrfs_map_op op
,
6024 u64 logical
, u64 len
, struct btrfs_io_geometry
*io_geom
)
6026 struct extent_map
*em
;
6027 struct map_lookup
*map
;
6032 u64 raid56_full_stripe_start
= (u64
)-1;
6036 ASSERT(op
!= BTRFS_MAP_DISCARD
);
6038 em
= btrfs_get_chunk_map(fs_info
, logical
, len
);
6042 map
= em
->map_lookup
;
6043 /* Offset of this logical address in the chunk */
6044 offset
= logical
- em
->start
;
6045 /* Len of a stripe in a chunk */
6046 stripe_len
= map
->stripe_len
;
6047 /* Stripe wher this block falls in */
6048 stripe_nr
= div64_u64(offset
, stripe_len
);
6049 /* Offset of stripe in the chunk */
6050 stripe_offset
= stripe_nr
* stripe_len
;
6051 if (offset
< stripe_offset
) {
6053 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6054 stripe_offset
, offset
, em
->start
, logical
, stripe_len
);
6059 /* stripe_offset is the offset of this block in its stripe */
6060 stripe_offset
= offset
- stripe_offset
;
6061 data_stripes
= nr_data_stripes(map
);
6063 if (map
->type
& BTRFS_BLOCK_GROUP_PROFILE_MASK
) {
6064 u64 max_len
= stripe_len
- stripe_offset
;
6067 * In case of raid56, we need to know the stripe aligned start
6069 if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
6070 unsigned long full_stripe_len
= stripe_len
* data_stripes
;
6071 raid56_full_stripe_start
= offset
;
6074 * Allow a write of a full stripe, but make sure we
6075 * don't allow straddling of stripes
6077 raid56_full_stripe_start
= div64_u64(raid56_full_stripe_start
,
6079 raid56_full_stripe_start
*= full_stripe_len
;
6082 * For writes to RAID[56], allow a full stripeset across
6083 * all disks. For other RAID types and for RAID[56]
6084 * reads, just allow a single stripe (on a single disk).
6086 if (op
== BTRFS_MAP_WRITE
) {
6087 max_len
= stripe_len
* data_stripes
-
6088 (offset
- raid56_full_stripe_start
);
6091 len
= min_t(u64
, em
->len
- offset
, max_len
);
6093 len
= em
->len
- offset
;
6097 io_geom
->offset
= offset
;
6098 io_geom
->stripe_len
= stripe_len
;
6099 io_geom
->stripe_nr
= stripe_nr
;
6100 io_geom
->stripe_offset
= stripe_offset
;
6101 io_geom
->raid56_stripe_offset
= raid56_full_stripe_start
;
6105 free_extent_map(em
);
6109 static int __btrfs_map_block(struct btrfs_fs_info
*fs_info
,
6110 enum btrfs_map_op op
,
6111 u64 logical
, u64
*length
,
6112 struct btrfs_bio
**bbio_ret
,
6113 int mirror_num
, int need_raid_map
)
6115 struct extent_map
*em
;
6116 struct map_lookup
*map
;
6126 int tgtdev_indexes
= 0;
6127 struct btrfs_bio
*bbio
= NULL
;
6128 struct btrfs_dev_replace
*dev_replace
= &fs_info
->dev_replace
;
6129 int dev_replace_is_ongoing
= 0;
6130 int num_alloc_stripes
;
6131 int patch_the_first_stripe_for_dev_replace
= 0;
6132 u64 physical_to_patch_in_first_stripe
= 0;
6133 u64 raid56_full_stripe_start
= (u64
)-1;
6134 struct btrfs_io_geometry geom
;
6138 if (op
== BTRFS_MAP_DISCARD
)
6139 return __btrfs_map_block_for_discard(fs_info
, logical
,
6142 ret
= btrfs_get_io_geometry(fs_info
, op
, logical
, *length
, &geom
);
6146 em
= btrfs_get_chunk_map(fs_info
, logical
, *length
);
6147 ASSERT(!IS_ERR(em
));
6148 map
= em
->map_lookup
;
6151 stripe_len
= geom
.stripe_len
;
6152 stripe_nr
= geom
.stripe_nr
;
6153 stripe_offset
= geom
.stripe_offset
;
6154 raid56_full_stripe_start
= geom
.raid56_stripe_offset
;
6155 data_stripes
= nr_data_stripes(map
);
6157 down_read(&dev_replace
->rwsem
);
6158 dev_replace_is_ongoing
= btrfs_dev_replace_is_ongoing(dev_replace
);
6160 * Hold the semaphore for read during the whole operation, write is
6161 * requested at commit time but must wait.
6163 if (!dev_replace_is_ongoing
)
6164 up_read(&dev_replace
->rwsem
);
6166 if (dev_replace_is_ongoing
&& mirror_num
== map
->num_stripes
+ 1 &&
6167 !need_full_stripe(op
) && dev_replace
->tgtdev
!= NULL
) {
6168 ret
= get_extra_mirror_from_replace(fs_info
, logical
, *length
,
6169 dev_replace
->srcdev
->devid
,
6171 &physical_to_patch_in_first_stripe
);
6175 patch_the_first_stripe_for_dev_replace
= 1;
6176 } else if (mirror_num
> map
->num_stripes
) {
6182 if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
) {
6183 stripe_nr
= div_u64_rem(stripe_nr
, map
->num_stripes
,
6185 if (!need_full_stripe(op
))
6187 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID1_MASK
) {
6188 if (need_full_stripe(op
))
6189 num_stripes
= map
->num_stripes
;
6190 else if (mirror_num
)
6191 stripe_index
= mirror_num
- 1;
6193 stripe_index
= find_live_mirror(fs_info
, map
, 0,
6194 dev_replace_is_ongoing
);
6195 mirror_num
= stripe_index
+ 1;
6198 } else if (map
->type
& BTRFS_BLOCK_GROUP_DUP
) {
6199 if (need_full_stripe(op
)) {
6200 num_stripes
= map
->num_stripes
;
6201 } else if (mirror_num
) {
6202 stripe_index
= mirror_num
- 1;
6207 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
) {
6208 u32 factor
= map
->num_stripes
/ map
->sub_stripes
;
6210 stripe_nr
= div_u64_rem(stripe_nr
, factor
, &stripe_index
);
6211 stripe_index
*= map
->sub_stripes
;
6213 if (need_full_stripe(op
))
6214 num_stripes
= map
->sub_stripes
;
6215 else if (mirror_num
)
6216 stripe_index
+= mirror_num
- 1;
6218 int old_stripe_index
= stripe_index
;
6219 stripe_index
= find_live_mirror(fs_info
, map
,
6221 dev_replace_is_ongoing
);
6222 mirror_num
= stripe_index
- old_stripe_index
+ 1;
6225 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
6226 if (need_raid_map
&& (need_full_stripe(op
) || mirror_num
> 1)) {
6227 /* push stripe_nr back to the start of the full stripe */
6228 stripe_nr
= div64_u64(raid56_full_stripe_start
,
6229 stripe_len
* data_stripes
);
6231 /* RAID[56] write or recovery. Return all stripes */
6232 num_stripes
= map
->num_stripes
;
6233 max_errors
= nr_parity_stripes(map
);
6235 *length
= map
->stripe_len
;
6240 * Mirror #0 or #1 means the original data block.
6241 * Mirror #2 is RAID5 parity block.
6242 * Mirror #3 is RAID6 Q block.
6244 stripe_nr
= div_u64_rem(stripe_nr
,
6245 data_stripes
, &stripe_index
);
6247 stripe_index
= data_stripes
+ mirror_num
- 2;
6249 /* We distribute the parity blocks across stripes */
6250 div_u64_rem(stripe_nr
+ stripe_index
, map
->num_stripes
,
6252 if (!need_full_stripe(op
) && mirror_num
<= 1)
6257 * after this, stripe_nr is the number of stripes on this
6258 * device we have to walk to find the data, and stripe_index is
6259 * the number of our device in the stripe array
6261 stripe_nr
= div_u64_rem(stripe_nr
, map
->num_stripes
,
6263 mirror_num
= stripe_index
+ 1;
6265 if (stripe_index
>= map
->num_stripes
) {
6267 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6268 stripe_index
, map
->num_stripes
);
6273 num_alloc_stripes
= num_stripes
;
6274 if (dev_replace_is_ongoing
&& dev_replace
->tgtdev
!= NULL
) {
6275 if (op
== BTRFS_MAP_WRITE
)
6276 num_alloc_stripes
<<= 1;
6277 if (op
== BTRFS_MAP_GET_READ_MIRRORS
)
6278 num_alloc_stripes
++;
6279 tgtdev_indexes
= num_stripes
;
6282 bbio
= alloc_btrfs_bio(num_alloc_stripes
, tgtdev_indexes
);
6287 if (dev_replace_is_ongoing
&& dev_replace
->tgtdev
!= NULL
)
6288 bbio
->tgtdev_map
= (int *)(bbio
->stripes
+ num_alloc_stripes
);
6290 /* build raid_map */
6291 if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
&& need_raid_map
&&
6292 (need_full_stripe(op
) || mirror_num
> 1)) {
6296 bbio
->raid_map
= (u64
*)((void *)bbio
->stripes
+
6297 sizeof(struct btrfs_bio_stripe
) *
6299 sizeof(int) * tgtdev_indexes
);
6301 /* Work out the disk rotation on this stripe-set */
6302 div_u64_rem(stripe_nr
, num_stripes
, &rot
);
6304 /* Fill in the logical address of each stripe */
6305 tmp
= stripe_nr
* data_stripes
;
6306 for (i
= 0; i
< data_stripes
; i
++)
6307 bbio
->raid_map
[(i
+rot
) % num_stripes
] =
6308 em
->start
+ (tmp
+ i
) * map
->stripe_len
;
6310 bbio
->raid_map
[(i
+rot
) % map
->num_stripes
] = RAID5_P_STRIPE
;
6311 if (map
->type
& BTRFS_BLOCK_GROUP_RAID6
)
6312 bbio
->raid_map
[(i
+rot
+1) % num_stripes
] =
6317 for (i
= 0; i
< num_stripes
; i
++) {
6318 bbio
->stripes
[i
].physical
=
6319 map
->stripes
[stripe_index
].physical
+
6321 stripe_nr
* map
->stripe_len
;
6322 bbio
->stripes
[i
].dev
=
6323 map
->stripes
[stripe_index
].dev
;
6327 if (need_full_stripe(op
))
6328 max_errors
= btrfs_chunk_max_errors(map
);
6331 sort_parity_stripes(bbio
, num_stripes
);
6333 if (dev_replace_is_ongoing
&& dev_replace
->tgtdev
!= NULL
&&
6334 need_full_stripe(op
)) {
6335 handle_ops_on_dev_replace(op
, &bbio
, dev_replace
, &num_stripes
,
6340 bbio
->map_type
= map
->type
;
6341 bbio
->num_stripes
= num_stripes
;
6342 bbio
->max_errors
= max_errors
;
6343 bbio
->mirror_num
= mirror_num
;
6346 * this is the case that REQ_READ && dev_replace_is_ongoing &&
6347 * mirror_num == num_stripes + 1 && dev_replace target drive is
6348 * available as a mirror
6350 if (patch_the_first_stripe_for_dev_replace
&& num_stripes
> 0) {
6351 WARN_ON(num_stripes
> 1);
6352 bbio
->stripes
[0].dev
= dev_replace
->tgtdev
;
6353 bbio
->stripes
[0].physical
= physical_to_patch_in_first_stripe
;
6354 bbio
->mirror_num
= map
->num_stripes
+ 1;
6357 if (dev_replace_is_ongoing
) {
6358 lockdep_assert_held(&dev_replace
->rwsem
);
6359 /* Unlock and let waiting writers proceed */
6360 up_read(&dev_replace
->rwsem
);
6362 free_extent_map(em
);
6366 int btrfs_map_block(struct btrfs_fs_info
*fs_info
, enum btrfs_map_op op
,
6367 u64 logical
, u64
*length
,
6368 struct btrfs_bio
**bbio_ret
, int mirror_num
)
6370 return __btrfs_map_block(fs_info
, op
, logical
, length
, bbio_ret
,
6374 /* For Scrub/replace */
6375 int btrfs_map_sblock(struct btrfs_fs_info
*fs_info
, enum btrfs_map_op op
,
6376 u64 logical
, u64
*length
,
6377 struct btrfs_bio
**bbio_ret
)
6379 return __btrfs_map_block(fs_info
, op
, logical
, length
, bbio_ret
, 0, 1);
6382 int btrfs_rmap_block(struct btrfs_fs_info
*fs_info
, u64 chunk_start
,
6383 u64 physical
, u64
**logical
, int *naddrs
, int *stripe_len
)
6385 struct extent_map
*em
;
6386 struct map_lookup
*map
;
6394 em
= btrfs_get_chunk_map(fs_info
, chunk_start
, 1);
6398 map
= em
->map_lookup
;
6400 rmap_len
= map
->stripe_len
;
6402 if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
)
6403 length
= div_u64(length
, map
->num_stripes
/ map
->sub_stripes
);
6404 else if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
)
6405 length
= div_u64(length
, map
->num_stripes
);
6406 else if (map
->type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) {
6407 length
= div_u64(length
, nr_data_stripes(map
));
6408 rmap_len
= map
->stripe_len
* nr_data_stripes(map
);
6411 buf
= kcalloc(map
->num_stripes
, sizeof(u64
), GFP_NOFS
);
6412 BUG_ON(!buf
); /* -ENOMEM */
6414 for (i
= 0; i
< map
->num_stripes
; i
++) {
6415 if (map
->stripes
[i
].physical
> physical
||
6416 map
->stripes
[i
].physical
+ length
<= physical
)
6419 stripe_nr
= physical
- map
->stripes
[i
].physical
;
6420 stripe_nr
= div64_u64(stripe_nr
, map
->stripe_len
);
6422 if (map
->type
& BTRFS_BLOCK_GROUP_RAID10
) {
6423 stripe_nr
= stripe_nr
* map
->num_stripes
+ i
;
6424 stripe_nr
= div_u64(stripe_nr
, map
->sub_stripes
);
6425 } else if (map
->type
& BTRFS_BLOCK_GROUP_RAID0
) {
6426 stripe_nr
= stripe_nr
* map
->num_stripes
+ i
;
6427 } /* else if RAID[56], multiply by nr_data_stripes().
6428 * Alternatively, just use rmap_len below instead of
6429 * map->stripe_len */
6431 bytenr
= chunk_start
+ stripe_nr
* rmap_len
;
6432 WARN_ON(nr
>= map
->num_stripes
);
6433 for (j
= 0; j
< nr
; j
++) {
6434 if (buf
[j
] == bytenr
)
6438 WARN_ON(nr
>= map
->num_stripes
);
6445 *stripe_len
= rmap_len
;
6447 free_extent_map(em
);
6451 static inline void btrfs_end_bbio(struct btrfs_bio
*bbio
, struct bio
*bio
)
6453 bio
->bi_private
= bbio
->private;
6454 bio
->bi_end_io
= bbio
->end_io
;
6457 btrfs_put_bbio(bbio
);
6460 static void btrfs_end_bio(struct bio
*bio
)
6462 struct btrfs_bio
*bbio
= bio
->bi_private
;
6463 int is_orig_bio
= 0;
6465 if (bio
->bi_status
) {
6466 atomic_inc(&bbio
->error
);
6467 if (bio
->bi_status
== BLK_STS_IOERR
||
6468 bio
->bi_status
== BLK_STS_TARGET
) {
6469 unsigned int stripe_index
=
6470 btrfs_io_bio(bio
)->stripe_index
;
6471 struct btrfs_device
*dev
;
6473 BUG_ON(stripe_index
>= bbio
->num_stripes
);
6474 dev
= bbio
->stripes
[stripe_index
].dev
;
6476 if (bio_op(bio
) == REQ_OP_WRITE
)
6477 btrfs_dev_stat_inc_and_print(dev
,
6478 BTRFS_DEV_STAT_WRITE_ERRS
);
6479 else if (!(bio
->bi_opf
& REQ_RAHEAD
))
6480 btrfs_dev_stat_inc_and_print(dev
,
6481 BTRFS_DEV_STAT_READ_ERRS
);
6482 if (bio
->bi_opf
& REQ_PREFLUSH
)
6483 btrfs_dev_stat_inc_and_print(dev
,
6484 BTRFS_DEV_STAT_FLUSH_ERRS
);
6489 if (bio
== bbio
->orig_bio
)
6492 btrfs_bio_counter_dec(bbio
->fs_info
);
6494 if (atomic_dec_and_test(&bbio
->stripes_pending
)) {
6497 bio
= bbio
->orig_bio
;
6500 btrfs_io_bio(bio
)->mirror_num
= bbio
->mirror_num
;
6501 /* only send an error to the higher layers if it is
6502 * beyond the tolerance of the btrfs bio
6504 if (atomic_read(&bbio
->error
) > bbio
->max_errors
) {
6505 bio
->bi_status
= BLK_STS_IOERR
;
6508 * this bio is actually up to date, we didn't
6509 * go over the max number of errors
6511 bio
->bi_status
= BLK_STS_OK
;
6514 btrfs_end_bbio(bbio
, bio
);
6515 } else if (!is_orig_bio
) {
6521 * see run_scheduled_bios for a description of why bios are collected for
6524 * This will add one bio to the pending list for a device and make sure
6525 * the work struct is scheduled.
6527 static noinline
void btrfs_schedule_bio(struct btrfs_device
*device
,
6530 struct btrfs_fs_info
*fs_info
= device
->fs_info
;
6531 int should_queue
= 1;
6532 struct btrfs_pending_bios
*pending_bios
;
6534 /* don't bother with additional async steps for reads, right now */
6535 if (bio_op(bio
) == REQ_OP_READ
) {
6536 btrfsic_submit_bio(bio
);
6540 WARN_ON(bio
->bi_next
);
6541 bio
->bi_next
= NULL
;
6543 spin_lock(&device
->io_lock
);
6544 if (op_is_sync(bio
->bi_opf
))
6545 pending_bios
= &device
->pending_sync_bios
;
6547 pending_bios
= &device
->pending_bios
;
6549 if (pending_bios
->tail
)
6550 pending_bios
->tail
->bi_next
= bio
;
6552 pending_bios
->tail
= bio
;
6553 if (!pending_bios
->head
)
6554 pending_bios
->head
= bio
;
6555 if (device
->running_pending
)
6558 spin_unlock(&device
->io_lock
);
6561 btrfs_queue_work(fs_info
->submit_workers
, &device
->work
);
6564 static void submit_stripe_bio(struct btrfs_bio
*bbio
, struct bio
*bio
,
6565 u64 physical
, int dev_nr
, int async
)
6567 struct btrfs_device
*dev
= bbio
->stripes
[dev_nr
].dev
;
6568 struct btrfs_fs_info
*fs_info
= bbio
->fs_info
;
6570 bio
->bi_private
= bbio
;
6571 btrfs_io_bio(bio
)->stripe_index
= dev_nr
;
6572 bio
->bi_end_io
= btrfs_end_bio
;
6573 bio
->bi_iter
.bi_sector
= physical
>> 9;
6574 btrfs_debug_in_rcu(fs_info
,
6575 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6576 bio_op(bio
), bio
->bi_opf
, (u64
)bio
->bi_iter
.bi_sector
,
6577 (u_long
)dev
->bdev
->bd_dev
, rcu_str_deref(dev
->name
), dev
->devid
,
6578 bio
->bi_iter
.bi_size
);
6579 bio_set_dev(bio
, dev
->bdev
);
6581 btrfs_bio_counter_inc_noblocked(fs_info
);
6584 btrfs_schedule_bio(dev
, bio
);
6586 btrfsic_submit_bio(bio
);
6589 static void bbio_error(struct btrfs_bio
*bbio
, struct bio
*bio
, u64 logical
)
6591 atomic_inc(&bbio
->error
);
6592 if (atomic_dec_and_test(&bbio
->stripes_pending
)) {
6593 /* Should be the original bio. */
6594 WARN_ON(bio
!= bbio
->orig_bio
);
6596 btrfs_io_bio(bio
)->mirror_num
= bbio
->mirror_num
;
6597 bio
->bi_iter
.bi_sector
= logical
>> 9;
6598 if (atomic_read(&bbio
->error
) > bbio
->max_errors
)
6599 bio
->bi_status
= BLK_STS_IOERR
;
6601 bio
->bi_status
= BLK_STS_OK
;
6602 btrfs_end_bbio(bbio
, bio
);
6606 blk_status_t
btrfs_map_bio(struct btrfs_fs_info
*fs_info
, struct bio
*bio
,
6607 int mirror_num
, int async_submit
)
6609 struct btrfs_device
*dev
;
6610 struct bio
*first_bio
= bio
;
6611 u64 logical
= (u64
)bio
->bi_iter
.bi_sector
<< 9;
6617 struct btrfs_bio
*bbio
= NULL
;
6619 length
= bio
->bi_iter
.bi_size
;
6620 map_length
= length
;
6622 btrfs_bio_counter_inc_blocked(fs_info
);
6623 ret
= __btrfs_map_block(fs_info
, btrfs_op(bio
), logical
,
6624 &map_length
, &bbio
, mirror_num
, 1);
6626 btrfs_bio_counter_dec(fs_info
);
6627 return errno_to_blk_status(ret
);
6630 total_devs
= bbio
->num_stripes
;
6631 bbio
->orig_bio
= first_bio
;
6632 bbio
->private = first_bio
->bi_private
;
6633 bbio
->end_io
= first_bio
->bi_end_io
;
6634 bbio
->fs_info
= fs_info
;
6635 atomic_set(&bbio
->stripes_pending
, bbio
->num_stripes
);
6637 if ((bbio
->map_type
& BTRFS_BLOCK_GROUP_RAID56_MASK
) &&
6638 ((bio_op(bio
) == REQ_OP_WRITE
) || (mirror_num
> 1))) {
6639 /* In this case, map_length has been set to the length of
6640 a single stripe; not the whole write */
6641 if (bio_op(bio
) == REQ_OP_WRITE
) {
6642 ret
= raid56_parity_write(fs_info
, bio
, bbio
,
6645 ret
= raid56_parity_recover(fs_info
, bio
, bbio
,
6646 map_length
, mirror_num
, 1);
6649 btrfs_bio_counter_dec(fs_info
);
6650 return errno_to_blk_status(ret
);
6653 if (map_length
< length
) {
6655 "mapping failed logical %llu bio len %llu len %llu",
6656 logical
, length
, map_length
);
6660 for (dev_nr
= 0; dev_nr
< total_devs
; dev_nr
++) {
6661 dev
= bbio
->stripes
[dev_nr
].dev
;
6662 if (!dev
|| !dev
->bdev
|| test_bit(BTRFS_DEV_STATE_MISSING
,
6664 (bio_op(first_bio
) == REQ_OP_WRITE
&&
6665 !test_bit(BTRFS_DEV_STATE_WRITEABLE
, &dev
->dev_state
))) {
6666 bbio_error(bbio
, first_bio
, logical
);
6670 if (dev_nr
< total_devs
- 1)
6671 bio
= btrfs_bio_clone(first_bio
);
6675 submit_stripe_bio(bbio
, bio
, bbio
->stripes
[dev_nr
].physical
,
6676 dev_nr
, async_submit
);
6678 btrfs_bio_counter_dec(fs_info
);
6683 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6686 * If devid and uuid are both specified, the match must be exact, otherwise
6687 * only devid is used.
6689 * If @seed is true, traverse through the seed devices.
6691 struct btrfs_device
*btrfs_find_device(struct btrfs_fs_devices
*fs_devices
,
6692 u64 devid
, u8
*uuid
, u8
*fsid
,
6695 struct btrfs_device
*device
;
6697 while (fs_devices
) {
6699 !memcmp(fs_devices
->metadata_uuid
, fsid
, BTRFS_FSID_SIZE
)) {
6700 list_for_each_entry(device
, &fs_devices
->devices
,
6702 if (device
->devid
== devid
&&
6703 (!uuid
|| memcmp(device
->uuid
, uuid
,
6704 BTRFS_UUID_SIZE
) == 0))
6709 fs_devices
= fs_devices
->seed
;
6716 static struct btrfs_device
*add_missing_dev(struct btrfs_fs_devices
*fs_devices
,
6717 u64 devid
, u8
*dev_uuid
)
6719 struct btrfs_device
*device
;
6720 unsigned int nofs_flag
;
6723 * We call this under the chunk_mutex, so we want to use NOFS for this
6724 * allocation, however we don't want to change btrfs_alloc_device() to
6725 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6728 nofs_flag
= memalloc_nofs_save();
6729 device
= btrfs_alloc_device(NULL
, &devid
, dev_uuid
);
6730 memalloc_nofs_restore(nofs_flag
);
6734 list_add(&device
->dev_list
, &fs_devices
->devices
);
6735 device
->fs_devices
= fs_devices
;
6736 fs_devices
->num_devices
++;
6738 set_bit(BTRFS_DEV_STATE_MISSING
, &device
->dev_state
);
6739 fs_devices
->missing_devices
++;
6745 * btrfs_alloc_device - allocate struct btrfs_device
6746 * @fs_info: used only for generating a new devid, can be NULL if
6747 * devid is provided (i.e. @devid != NULL).
6748 * @devid: a pointer to devid for this device. If NULL a new devid
6750 * @uuid: a pointer to UUID for this device. If NULL a new UUID
6753 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6754 * on error. Returned struct is not linked onto any lists and must be
6755 * destroyed with btrfs_free_device.
6757 struct btrfs_device
*btrfs_alloc_device(struct btrfs_fs_info
*fs_info
,
6761 struct btrfs_device
*dev
;
6764 if (WARN_ON(!devid
&& !fs_info
))
6765 return ERR_PTR(-EINVAL
);
6767 dev
= __alloc_device();
6776 ret
= find_next_devid(fs_info
, &tmp
);
6778 btrfs_free_device(dev
);
6779 return ERR_PTR(ret
);
6785 memcpy(dev
->uuid
, uuid
, BTRFS_UUID_SIZE
);
6787 generate_random_uuid(dev
->uuid
);
6789 btrfs_init_work(&dev
->work
, pending_bios_fn
, NULL
, NULL
);
6794 static void btrfs_report_missing_device(struct btrfs_fs_info
*fs_info
,
6795 u64 devid
, u8
*uuid
, bool error
)
6798 btrfs_err_rl(fs_info
, "devid %llu uuid %pU is missing",
6801 btrfs_warn_rl(fs_info
, "devid %llu uuid %pU is missing",
6805 static u64
calc_stripe_length(u64 type
, u64 chunk_len
, int num_stripes
)
6807 int index
= btrfs_bg_flags_to_raid_index(type
);
6808 int ncopies
= btrfs_raid_array
[index
].ncopies
;
6811 switch (type
& BTRFS_BLOCK_GROUP_PROFILE_MASK
) {
6812 case BTRFS_BLOCK_GROUP_RAID5
:
6813 data_stripes
= num_stripes
- 1;
6815 case BTRFS_BLOCK_GROUP_RAID6
:
6816 data_stripes
= num_stripes
- 2;
6819 data_stripes
= num_stripes
/ ncopies
;
6822 return div_u64(chunk_len
, data_stripes
);
6825 static int read_one_chunk(struct btrfs_key
*key
, struct extent_buffer
*leaf
,
6826 struct btrfs_chunk
*chunk
)
6828 struct btrfs_fs_info
*fs_info
= leaf
->fs_info
;
6829 struct extent_map_tree
*map_tree
= &fs_info
->mapping_tree
;
6830 struct map_lookup
*map
;
6831 struct extent_map
*em
;
6835 u8 uuid
[BTRFS_UUID_SIZE
];
6840 logical
= key
->offset
;
6841 length
= btrfs_chunk_length(leaf
, chunk
);
6842 num_stripes
= btrfs_chunk_num_stripes(leaf
, chunk
);
6845 * Only need to verify chunk item if we're reading from sys chunk array,
6846 * as chunk item in tree block is already verified by tree-checker.
6848 if (leaf
->start
== BTRFS_SUPER_INFO_OFFSET
) {
6849 ret
= btrfs_check_chunk_valid(leaf
, chunk
, logical
);
6854 read_lock(&map_tree
->lock
);
6855 em
= lookup_extent_mapping(map_tree
, logical
, 1);
6856 read_unlock(&map_tree
->lock
);
6858 /* already mapped? */
6859 if (em
&& em
->start
<= logical
&& em
->start
+ em
->len
> logical
) {
6860 free_extent_map(em
);
6863 free_extent_map(em
);
6866 em
= alloc_extent_map();
6869 map
= kmalloc(map_lookup_size(num_stripes
), GFP_NOFS
);
6871 free_extent_map(em
);
6875 set_bit(EXTENT_FLAG_FS_MAPPING
, &em
->flags
);
6876 em
->map_lookup
= map
;
6877 em
->start
= logical
;
6880 em
->block_start
= 0;
6881 em
->block_len
= em
->len
;
6883 map
->num_stripes
= num_stripes
;
6884 map
->io_width
= btrfs_chunk_io_width(leaf
, chunk
);
6885 map
->io_align
= btrfs_chunk_io_align(leaf
, chunk
);
6886 map
->stripe_len
= btrfs_chunk_stripe_len(leaf
, chunk
);
6887 map
->type
= btrfs_chunk_type(leaf
, chunk
);
6888 map
->sub_stripes
= btrfs_chunk_sub_stripes(leaf
, chunk
);
6889 map
->verified_stripes
= 0;
6890 em
->orig_block_len
= calc_stripe_length(map
->type
, em
->len
,
6892 for (i
= 0; i
< num_stripes
; i
++) {
6893 map
->stripes
[i
].physical
=
6894 btrfs_stripe_offset_nr(leaf
, chunk
, i
);
6895 devid
= btrfs_stripe_devid_nr(leaf
, chunk
, i
);
6896 read_extent_buffer(leaf
, uuid
, (unsigned long)
6897 btrfs_stripe_dev_uuid_nr(chunk
, i
),
6899 map
->stripes
[i
].dev
= btrfs_find_device(fs_info
->fs_devices
,
6900 devid
, uuid
, NULL
, true);
6901 if (!map
->stripes
[i
].dev
&&
6902 !btrfs_test_opt(fs_info
, DEGRADED
)) {
6903 free_extent_map(em
);
6904 btrfs_report_missing_device(fs_info
, devid
, uuid
, true);
6907 if (!map
->stripes
[i
].dev
) {
6908 map
->stripes
[i
].dev
=
6909 add_missing_dev(fs_info
->fs_devices
, devid
,
6911 if (IS_ERR(map
->stripes
[i
].dev
)) {
6912 free_extent_map(em
);
6914 "failed to init missing dev %llu: %ld",
6915 devid
, PTR_ERR(map
->stripes
[i
].dev
));
6916 return PTR_ERR(map
->stripes
[i
].dev
);
6918 btrfs_report_missing_device(fs_info
, devid
, uuid
, false);
6920 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA
,
6921 &(map
->stripes
[i
].dev
->dev_state
));
6925 write_lock(&map_tree
->lock
);
6926 ret
= add_extent_mapping(map_tree
, em
, 0);
6927 write_unlock(&map_tree
->lock
);
6930 "failed to add chunk map, start=%llu len=%llu: %d",
6931 em
->start
, em
->len
, ret
);
6933 free_extent_map(em
);
6938 static void fill_device_from_item(struct extent_buffer
*leaf
,
6939 struct btrfs_dev_item
*dev_item
,
6940 struct btrfs_device
*device
)
6944 device
->devid
= btrfs_device_id(leaf
, dev_item
);
6945 device
->disk_total_bytes
= btrfs_device_total_bytes(leaf
, dev_item
);
6946 device
->total_bytes
= device
->disk_total_bytes
;
6947 device
->commit_total_bytes
= device
->disk_total_bytes
;
6948 device
->bytes_used
= btrfs_device_bytes_used(leaf
, dev_item
);
6949 device
->commit_bytes_used
= device
->bytes_used
;
6950 device
->type
= btrfs_device_type(leaf
, dev_item
);
6951 device
->io_align
= btrfs_device_io_align(leaf
, dev_item
);
6952 device
->io_width
= btrfs_device_io_width(leaf
, dev_item
);
6953 device
->sector_size
= btrfs_device_sector_size(leaf
, dev_item
);
6954 WARN_ON(device
->devid
== BTRFS_DEV_REPLACE_DEVID
);
6955 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
);
6957 ptr
= btrfs_device_uuid(dev_item
);
6958 read_extent_buffer(leaf
, device
->uuid
, ptr
, BTRFS_UUID_SIZE
);
6961 static struct btrfs_fs_devices
*open_seed_devices(struct btrfs_fs_info
*fs_info
,
6964 struct btrfs_fs_devices
*fs_devices
;
6967 lockdep_assert_held(&uuid_mutex
);
6970 fs_devices
= fs_info
->fs_devices
->seed
;
6971 while (fs_devices
) {
6972 if (!memcmp(fs_devices
->fsid
, fsid
, BTRFS_FSID_SIZE
))
6975 fs_devices
= fs_devices
->seed
;
6978 fs_devices
= find_fsid(fsid
, NULL
);
6980 if (!btrfs_test_opt(fs_info
, DEGRADED
))
6981 return ERR_PTR(-ENOENT
);
6983 fs_devices
= alloc_fs_devices(fsid
, NULL
);
6984 if (IS_ERR(fs_devices
))
6987 fs_devices
->seeding
= 1;
6988 fs_devices
->opened
= 1;
6992 fs_devices
= clone_fs_devices(fs_devices
);
6993 if (IS_ERR(fs_devices
))
6996 ret
= open_fs_devices(fs_devices
, FMODE_READ
, fs_info
->bdev_holder
);
6998 free_fs_devices(fs_devices
);
6999 fs_devices
= ERR_PTR(ret
);
7003 if (!fs_devices
->seeding
) {
7004 close_fs_devices(fs_devices
);
7005 free_fs_devices(fs_devices
);
7006 fs_devices
= ERR_PTR(-EINVAL
);
7010 fs_devices
->seed
= fs_info
->fs_devices
->seed
;
7011 fs_info
->fs_devices
->seed
= fs_devices
;
7016 static int read_one_dev(struct extent_buffer
*leaf
,
7017 struct btrfs_dev_item
*dev_item
)
7019 struct btrfs_fs_info
*fs_info
= leaf
->fs_info
;
7020 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
7021 struct btrfs_device
*device
;
7024 u8 fs_uuid
[BTRFS_FSID_SIZE
];
7025 u8 dev_uuid
[BTRFS_UUID_SIZE
];
7027 devid
= btrfs_device_id(leaf
, dev_item
);
7028 read_extent_buffer(leaf
, dev_uuid
, btrfs_device_uuid(dev_item
),
7030 read_extent_buffer(leaf
, fs_uuid
, btrfs_device_fsid(dev_item
),
7033 if (memcmp(fs_uuid
, fs_devices
->metadata_uuid
, BTRFS_FSID_SIZE
)) {
7034 fs_devices
= open_seed_devices(fs_info
, fs_uuid
);
7035 if (IS_ERR(fs_devices
))
7036 return PTR_ERR(fs_devices
);
7039 device
= btrfs_find_device(fs_info
->fs_devices
, devid
, dev_uuid
,
7042 if (!btrfs_test_opt(fs_info
, DEGRADED
)) {
7043 btrfs_report_missing_device(fs_info
, devid
,
7048 device
= add_missing_dev(fs_devices
, devid
, dev_uuid
);
7049 if (IS_ERR(device
)) {
7051 "failed to add missing dev %llu: %ld",
7052 devid
, PTR_ERR(device
));
7053 return PTR_ERR(device
);
7055 btrfs_report_missing_device(fs_info
, devid
, dev_uuid
, false);
7057 if (!device
->bdev
) {
7058 if (!btrfs_test_opt(fs_info
, DEGRADED
)) {
7059 btrfs_report_missing_device(fs_info
,
7060 devid
, dev_uuid
, true);
7063 btrfs_report_missing_device(fs_info
, devid
,
7067 if (!device
->bdev
&&
7068 !test_bit(BTRFS_DEV_STATE_MISSING
, &device
->dev_state
)) {
7070 * this happens when a device that was properly setup
7071 * in the device info lists suddenly goes bad.
7072 * device->bdev is NULL, and so we have to set
7073 * device->missing to one here
7075 device
->fs_devices
->missing_devices
++;
7076 set_bit(BTRFS_DEV_STATE_MISSING
, &device
->dev_state
);
7079 /* Move the device to its own fs_devices */
7080 if (device
->fs_devices
!= fs_devices
) {
7081 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING
,
7082 &device
->dev_state
));
7084 list_move(&device
->dev_list
, &fs_devices
->devices
);
7085 device
->fs_devices
->num_devices
--;
7086 fs_devices
->num_devices
++;
7088 device
->fs_devices
->missing_devices
--;
7089 fs_devices
->missing_devices
++;
7091 device
->fs_devices
= fs_devices
;
7095 if (device
->fs_devices
!= fs_info
->fs_devices
) {
7096 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
));
7097 if (device
->generation
!=
7098 btrfs_device_generation(leaf
, dev_item
))
7102 fill_device_from_item(leaf
, dev_item
, device
);
7103 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA
, &device
->dev_state
);
7104 if (test_bit(BTRFS_DEV_STATE_WRITEABLE
, &device
->dev_state
) &&
7105 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT
, &device
->dev_state
)) {
7106 device
->fs_devices
->total_rw_bytes
+= device
->total_bytes
;
7107 atomic64_add(device
->total_bytes
- device
->bytes_used
,
7108 &fs_info
->free_chunk_space
);
7114 int btrfs_read_sys_array(struct btrfs_fs_info
*fs_info
)
7116 struct btrfs_root
*root
= fs_info
->tree_root
;
7117 struct btrfs_super_block
*super_copy
= fs_info
->super_copy
;
7118 struct extent_buffer
*sb
;
7119 struct btrfs_disk_key
*disk_key
;
7120 struct btrfs_chunk
*chunk
;
7122 unsigned long sb_array_offset
;
7129 struct btrfs_key key
;
7131 ASSERT(BTRFS_SUPER_INFO_SIZE
<= fs_info
->nodesize
);
7133 * This will create extent buffer of nodesize, superblock size is
7134 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7135 * overallocate but we can keep it as-is, only the first page is used.
7137 sb
= btrfs_find_create_tree_block(fs_info
, BTRFS_SUPER_INFO_OFFSET
);
7140 set_extent_buffer_uptodate(sb
);
7141 btrfs_set_buffer_lockdep_class(root
->root_key
.objectid
, sb
, 0);
7143 * The sb extent buffer is artificial and just used to read the system array.
7144 * set_extent_buffer_uptodate() call does not properly mark all it's
7145 * pages up-to-date when the page is larger: extent does not cover the
7146 * whole page and consequently check_page_uptodate does not find all
7147 * the page's extents up-to-date (the hole beyond sb),
7148 * write_extent_buffer then triggers a WARN_ON.
7150 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
7151 * but sb spans only this function. Add an explicit SetPageUptodate call
7152 * to silence the warning eg. on PowerPC 64.
7154 if (PAGE_SIZE
> BTRFS_SUPER_INFO_SIZE
)
7155 SetPageUptodate(sb
->pages
[0]);
7157 write_extent_buffer(sb
, super_copy
, 0, BTRFS_SUPER_INFO_SIZE
);
7158 array_size
= btrfs_super_sys_array_size(super_copy
);
7160 array_ptr
= super_copy
->sys_chunk_array
;
7161 sb_array_offset
= offsetof(struct btrfs_super_block
, sys_chunk_array
);
7164 while (cur_offset
< array_size
) {
7165 disk_key
= (struct btrfs_disk_key
*)array_ptr
;
7166 len
= sizeof(*disk_key
);
7167 if (cur_offset
+ len
> array_size
)
7168 goto out_short_read
;
7170 btrfs_disk_key_to_cpu(&key
, disk_key
);
7173 sb_array_offset
+= len
;
7176 if (key
.type
== BTRFS_CHUNK_ITEM_KEY
) {
7177 chunk
= (struct btrfs_chunk
*)sb_array_offset
;
7179 * At least one btrfs_chunk with one stripe must be
7180 * present, exact stripe count check comes afterwards
7182 len
= btrfs_chunk_item_size(1);
7183 if (cur_offset
+ len
> array_size
)
7184 goto out_short_read
;
7186 num_stripes
= btrfs_chunk_num_stripes(sb
, chunk
);
7189 "invalid number of stripes %u in sys_array at offset %u",
7190 num_stripes
, cur_offset
);
7195 type
= btrfs_chunk_type(sb
, chunk
);
7196 if ((type
& BTRFS_BLOCK_GROUP_SYSTEM
) == 0) {
7198 "invalid chunk type %llu in sys_array at offset %u",
7204 len
= btrfs_chunk_item_size(num_stripes
);
7205 if (cur_offset
+ len
> array_size
)
7206 goto out_short_read
;
7208 ret
= read_one_chunk(&key
, sb
, chunk
);
7213 "unexpected item type %u in sys_array at offset %u",
7214 (u32
)key
.type
, cur_offset
);
7219 sb_array_offset
+= len
;
7222 clear_extent_buffer_uptodate(sb
);
7223 free_extent_buffer_stale(sb
);
7227 btrfs_err(fs_info
, "sys_array too short to read %u bytes at offset %u",
7229 clear_extent_buffer_uptodate(sb
);
7230 free_extent_buffer_stale(sb
);
7235 * Check if all chunks in the fs are OK for read-write degraded mount
7237 * If the @failing_dev is specified, it's accounted as missing.
7239 * Return true if all chunks meet the minimal RW mount requirements.
7240 * Return false if any chunk doesn't meet the minimal RW mount requirements.
7242 bool btrfs_check_rw_degradable(struct btrfs_fs_info
*fs_info
,
7243 struct btrfs_device
*failing_dev
)
7245 struct extent_map_tree
*map_tree
= &fs_info
->mapping_tree
;
7246 struct extent_map
*em
;
7250 read_lock(&map_tree
->lock
);
7251 em
= lookup_extent_mapping(map_tree
, 0, (u64
)-1);
7252 read_unlock(&map_tree
->lock
);
7253 /* No chunk at all? Return false anyway */
7259 struct map_lookup
*map
;
7264 map
= em
->map_lookup
;
7266 btrfs_get_num_tolerated_disk_barrier_failures(
7268 for (i
= 0; i
< map
->num_stripes
; i
++) {
7269 struct btrfs_device
*dev
= map
->stripes
[i
].dev
;
7271 if (!dev
|| !dev
->bdev
||
7272 test_bit(BTRFS_DEV_STATE_MISSING
, &dev
->dev_state
) ||
7273 dev
->last_flush_error
)
7275 else if (failing_dev
&& failing_dev
== dev
)
7278 if (missing
> max_tolerated
) {
7281 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7282 em
->start
, missing
, max_tolerated
);
7283 free_extent_map(em
);
7287 next_start
= extent_map_end(em
);
7288 free_extent_map(em
);
7290 read_lock(&map_tree
->lock
);
7291 em
= lookup_extent_mapping(map_tree
, next_start
,
7292 (u64
)(-1) - next_start
);
7293 read_unlock(&map_tree
->lock
);
7299 int btrfs_read_chunk_tree(struct btrfs_fs_info
*fs_info
)
7301 struct btrfs_root
*root
= fs_info
->chunk_root
;
7302 struct btrfs_path
*path
;
7303 struct extent_buffer
*leaf
;
7304 struct btrfs_key key
;
7305 struct btrfs_key found_key
;
7310 path
= btrfs_alloc_path();
7315 * uuid_mutex is needed only if we are mounting a sprout FS
7316 * otherwise we don't need it.
7318 mutex_lock(&uuid_mutex
);
7321 * It is possible for mount and umount to race in such a way that
7322 * we execute this code path, but open_fs_devices failed to clear
7323 * total_rw_bytes. We certainly want it cleared before reading the
7324 * device items, so clear it here.
7326 fs_info
->fs_devices
->total_rw_bytes
= 0;
7329 * Read all device items, and then all the chunk items. All
7330 * device items are found before any chunk item (their object id
7331 * is smaller than the lowest possible object id for a chunk
7332 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7334 key
.objectid
= BTRFS_DEV_ITEMS_OBJECTID
;
7337 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
7341 leaf
= path
->nodes
[0];
7342 slot
= path
->slots
[0];
7343 if (slot
>= btrfs_header_nritems(leaf
)) {
7344 ret
= btrfs_next_leaf(root
, path
);
7351 btrfs_item_key_to_cpu(leaf
, &found_key
, slot
);
7352 if (found_key
.type
== BTRFS_DEV_ITEM_KEY
) {
7353 struct btrfs_dev_item
*dev_item
;
7354 dev_item
= btrfs_item_ptr(leaf
, slot
,
7355 struct btrfs_dev_item
);
7356 ret
= read_one_dev(leaf
, dev_item
);
7360 } else if (found_key
.type
== BTRFS_CHUNK_ITEM_KEY
) {
7361 struct btrfs_chunk
*chunk
;
7362 chunk
= btrfs_item_ptr(leaf
, slot
, struct btrfs_chunk
);
7363 mutex_lock(&fs_info
->chunk_mutex
);
7364 ret
= read_one_chunk(&found_key
, leaf
, chunk
);
7365 mutex_unlock(&fs_info
->chunk_mutex
);
7373 * After loading chunk tree, we've got all device information,
7374 * do another round of validation checks.
7376 if (total_dev
!= fs_info
->fs_devices
->total_devices
) {
7378 "super_num_devices %llu mismatch with num_devices %llu found here",
7379 btrfs_super_num_devices(fs_info
->super_copy
),
7384 if (btrfs_super_total_bytes(fs_info
->super_copy
) <
7385 fs_info
->fs_devices
->total_rw_bytes
) {
7387 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7388 btrfs_super_total_bytes(fs_info
->super_copy
),
7389 fs_info
->fs_devices
->total_rw_bytes
);
7395 mutex_unlock(&uuid_mutex
);
7397 btrfs_free_path(path
);
7401 void btrfs_init_devices_late(struct btrfs_fs_info
*fs_info
)
7403 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
7404 struct btrfs_device
*device
;
7406 while (fs_devices
) {
7407 mutex_lock(&fs_devices
->device_list_mutex
);
7408 list_for_each_entry(device
, &fs_devices
->devices
, dev_list
)
7409 device
->fs_info
= fs_info
;
7410 mutex_unlock(&fs_devices
->device_list_mutex
);
7412 fs_devices
= fs_devices
->seed
;
7416 static u64
btrfs_dev_stats_value(const struct extent_buffer
*eb
,
7417 const struct btrfs_dev_stats_item
*ptr
,
7422 read_extent_buffer(eb
, &val
,
7423 offsetof(struct btrfs_dev_stats_item
, values
) +
7424 ((unsigned long)ptr
) + (index
* sizeof(u64
)),
7429 static void btrfs_set_dev_stats_value(struct extent_buffer
*eb
,
7430 struct btrfs_dev_stats_item
*ptr
,
7433 write_extent_buffer(eb
, &val
,
7434 offsetof(struct btrfs_dev_stats_item
, values
) +
7435 ((unsigned long)ptr
) + (index
* sizeof(u64
)),
7439 int btrfs_init_dev_stats(struct btrfs_fs_info
*fs_info
)
7441 struct btrfs_key key
;
7442 struct btrfs_root
*dev_root
= fs_info
->dev_root
;
7443 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
7444 struct extent_buffer
*eb
;
7447 struct btrfs_device
*device
;
7448 struct btrfs_path
*path
= NULL
;
7451 path
= btrfs_alloc_path();
7455 mutex_lock(&fs_devices
->device_list_mutex
);
7456 list_for_each_entry(device
, &fs_devices
->devices
, dev_list
) {
7458 struct btrfs_dev_stats_item
*ptr
;
7460 key
.objectid
= BTRFS_DEV_STATS_OBJECTID
;
7461 key
.type
= BTRFS_PERSISTENT_ITEM_KEY
;
7462 key
.offset
= device
->devid
;
7463 ret
= btrfs_search_slot(NULL
, dev_root
, &key
, path
, 0, 0);
7465 for (i
= 0; i
< BTRFS_DEV_STAT_VALUES_MAX
; i
++)
7466 btrfs_dev_stat_set(device
, i
, 0);
7467 device
->dev_stats_valid
= 1;
7468 btrfs_release_path(path
);
7471 slot
= path
->slots
[0];
7472 eb
= path
->nodes
[0];
7473 item_size
= btrfs_item_size_nr(eb
, slot
);
7475 ptr
= btrfs_item_ptr(eb
, slot
,
7476 struct btrfs_dev_stats_item
);
7478 for (i
= 0; i
< BTRFS_DEV_STAT_VALUES_MAX
; i
++) {
7479 if (item_size
>= (1 + i
) * sizeof(__le64
))
7480 btrfs_dev_stat_set(device
, i
,
7481 btrfs_dev_stats_value(eb
, ptr
, i
));
7483 btrfs_dev_stat_set(device
, i
, 0);
7486 device
->dev_stats_valid
= 1;
7487 btrfs_dev_stat_print_on_load(device
);
7488 btrfs_release_path(path
);
7490 mutex_unlock(&fs_devices
->device_list_mutex
);
7492 btrfs_free_path(path
);
7493 return ret
< 0 ? ret
: 0;
7496 static int update_dev_stat_item(struct btrfs_trans_handle
*trans
,
7497 struct btrfs_device
*device
)
7499 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
7500 struct btrfs_root
*dev_root
= fs_info
->dev_root
;
7501 struct btrfs_path
*path
;
7502 struct btrfs_key key
;
7503 struct extent_buffer
*eb
;
7504 struct btrfs_dev_stats_item
*ptr
;
7508 key
.objectid
= BTRFS_DEV_STATS_OBJECTID
;
7509 key
.type
= BTRFS_PERSISTENT_ITEM_KEY
;
7510 key
.offset
= device
->devid
;
7512 path
= btrfs_alloc_path();
7515 ret
= btrfs_search_slot(trans
, dev_root
, &key
, path
, -1, 1);
7517 btrfs_warn_in_rcu(fs_info
,
7518 "error %d while searching for dev_stats item for device %s",
7519 ret
, rcu_str_deref(device
->name
));
7524 btrfs_item_size_nr(path
->nodes
[0], path
->slots
[0]) < sizeof(*ptr
)) {
7525 /* need to delete old one and insert a new one */
7526 ret
= btrfs_del_item(trans
, dev_root
, path
);
7528 btrfs_warn_in_rcu(fs_info
,
7529 "delete too small dev_stats item for device %s failed %d",
7530 rcu_str_deref(device
->name
), ret
);
7537 /* need to insert a new item */
7538 btrfs_release_path(path
);
7539 ret
= btrfs_insert_empty_item(trans
, dev_root
, path
,
7540 &key
, sizeof(*ptr
));
7542 btrfs_warn_in_rcu(fs_info
,
7543 "insert dev_stats item for device %s failed %d",
7544 rcu_str_deref(device
->name
), ret
);
7549 eb
= path
->nodes
[0];
7550 ptr
= btrfs_item_ptr(eb
, path
->slots
[0], struct btrfs_dev_stats_item
);
7551 for (i
= 0; i
< BTRFS_DEV_STAT_VALUES_MAX
; i
++)
7552 btrfs_set_dev_stats_value(eb
, ptr
, i
,
7553 btrfs_dev_stat_read(device
, i
));
7554 btrfs_mark_buffer_dirty(eb
);
7557 btrfs_free_path(path
);
7562 * called from commit_transaction. Writes all changed device stats to disk.
7564 int btrfs_run_dev_stats(struct btrfs_trans_handle
*trans
)
7566 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
7567 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
7568 struct btrfs_device
*device
;
7572 mutex_lock(&fs_devices
->device_list_mutex
);
7573 list_for_each_entry(device
, &fs_devices
->devices
, dev_list
) {
7574 stats_cnt
= atomic_read(&device
->dev_stats_ccnt
);
7575 if (!device
->dev_stats_valid
|| stats_cnt
== 0)
7580 * There is a LOAD-LOAD control dependency between the value of
7581 * dev_stats_ccnt and updating the on-disk values which requires
7582 * reading the in-memory counters. Such control dependencies
7583 * require explicit read memory barriers.
7585 * This memory barriers pairs with smp_mb__before_atomic in
7586 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7587 * barrier implied by atomic_xchg in
7588 * btrfs_dev_stats_read_and_reset
7592 ret
= update_dev_stat_item(trans
, device
);
7594 atomic_sub(stats_cnt
, &device
->dev_stats_ccnt
);
7596 mutex_unlock(&fs_devices
->device_list_mutex
);
7601 void btrfs_dev_stat_inc_and_print(struct btrfs_device
*dev
, int index
)
7603 btrfs_dev_stat_inc(dev
, index
);
7604 btrfs_dev_stat_print_on_error(dev
);
7607 static void btrfs_dev_stat_print_on_error(struct btrfs_device
*dev
)
7609 if (!dev
->dev_stats_valid
)
7611 btrfs_err_rl_in_rcu(dev
->fs_info
,
7612 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7613 rcu_str_deref(dev
->name
),
7614 btrfs_dev_stat_read(dev
, BTRFS_DEV_STAT_WRITE_ERRS
),
7615 btrfs_dev_stat_read(dev
, BTRFS_DEV_STAT_READ_ERRS
),
7616 btrfs_dev_stat_read(dev
, BTRFS_DEV_STAT_FLUSH_ERRS
),
7617 btrfs_dev_stat_read(dev
, BTRFS_DEV_STAT_CORRUPTION_ERRS
),
7618 btrfs_dev_stat_read(dev
, BTRFS_DEV_STAT_GENERATION_ERRS
));
7621 static void btrfs_dev_stat_print_on_load(struct btrfs_device
*dev
)
7625 for (i
= 0; i
< BTRFS_DEV_STAT_VALUES_MAX
; i
++)
7626 if (btrfs_dev_stat_read(dev
, i
) != 0)
7628 if (i
== BTRFS_DEV_STAT_VALUES_MAX
)
7629 return; /* all values == 0, suppress message */
7631 btrfs_info_in_rcu(dev
->fs_info
,
7632 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7633 rcu_str_deref(dev
->name
),
7634 btrfs_dev_stat_read(dev
, BTRFS_DEV_STAT_WRITE_ERRS
),
7635 btrfs_dev_stat_read(dev
, BTRFS_DEV_STAT_READ_ERRS
),
7636 btrfs_dev_stat_read(dev
, BTRFS_DEV_STAT_FLUSH_ERRS
),
7637 btrfs_dev_stat_read(dev
, BTRFS_DEV_STAT_CORRUPTION_ERRS
),
7638 btrfs_dev_stat_read(dev
, BTRFS_DEV_STAT_GENERATION_ERRS
));
7641 int btrfs_get_dev_stats(struct btrfs_fs_info
*fs_info
,
7642 struct btrfs_ioctl_get_dev_stats
*stats
)
7644 struct btrfs_device
*dev
;
7645 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
7648 mutex_lock(&fs_devices
->device_list_mutex
);
7649 dev
= btrfs_find_device(fs_info
->fs_devices
, stats
->devid
, NULL
, NULL
,
7651 mutex_unlock(&fs_devices
->device_list_mutex
);
7654 btrfs_warn(fs_info
, "get dev_stats failed, device not found");
7656 } else if (!dev
->dev_stats_valid
) {
7657 btrfs_warn(fs_info
, "get dev_stats failed, not yet valid");
7659 } else if (stats
->flags
& BTRFS_DEV_STATS_RESET
) {
7660 for (i
= 0; i
< BTRFS_DEV_STAT_VALUES_MAX
; i
++) {
7661 if (stats
->nr_items
> i
)
7663 btrfs_dev_stat_read_and_reset(dev
, i
);
7665 btrfs_dev_stat_set(dev
, i
, 0);
7667 btrfs_info(fs_info
, "device stats zeroed by %s (%d)",
7668 current
->comm
, task_pid_nr(current
));
7670 for (i
= 0; i
< BTRFS_DEV_STAT_VALUES_MAX
; i
++)
7671 if (stats
->nr_items
> i
)
7672 stats
->values
[i
] = btrfs_dev_stat_read(dev
, i
);
7674 if (stats
->nr_items
> BTRFS_DEV_STAT_VALUES_MAX
)
7675 stats
->nr_items
= BTRFS_DEV_STAT_VALUES_MAX
;
7679 void btrfs_scratch_superblocks(struct block_device
*bdev
, const char *device_path
)
7681 struct buffer_head
*bh
;
7682 struct btrfs_super_block
*disk_super
;
7688 for (copy_num
= 0; copy_num
< BTRFS_SUPER_MIRROR_MAX
;
7691 if (btrfs_read_dev_one_super(bdev
, copy_num
, &bh
))
7694 disk_super
= (struct btrfs_super_block
*)bh
->b_data
;
7696 memset(&disk_super
->magic
, 0, sizeof(disk_super
->magic
));
7697 set_buffer_dirty(bh
);
7698 sync_dirty_buffer(bh
);
7702 /* Notify udev that device has changed */
7703 btrfs_kobject_uevent(bdev
, KOBJ_CHANGE
);
7705 /* Update ctime/mtime for device path for libblkid */
7706 update_dev_time(device_path
);
7710 * Update the size and bytes used for each device where it changed. This is
7711 * delayed since we would otherwise get errors while writing out the
7714 * Must be invoked during transaction commit.
7716 void btrfs_commit_device_sizes(struct btrfs_transaction
*trans
)
7718 struct btrfs_device
*curr
, *next
;
7720 ASSERT(trans
->state
== TRANS_STATE_COMMIT_DOING
);
7722 if (list_empty(&trans
->dev_update_list
))
7726 * We don't need the device_list_mutex here. This list is owned by the
7727 * transaction and the transaction must complete before the device is
7730 mutex_lock(&trans
->fs_info
->chunk_mutex
);
7731 list_for_each_entry_safe(curr
, next
, &trans
->dev_update_list
,
7733 list_del_init(&curr
->post_commit_list
);
7734 curr
->commit_total_bytes
= curr
->disk_total_bytes
;
7735 curr
->commit_bytes_used
= curr
->bytes_used
;
7737 mutex_unlock(&trans
->fs_info
->chunk_mutex
);
7740 void btrfs_set_fs_info_ptr(struct btrfs_fs_info
*fs_info
)
7742 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
7743 while (fs_devices
) {
7744 fs_devices
->fs_info
= fs_info
;
7745 fs_devices
= fs_devices
->seed
;
7749 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info
*fs_info
)
7751 struct btrfs_fs_devices
*fs_devices
= fs_info
->fs_devices
;
7752 while (fs_devices
) {
7753 fs_devices
->fs_info
= NULL
;
7754 fs_devices
= fs_devices
->seed
;
7759 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7761 int btrfs_bg_type_to_factor(u64 flags
)
7763 const int index
= btrfs_bg_flags_to_raid_index(flags
);
7765 return btrfs_raid_array
[index
].ncopies
;
7770 static int verify_one_dev_extent(struct btrfs_fs_info
*fs_info
,
7771 u64 chunk_offset
, u64 devid
,
7772 u64 physical_offset
, u64 physical_len
)
7774 struct extent_map_tree
*em_tree
= &fs_info
->mapping_tree
;
7775 struct extent_map
*em
;
7776 struct map_lookup
*map
;
7777 struct btrfs_device
*dev
;
7783 read_lock(&em_tree
->lock
);
7784 em
= lookup_extent_mapping(em_tree
, chunk_offset
, 1);
7785 read_unlock(&em_tree
->lock
);
7789 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7790 physical_offset
, devid
);
7795 map
= em
->map_lookup
;
7796 stripe_len
= calc_stripe_length(map
->type
, em
->len
, map
->num_stripes
);
7797 if (physical_len
!= stripe_len
) {
7799 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7800 physical_offset
, devid
, em
->start
, physical_len
,
7806 for (i
= 0; i
< map
->num_stripes
; i
++) {
7807 if (map
->stripes
[i
].dev
->devid
== devid
&&
7808 map
->stripes
[i
].physical
== physical_offset
) {
7810 if (map
->verified_stripes
>= map
->num_stripes
) {
7812 "too many dev extents for chunk %llu found",
7817 map
->verified_stripes
++;
7823 "dev extent physical offset %llu devid %llu has no corresponding chunk",
7824 physical_offset
, devid
);
7828 /* Make sure no dev extent is beyond device bondary */
7829 dev
= btrfs_find_device(fs_info
->fs_devices
, devid
, NULL
, NULL
, true);
7831 btrfs_err(fs_info
, "failed to find devid %llu", devid
);
7836 /* It's possible this device is a dummy for seed device */
7837 if (dev
->disk_total_bytes
== 0) {
7838 dev
= btrfs_find_device(fs_info
->fs_devices
->seed
, devid
, NULL
,
7841 btrfs_err(fs_info
, "failed to find seed devid %llu",
7848 if (physical_offset
+ physical_len
> dev
->disk_total_bytes
) {
7850 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7851 devid
, physical_offset
, physical_len
,
7852 dev
->disk_total_bytes
);
7857 free_extent_map(em
);
7861 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info
*fs_info
)
7863 struct extent_map_tree
*em_tree
= &fs_info
->mapping_tree
;
7864 struct extent_map
*em
;
7865 struct rb_node
*node
;
7868 read_lock(&em_tree
->lock
);
7869 for (node
= rb_first_cached(&em_tree
->map
); node
; node
= rb_next(node
)) {
7870 em
= rb_entry(node
, struct extent_map
, rb_node
);
7871 if (em
->map_lookup
->num_stripes
!=
7872 em
->map_lookup
->verified_stripes
) {
7874 "chunk %llu has missing dev extent, have %d expect %d",
7875 em
->start
, em
->map_lookup
->verified_stripes
,
7876 em
->map_lookup
->num_stripes
);
7882 read_unlock(&em_tree
->lock
);
7887 * Ensure that all dev extents are mapped to correct chunk, otherwise
7888 * later chunk allocation/free would cause unexpected behavior.
7890 * NOTE: This will iterate through the whole device tree, which should be of
7891 * the same size level as the chunk tree. This slightly increases mount time.
7893 int btrfs_verify_dev_extents(struct btrfs_fs_info
*fs_info
)
7895 struct btrfs_path
*path
;
7896 struct btrfs_root
*root
= fs_info
->dev_root
;
7897 struct btrfs_key key
;
7899 u64 prev_dev_ext_end
= 0;
7903 key
.type
= BTRFS_DEV_EXTENT_KEY
;
7906 path
= btrfs_alloc_path();
7910 path
->reada
= READA_FORWARD
;
7911 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
7915 if (path
->slots
[0] >= btrfs_header_nritems(path
->nodes
[0])) {
7916 ret
= btrfs_next_item(root
, path
);
7919 /* No dev extents at all? Not good */
7926 struct extent_buffer
*leaf
= path
->nodes
[0];
7927 struct btrfs_dev_extent
*dext
;
7928 int slot
= path
->slots
[0];
7930 u64 physical_offset
;
7934 btrfs_item_key_to_cpu(leaf
, &key
, slot
);
7935 if (key
.type
!= BTRFS_DEV_EXTENT_KEY
)
7937 devid
= key
.objectid
;
7938 physical_offset
= key
.offset
;
7940 dext
= btrfs_item_ptr(leaf
, slot
, struct btrfs_dev_extent
);
7941 chunk_offset
= btrfs_dev_extent_chunk_offset(leaf
, dext
);
7942 physical_len
= btrfs_dev_extent_length(leaf
, dext
);
7944 /* Check if this dev extent overlaps with the previous one */
7945 if (devid
== prev_devid
&& physical_offset
< prev_dev_ext_end
) {
7947 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
7948 devid
, physical_offset
, prev_dev_ext_end
);
7953 ret
= verify_one_dev_extent(fs_info
, chunk_offset
, devid
,
7954 physical_offset
, physical_len
);
7958 prev_dev_ext_end
= physical_offset
+ physical_len
;
7960 ret
= btrfs_next_item(root
, path
);
7969 /* Ensure all chunks have corresponding dev extents */
7970 ret
= verify_chunk_dev_extent_mapping(fs_info
);
7972 btrfs_free_path(path
);
7977 * Check whether the given block group or device is pinned by any inode being
7978 * used as a swapfile.
7980 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info
*fs_info
, void *ptr
)
7982 struct btrfs_swapfile_pin
*sp
;
7983 struct rb_node
*node
;
7985 spin_lock(&fs_info
->swapfile_pins_lock
);
7986 node
= fs_info
->swapfile_pins
.rb_node
;
7988 sp
= rb_entry(node
, struct btrfs_swapfile_pin
, node
);
7990 node
= node
->rb_left
;
7991 else if (ptr
> sp
->ptr
)
7992 node
= node
->rb_right
;
7996 spin_unlock(&fs_info
->swapfile_pins_lock
);
7997 return node
!= NULL
;