// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "volumes.h"
#include "zoned.h"
#include "rcu-string.h"
#include "disk-io.h"
#include "block-group.h"
#include "transaction.h"
#include "dev-replace.h"
#include "space-info.h"
/* Maximum number of zones to report per blkdev_report_zones() call */
#define BTRFS_REPORT_NR_ZONES   4096
/* Invalid allocation pointer value for missing devices */
#define WP_MISSING_DEV ((u64)-1)
/* Pseudo write pointer value for conventional zone */
#define WP_CONVENTIONAL ((u64)-2)

/*
 * Location of the first zone of superblock logging zone pairs.
 *
 * - primary superblock: 0B (zone 0)
 * - first copy: 512G (zone starting at that offset)
 * - second copy: 4T (zone starting at that offset)
 */
#define BTRFS_SB_LOG_PRIMARY_OFFSET	(0ULL)
#define BTRFS_SB_LOG_FIRST_OFFSET	(512ULL * SZ_1G)
#define BTRFS_SB_LOG_SECOND_OFFSET	(4096ULL * SZ_1G)

#define BTRFS_SB_LOG_FIRST_SHIFT	const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
#define BTRFS_SB_LOG_SECOND_SHIFT	const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)

/* Number of superblock log zones */
#define BTRFS_NR_SB_LOG_ZONES	2

/*
 * Maximum supported zone size. Currently, SMR disks have a zone size of
 * 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range. We do not
 * expect the zone size to become larger than 8GiB in the near future.
 */
#define BTRFS_MAX_ZONE_SIZE		SZ_8G
static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	struct blk_zone *zones = data;

	memcpy(&zones[idx], zone, sizeof(*zone));

	return 0;
}
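/*
 * Each superblock mirror on a zoned device lives in a pair of consecutive
 * zones used as a log: new superblock copies are appended at the write
 * pointer, alternating between the two zones as each fills up. The helper
 * below derives the position of the latest superblock from the pair's zone
 * conditions, following the state table in its body.
 */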
static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
			    u64 *wp_ret)
{
	bool empty[BTRFS_NR_SB_LOG_ZONES];
	bool full[BTRFS_NR_SB_LOG_ZONES];
	sector_t sector;

	ASSERT(zones[0].type != BLK_ZONE_TYPE_CONVENTIONAL &&
	       zones[1].type != BLK_ZONE_TYPE_CONVENTIONAL);

	empty[0] = (zones[0].cond == BLK_ZONE_COND_EMPTY);
	empty[1] = (zones[1].cond == BLK_ZONE_COND_EMPTY);
	full[0] = (zones[0].cond == BLK_ZONE_COND_FULL);
	full[1] = (zones[1].cond == BLK_ZONE_COND_FULL);

	/*
	 * Possible states of log buffer zones
	 *
	 *           Empty[0]  In use[0]  Full[0]
	 * Empty[1]         *          x        0
	 * In use[1]        0          x        0
	 * Full[1]          1          1        C
	 *
	 * Log position:
	 *   *: Special case, no superblock is written
	 *   0: Use write pointer of zones[0]
	 *   1: Use write pointer of zones[1]
	 *   C: Compare super blocks from zones[0] and zones[1], use the latest
	 *      one determined by generation
	 *   x: Invalid state
	 */

	if (empty[0] && empty[1]) {
		/* Special case to distinguish no superblock to read */
		*wp_ret = zones[0].start << SECTOR_SHIFT;
		return -ENOENT;
	} else if (full[0] && full[1]) {
		/* Compare two super blocks */
		struct address_space *mapping = bdev->bd_inode->i_mapping;
		struct page *page[BTRFS_NR_SB_LOG_ZONES];
		struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
		int i;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
			u64 bytenr;

			bytenr = ((zones[i].start + zones[i].len)
				   << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;

			page[i] = read_cache_page_gfp(mapping,
					bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
			}
			super[i] = page_address(page[i]);
		}

		if (super[0]->generation > super[1]->generation)
			sector = zones[1].start;
		else
			sector = zones[0].start;

		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++)
			btrfs_release_disk_super(super[i]);
	} else if (!full[0] && (empty[1] || full[1])) {
		sector = zones[0].wp;
	} else if (full[0]) {
		sector = zones[1].wp;
	} else {
		return -EUCLEAN;
	}

	*wp_ret = sector << SECTOR_SHIFT;
	return 0;
}
/*
 * Get the first zone number of the superblock mirror
 */
static inline u32 sb_zone_number(int shift, int mirror)
{
	u64 zone = 0;

	ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
	switch (mirror) {
	case 0: zone = 0; break;
	case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
	case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
	}

	ASSERT(zone <= U32_MAX);

	return (u32)zone;
}
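/*
 * For example, with a 256MiB zone size (shift == 28): mirror 1 starts at
 * BTRFS_SB_LOG_FIRST_OFFSET = 512GiB = 2^39, so its first log zone is
 * 1ULL << (39 - 28) = zone 2048; mirror 2 starts at 4TiB = 2^42, giving
 * zone 16384. Both offsets are powers of two, so any power-of-two zone
 * size maps them to an exact zone number.
 */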
static inline sector_t zone_start_sector(u32 zone_number,
					 struct block_device *bdev)
{
	return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev));
}
static inline u64 zone_start_physical(u32 zone_number,
				      struct btrfs_zoned_device_info *zone_info)
{
	return (u64)zone_number << zone_info->zone_size_shift;
}
/*
 * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block
 * device into static sized chunks and fakes a conventional zone on each of
 * them.
 */
static int emulate_report_zones(struct btrfs_device *device, u64 pos,
				struct blk_zone *zones, unsigned int nr_zones)
{
	const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
	sector_t bdev_size = bdev_nr_sectors(device->bdev);
	unsigned int i;

	pos >>= SECTOR_SHIFT;
	for (i = 0; i < nr_zones; i++) {
		zones[i].start = i * zone_sectors + pos;
		zones[i].len = zone_sectors;
		zones[i].capacity = zone_sectors;
		zones[i].wp = zones[i].start + zone_sectors;
		zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
		zones[i].cond = BLK_ZONE_COND_NOT_WP;

		if (zones[i].wp >= bdev_size) {
			i++;
			break;
		}
	}

	return i;
}
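/*
 * Like blkdev_report_zones(), the emulation above returns the number of
 * zones actually reported, which may be less than nr_zones at the end of
 * the device. The lookup below first serves requests from the in-memory
 * zone cache (populated at mount when enabled) and only falls back to a
 * report-zones command when some requested zone is not cached.
 */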
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
			       struct blk_zone *zones, unsigned int *nr_zones)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zno;
	int ret;

	if (!*nr_zones)
		return 0;

	if (!bdev_is_zoned(device->bdev)) {
		ret = emulate_report_zones(device, pos, zones, *nr_zones);
		*nr_zones = ret;
		return 0;
	}

	/* Check cache */
	if (zinfo->zone_cache) {
		unsigned int i;

		ASSERT(IS_ALIGNED(pos, zinfo->zone_size));
		zno = pos >> zinfo->zone_size_shift;
		/*
		 * We cannot report zones beyond the zone end. So, it is OK to
		 * cap *nr_zones at the zone end.
		 */
		*nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno);

		for (i = 0; i < *nr_zones; i++) {
			struct blk_zone *zone_info;

			zone_info = &zinfo->zone_cache[zno + i];
			if (!zone_info->len)
				break;
		}

		if (i == *nr_zones) {
			/* Cache hit on all the zones */
			memcpy(zones, zinfo->zone_cache + zno,
			       sizeof(*zinfo->zone_cache) * *nr_zones);
			return 0;
		}
	}

	ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
				  copy_zone_info_cb, zones);
	if (ret < 0) {
		btrfs_err_in_rcu(device->fs_info,
				 "zoned: failed to read zone %llu on %s (devid %llu)",
				 pos, rcu_str_deref(device->name),
				 device->devid);
		return ret;
	}
	*nr_zones = ret;
	if (!ret)
		return -EIO;

	/* Populate cache */
	if (zinfo->zone_cache)
		memcpy(zinfo->zone_cache + zno, zones,
		       sizeof(*zinfo->zone_cache) * *nr_zones);

	return 0;
}
/* The emulated zone size is determined from the size of the first device extent */
static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_dev_extent *dext;
	int ret = 0;

	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_leaf(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}

	leaf = path->nodes[0];
	dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
	ret = 0;

out:
	btrfs_free_path(path);

	return ret;
}
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int ret = 0;

	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* We can skip reading of zone info for missing devices */
		if (!device->bdev)
			continue;

		ret = btrfs_get_dev_zone_info(device, true);
		if (ret)
			break;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_zoned_device_info *zone_info = NULL;
	struct block_device *bdev = device->bdev;
	sector_t nr_sectors;
	sector_t sector = 0;
	struct blk_zone *zones = NULL;
	unsigned int i, nreported = 0, nr_zones;
	sector_t zone_sectors;
	char *model, *emulated;
	int ret;

	/*
	 * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not
	 * be set yet.
	 */
	if (!btrfs_fs_incompat(fs_info, ZONED))
		return 0;

	if (device->zone_info)
		return 0;

	zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
	if (!zone_info)
		return -ENOMEM;

	device->zone_info = zone_info;

	if (!bdev_is_zoned(bdev)) {
		if (!fs_info->zone_size) {
			ret = calculate_emulated_zone_size(fs_info);
			if (ret)
				goto out;
		}

		ASSERT(fs_info->zone_size);
		zone_sectors = fs_info->zone_size >> SECTOR_SHIFT;
	} else {
		zone_sectors = bdev_zone_sectors(bdev);
	}

	/* Check if it's power of 2 (see is_power_of_2) */
	ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
	zone_info->zone_size = zone_sectors << SECTOR_SHIFT;

	/* We reject devices with a zone size larger than 8GB */
	if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
		btrfs_err_in_rcu(fs_info,
		"zoned: %s: zone size %llu larger than supported maximum %llu",
				 rcu_str_deref(device->name),
				 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	nr_sectors = bdev_nr_sectors(bdev);
	zone_info->zone_size_shift = ilog2(zone_info->zone_size);
	zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
	if (!IS_ALIGNED(nr_sectors, zone_sectors))
		zone_info->nr_zones++;

	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->seq_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
	if (!zone_info->empty_zones) {
		ret = -ENOMEM;
		goto out;
	}

	zones = kcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Enable zone cache only for a zoned device. On a non-zoned device, we
	 * fill the zone info with emulated CONVENTIONAL zones, so no need to
	 * use the cache.
	 */
	if (populate_cache && bdev_is_zoned(device->bdev)) {
		zone_info->zone_cache = vzalloc(sizeof(struct blk_zone) *
						zone_info->nr_zones);
		if (!zone_info->zone_cache) {
			btrfs_err_in_rcu(device->fs_info,
				"zoned: failed to allocate zone cache for %s",
				rcu_str_deref(device->name));
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Get zones type */
	while (sector < nr_sectors) {
		nr_zones = BTRFS_REPORT_NR_ZONES;
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
					  &nr_zones);
		if (ret)
			goto out;

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ)
				__set_bit(nreported, zone_info->seq_zones);
			if (zones[i].cond == BLK_ZONE_COND_EMPTY)
				__set_bit(nreported, zone_info->empty_zones);
			nreported++;
		}
		sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len;
	}

	if (nreported != zone_info->nr_zones) {
		btrfs_err_in_rcu(device->fs_info,
				 "inconsistent number of zones on %s (%u/%u)",
				 rcu_str_deref(device->name), nreported,
				 zone_info->nr_zones);
		ret = -EIO;
		goto out;
	}

	/* Validate superblock log */
	nr_zones = BTRFS_NR_SB_LOG_ZONES;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		u32 sb_zone;
		u64 sb_wp;
		int sb_pos = BTRFS_NR_SB_LOG_ZONES * i;

		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
		if (sb_zone + 1 >= zone_info->nr_zones)
			continue;

		ret = btrfs_get_dev_zones(device,
					  zone_start_physical(sb_zone, zone_info),
					  &zone_info->sb_zones[sb_pos],
					  &nr_zones);
		if (ret)
			goto out;

		if (nr_zones != BTRFS_NR_SB_LOG_ZONES) {
			btrfs_err_in_rcu(device->fs_info,
	"zoned: failed to read super block log zone info at devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}

		/*
		 * If zones[0] is conventional, always use the beginning of the
		 * zone to record superblock. No need to validate in that case.
		 */
		if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type ==
		    BLK_ZONE_TYPE_CONVENTIONAL)
			continue;

		ret = sb_write_pointer(device->bdev,
				       &zone_info->sb_zones[sb_pos], &sb_wp);
		if (ret != -ENOENT && ret) {
			btrfs_err_in_rcu(device->fs_info,
			"zoned: super block log zone corrupted devid %llu zone %u",
					 device->devid, sb_zone);
			ret = -EUCLEAN;
			goto out;
		}
	}

	kfree(zones);

	switch (bdev_zoned_model(bdev)) {
	case BLK_ZONED_HM:
		model = "host-managed zoned";
		emulated = "";
		break;
	case BLK_ZONED_HA:
		model = "host-aware zoned";
		emulated = "";
		break;
	case BLK_ZONED_NONE:
		model = "regular";
		emulated = "emulated ";
		break;
	default:
		/* Just in case */
		btrfs_err_in_rcu(fs_info, "zoned: unsupported model %d on %s",
				 bdev_zoned_model(bdev),
				 rcu_str_deref(device->name));
		ret = -EOPNOTSUPP;
		goto out_free_zone_info;
	}

	btrfs_info_in_rcu(fs_info,
		"%s block device %s, %u %szones of %llu bytes",
		model, rcu_str_deref(device->name), zone_info->nr_zones,
		emulated, zone_info->zone_size);

	return 0;

out:
	kfree(zones);
out_free_zone_info:
	btrfs_destroy_dev_zone_info(device);

	return ret;
}
void btrfs_destroy_dev_zone_info(struct btrfs_device *device)
{
	struct btrfs_zoned_device_info *zone_info = device->zone_info;

	if (!zone_info)
		return;

	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	vfree(zone_info->zone_cache);
	kfree(zone_info);
	device->zone_info = NULL;
}
int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
		       struct blk_zone *zone)
{
	unsigned int nr_zones = 1;
	int ret;

	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
	if (ret != 0 || !nr_zones)
		return ret ? ret : -EIO;

	return 0;
}
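/*
 * A rough usage sketch for the single-zone helper above: to learn the write
 * pointer of the zone containing a physical byte offset, assuming @pos is
 * zone aligned (wp_bytes is illustrative only):
 *
 *	struct blk_zone zone;
 *
 *	ret = btrfs_get_dev_zone(device, pos, &zone);
 *	if (!ret)
 *		wp_bytes = zone.wp << SECTOR_SHIFT;
 *
 * This is how btrfs_load_block_group_zone_info() below recovers allocation
 * offsets from the device.
 */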
int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 zoned_devices = 0;
	u64 nr_devices = 0;
	u64 zone_size = 0;
	const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
	int ret = 0;

	/* Count zoned devices */
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		enum blk_zoned_model model;

		if (!device->bdev)
			continue;

		model = bdev_zoned_model(device->bdev);
		/*
		 * A Host-Managed zoned device must be used as a zoned device.
		 * A Host-Aware zoned device and a non-zoned device can be
		 * treated as a zoned device, if ZONED flag is enabled in the
		 * superblock.
		 */
		if (model == BLK_ZONED_HM ||
		    (model == BLK_ZONED_HA && incompat_zoned) ||
		    (model == BLK_ZONED_NONE && incompat_zoned)) {
			struct btrfs_zoned_device_info *zone_info =
				device->zone_info;

			zoned_devices++;
			if (!zone_size) {
				zone_size = zone_info->zone_size;
			} else if (zone_info->zone_size != zone_size) {
				btrfs_err(fs_info,
		"zoned: unequal block device zone sizes: have %llu found %llu",
					  device->zone_info->zone_size,
					  zone_size);
				ret = -EINVAL;
				goto out;
			}
		}
		nr_devices++;
	}

	if (!zoned_devices && !incompat_zoned)
		goto out;

	if (!zoned_devices && incompat_zoned) {
		/* No zoned block device found on ZONED filesystem */
		btrfs_err(fs_info,
			  "zoned: no zoned devices found on a zoned filesystem");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices && !incompat_zoned) {
		btrfs_err(fs_info,
			  "zoned: mode not enabled but zoned device found");
		ret = -EINVAL;
		goto out;
	}

	if (zoned_devices != nr_devices) {
		btrfs_err(fs_info,
			  "zoned: cannot mix zoned and regular devices");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * stripe_size is always aligned to BTRFS_STRIPE_LEN in
	 * __btrfs_alloc_chunk(). Since we want stripe_len == zone_size,
	 * check the alignment here.
	 */
	if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) {
		btrfs_err(fs_info,
			  "zoned: zone size %llu not aligned to stripe %u",
			  zone_size, BTRFS_STRIPE_LEN);
		ret = -EINVAL;
		goto out;
	}

	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		btrfs_err(fs_info, "zoned: mixed block groups not supported");
		ret = -EINVAL;
		goto out;
	}

	fs_info->zone_size = zone_size;
	fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED;

	/*
	 * Check mount options here, because we might change fs_info->zoned
	 * from fs_info->zone_size just above.
	 */
	ret = btrfs_check_mountopts_zoned(fs_info);
	if (ret)
		goto out;

	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
out:
	return ret;
}
int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
{
	if (!btrfs_is_zoned(info))
		return 0;

	/*
	 * Space cache writing is not COWed. Disable that to avoid write errors
	 * in sequential zones.
	 */
	if (btrfs_test_opt(info, SPACE_CACHE)) {
		btrfs_err(info, "zoned: space cache v1 is not supported");
		return -EINVAL;
	}

	if (btrfs_test_opt(info, NODATACOW)) {
		btrfs_err(info, "zoned: NODATACOW not supported");
		return -EINVAL;
	}

	return 0;
}
static int sb_log_location(struct block_device *bdev, struct blk_zone *zones,
			   int rw, u64 *bytenr_ret)
{
	u64 wp;
	int ret;

	if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		*bytenr_ret = zones[0].start << SECTOR_SHIFT;
		return 0;
	}

	ret = sb_write_pointer(bdev, zones, &wp);
	if (ret != -ENOENT && ret < 0)
		return ret;

	if (rw == WRITE) {
		struct blk_zone *reset = NULL;

		if (wp == zones[0].start << SECTOR_SHIFT)
			reset = &zones[0];
		else if (wp == zones[1].start << SECTOR_SHIFT)
			reset = &zones[1];

		if (reset && reset->cond != BLK_ZONE_COND_EMPTY) {
			ASSERT(reset->cond == BLK_ZONE_COND_FULL);

			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len,
					       GFP_NOFS);
			if (ret)
				return ret;

			reset->cond = BLK_ZONE_COND_EMPTY;
			reset->wp = reset->start;
		}
	} else if (ret != -ENOENT) {
		/* For READ, we want the previous superblock */
		if (wp == zones[0].start << SECTOR_SHIFT)
			wp = (zones[1].start + zones[1].len) << SECTOR_SHIFT;
		wp -= BTRFS_SUPER_INFO_SIZE;
	}

	*bytenr_ret = wp;

	return 0;
}

int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
			       u64 *bytenr_ret)
{
	struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES];
	sector_t zone_sectors;
	u32 sb_zone;
	int ret;
	u8 zone_sectors_shift;
	sector_t nr_sectors;
	u32 nr_zones;

	if (!bdev_is_zoned(bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	ASSERT(rw == READ || rw == WRITE);

	zone_sectors = bdev_zone_sectors(bdev);
	if (!is_power_of_2(zone_sectors))
		return -EINVAL;
	zone_sectors_shift = ilog2(zone_sectors);
	nr_sectors = bdev_nr_sectors(bdev);
	nr_zones = nr_sectors >> zone_sectors_shift;

	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
	if (sb_zone + 1 >= nr_zones)
		return -ENOENT;

	ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
				  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
				  zones);
	if (ret < 0)
		return ret;
	if (ret != BTRFS_NR_SB_LOG_ZONES)
		return -EIO;

	return sb_log_location(bdev, zones, rw, bytenr_ret);
}

int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
			  u64 *bytenr_ret)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	u32 zone_num;

	/*
	 * For a zoned filesystem on a non-zoned block device, use the same
	 * super block locations as a regular filesystem. That way, the super
	 * block can always be retrieved and the zoned flag of the volume
	 * detected from the super block information.
	 */
	if (!bdev_is_zoned(device->bdev)) {
		*bytenr_ret = btrfs_sb_offset(mirror);
		return 0;
	}

	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
	if (zone_num + 1 >= zinfo->nr_zones)
		return -ENOENT;

	return sb_log_location(device->bdev,
			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
			       rw, bytenr_ret);
}
*zinfo
,
815 zone_num
= sb_zone_number(zinfo
->zone_size_shift
, mirror
);
816 if (zone_num
+ 1 >= zinfo
->nr_zones
)
819 if (!test_bit(zone_num
, zinfo
->seq_zones
))
825 void btrfs_advance_sb_log(struct btrfs_device
*device
, int mirror
)
827 struct btrfs_zoned_device_info
*zinfo
= device
->zone_info
;
828 struct blk_zone
*zone
;
830 if (!is_sb_log_zone(zinfo
, mirror
))
833 zone
= &zinfo
->sb_zones
[BTRFS_NR_SB_LOG_ZONES
* mirror
];
834 if (zone
->cond
!= BLK_ZONE_COND_FULL
) {
835 if (zone
->cond
== BLK_ZONE_COND_EMPTY
)
836 zone
->cond
= BLK_ZONE_COND_IMP_OPEN
;
838 zone
->wp
+= (BTRFS_SUPER_INFO_SIZE
>> SECTOR_SHIFT
);
840 if (zone
->wp
== zone
->start
+ zone
->len
)
841 zone
->cond
= BLK_ZONE_COND_FULL
;
847 ASSERT(zone
->cond
!= BLK_ZONE_COND_FULL
);
848 if (zone
->cond
== BLK_ZONE_COND_EMPTY
)
849 zone
->cond
= BLK_ZONE_COND_IMP_OPEN
;
851 zone
->wp
+= (BTRFS_SUPER_INFO_SIZE
>> SECTOR_SHIFT
);
853 if (zone
->wp
== zone
->start
+ zone
->len
)
854 zone
->cond
= BLK_ZONE_COND_FULL
;
857 int btrfs_reset_sb_log_zones(struct block_device
*bdev
, int mirror
)
859 sector_t zone_sectors
;
861 u8 zone_sectors_shift
;
865 zone_sectors
= bdev_zone_sectors(bdev
);
866 zone_sectors_shift
= ilog2(zone_sectors
);
867 nr_sectors
= bdev_nr_sectors(bdev
);
868 nr_zones
= nr_sectors
>> zone_sectors_shift
;
870 sb_zone
= sb_zone_number(zone_sectors_shift
+ SECTOR_SHIFT
, mirror
);
871 if (sb_zone
+ 1 >= nr_zones
)
874 return blkdev_zone_mgmt(bdev
, REQ_OP_ZONE_RESET
,
875 zone_start_sector(sb_zone
, bdev
),
876 zone_sectors
* BTRFS_NR_SB_LOG_ZONES
, GFP_NOFS
);
/**
 * btrfs_find_allocatable_zones - find allocatable zones within a given region
 *
 * @device:	the device to allocate a region on
 * @hole_start:	the position of the hole to allocate the region
 * @hole_end:	the end of the hole
 * @num_bytes:	size of the wanted region
 *
 * Return: position of allocatable zones
 *
 * The allocatable region must not contain any superblock locations.
 */
u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start,
				 u64 hole_end, u64 num_bytes)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	u64 nzones = num_bytes >> shift;
	u64 pos = hole_start;
	u64 begin, end;
	bool have_sb;
	int i;

	ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size));

	while (pos < hole_end) {
		begin = pos >> shift;
		end = begin + nzones;

		if (end > zinfo->nr_zones)
			return hole_end;

		/* Check if zones in the region are all empty */
		if (btrfs_dev_is_sequential(device, pos) &&
		    find_next_zero_bit(zinfo->empty_zones, end, begin) != end) {
			pos += zinfo->zone_size;
			continue;
		}

		have_sb = false;
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			u32 sb_zone;
			u64 sb_pos;

			sb_zone = sb_zone_number(shift, i);
			if (!(end <= sb_zone ||
			      sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) {
				have_sb = true;
				pos = zone_start_physical(
					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
				break;
			}

			/* We also need to exclude regular superblock positions */
			sb_pos = btrfs_sb_offset(i);
			if (!(pos + num_bytes <= sb_pos ||
			      sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) {
				have_sb = true;
				pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE,
					    zinfo->zone_size);
				break;
			}
		}
		if (!have_sb)
			break;
	}

	return pos;
}
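/*
 * For example, with a 256MiB zone size, superblock mirror 0 occupies log
 * zones 0 and 1, so a hole starting at offset 0 cannot host an allocation:
 * the search above skips past the log pair and retries from
 * zone_start_physical(2, zinfo) == 512MiB.
 */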
int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
			    u64 length, u64 *bytes)
{
	int ret;

	*bytes = 0;
	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT,
			       GFP_NOFS);
	if (ret)
		return ret;

	*bytes = length;
	while (length) {
		btrfs_dev_set_zone_empty(device, physical);
		physical += device->zone_info->zone_size;
		length -= device->zone_info->zone_size;
	}

	return 0;
}
int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
{
	struct btrfs_zoned_device_info *zinfo = device->zone_info;
	const u8 shift = zinfo->zone_size_shift;
	unsigned long begin = start >> shift;
	unsigned long end = (start + size) >> shift;
	u64 pos;
	int ret;

	ASSERT(IS_ALIGNED(start, zinfo->zone_size));
	ASSERT(IS_ALIGNED(size, zinfo->zone_size));

	if (end > zinfo->nr_zones)
		return -ERANGE;

	/* All the zones are conventional */
	if (find_next_bit(zinfo->seq_zones, end, begin) == end)
		return 0;

	/* All the zones are sequential and empty */
	if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
	    find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
		return 0;

	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
		u64 reset_bytes;

		if (!btrfs_dev_is_sequential(device, pos) ||
		    btrfs_dev_is_empty_zone(device, pos))
			continue;

		/* Free regions should be empty */
		btrfs_warn_in_rcu(
			device->fs_info,
		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
			rcu_str_deref(device->name), device->devid, pos >> shift);

		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
					      &reset_bytes);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Calculate an allocation pointer from the extent allocation information
 * for a block group consisting of conventional zones. It points to the end
 * of the highest-addressed extent in the block group as the allocation
 * offset.
 */
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
				   u64 *offset_ret)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	u64 length;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = cache->start + cache->length;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* We should not find the exact match */
	if (!ret)
		ret = -EUCLEAN;
	if (ret < 0)
		goto out;

	ret = btrfs_previous_extent_item(root, path, cache->start);
	if (ret) {
		if (ret == 1) {
			ret = 0;
			*offset_ret = 0;
		}
		goto out;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY)
		length = found_key.offset;
	else
		length = fs_info->nodesize;

	if (!(found_key.objectid >= cache->start &&
	      found_key.objectid + length <= cache->start + cache->length)) {
		ret = -EUCLEAN;
		goto out;
	}
	*offset_ret = found_key.objectid + length - cache->start;
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *device;
	u64 logical = cache->start;
	u64 length = cache->length;
	u64 physical = 0;
	int ret;
	int i;
	unsigned int nofs_flag;
	u64 *alloc_offsets = NULL;
	u64 last_alloc = 0;
	u32 num_sequential = 0, num_conventional = 0;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	/* Sanity check */
	if (!IS_ALIGNED(length, fs_info->zone_size)) {
		btrfs_err(fs_info,
		"zoned: block group %llu len %llu unaligned to zone size %llu",
			  logical, length, fs_info->zone_size);
		return -EIO;
	}

	/* Get the chunk mapping */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em)
		return -EINVAL;

	map = em->map_lookup;

	alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
	if (!alloc_offsets) {
		free_extent_map(em);
		return -ENOMEM;
	}
	for (i = 0; i < map->num_stripes; i++) {
		bool is_sequential;
		struct blk_zone zone;
		struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
		int dev_replace_is_ongoing = 0;

		device = map->stripes[i].dev;
		physical = map->stripes[i].physical;

		if (device->bdev == NULL) {
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		}

		is_sequential = btrfs_dev_is_sequential(device, physical);
		if (is_sequential)
			num_sequential++;
		else
			num_conventional++;

		if (!is_sequential) {
			alloc_offsets[i] = WP_CONVENTIONAL;
			continue;
		}

		/*
		 * This zone will be used for allocation, so mark this zone
		 * non-empty.
		 */
		btrfs_dev_clear_zone_empty(device, physical);

		down_read(&dev_replace->rwsem);
		dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
		if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
			btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical);
		up_read(&dev_replace->rwsem);

		/*
		 * The group is mapped to a sequential zone. Get the zone write
		 * pointer to determine the allocation offset within the zone.
		 */
		WARN_ON(!IS_ALIGNED(physical, fs_info->zone_size));
		nofs_flag = memalloc_nofs_save();
		ret = btrfs_get_dev_zone(device, physical, &zone);
		memalloc_nofs_restore(nofs_flag);
		if (ret == -EIO || ret == -EOPNOTSUPP) {
			ret = 0;
			alloc_offsets[i] = WP_MISSING_DEV;
			continue;
		} else if (ret) {
			goto out;
		}

		if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
			btrfs_err_in_rcu(fs_info,
	"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
				zone.start << SECTOR_SHIFT,
				rcu_str_deref(device->name), device->devid);
			ret = -EIO;
			goto out;
		}

		switch (zone.cond) {
		case BLK_ZONE_COND_OFFLINE:
		case BLK_ZONE_COND_READONLY:
			btrfs_err(fs_info,
		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
				  physical >> device->zone_info->zone_size_shift,
				  rcu_str_deref(device->name), device->devid);
			alloc_offsets[i] = WP_MISSING_DEV;
			break;
		case BLK_ZONE_COND_EMPTY:
			alloc_offsets[i] = 0;
			break;
		case BLK_ZONE_COND_FULL:
			alloc_offsets[i] = fs_info->zone_size;
			break;
		default:
			/* Partially used zone */
			alloc_offsets[i] =
					((zone.wp - zone.start) << SECTOR_SHIFT);
			break;
		}
	}

	if (num_sequential > 0)
		cache->seq_zone = true;

	if (num_conventional > 0) {
		/*
		 * Avoid calling calculate_alloc_pointer() for a new BG. It is
		 * of no use for a new BG, whose allocation offset is always 0.
		 *
		 * Also, we have a lock chain of extent buffer lock ->
		 * chunk mutex. For a new BG, this function is called from
		 * btrfs_make_block_group() which is already taking the
		 * chunk mutex. Thus, we cannot call
		 * calculate_alloc_pointer() which takes extent buffer
		 * locks to avoid deadlock.
		 */
		if (new) {
			cache->alloc_offset = 0;
			goto out;
		}
		ret = calculate_alloc_pointer(cache, &last_alloc);
		if (ret || map->num_stripes == num_conventional) {
			if (!ret)
				cache->alloc_offset = last_alloc;
			else
				btrfs_err(fs_info,
			"zoned: failed to determine allocation offset of bg %llu",
					  cache->start);
			goto out;
		}
	}

	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case 0: /* single */
		if (alloc_offsets[0] == WP_MISSING_DEV) {
			btrfs_err(fs_info,
			"zoned: cannot recover write pointer for zone %llu",
				  physical);
			ret = -EIO;
			goto out;
		}
		cache->alloc_offset = alloc_offsets[0];
		break;
	case BTRFS_BLOCK_GROUP_DUP:
	case BTRFS_BLOCK_GROUP_RAID1:
	case BTRFS_BLOCK_GROUP_RAID0:
	case BTRFS_BLOCK_GROUP_RAID10:
	case BTRFS_BLOCK_GROUP_RAID5:
	case BTRFS_BLOCK_GROUP_RAID6:
		/* non-single profiles are not supported yet */
	default:
		btrfs_err(fs_info, "zoned: profile %s not yet supported",
			  btrfs_bg_type_to_raid_name(map->type));
		ret = -EINVAL;
		goto out;
	}

out:
	if (cache->alloc_offset > fs_info->zone_size) {
		btrfs_err(fs_info,
			"zoned: invalid write pointer %llu in block group %llu",
			cache->alloc_offset, cache->start);
		ret = -EIO;
	}

	/* An extent is allocated after the write pointer */
	if (!ret && num_conventional && last_alloc > cache->alloc_offset) {
		btrfs_err(fs_info,
			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
			  logical, last_alloc, cache->alloc_offset);
		ret = -EIO;
	}

	if (!ret)
		cache->meta_write_pointer = cache->alloc_offset + cache->start;

	kfree(alloc_offsets);
	free_extent_map(em);

	return ret;
}
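/*
 * In a sequential zone, everything before the allocation offset has been
 * written exactly once; bytes that belonged to since-freed extents cannot
 * be rewritten until the whole zone is reset. The helper below accounts
 * that region as zone_unusable instead of free space.
 */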
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{
	u64 unusable, free;

	if (!btrfs_is_zoned(cache->fs_info))
		return;

	WARN_ON(cache->bytes_super != 0);
	unusable = cache->alloc_offset - cache->used;
	free = cache->length - cache->alloc_offset;

	/* We only need ->free_space in ALLOC_SEQ block groups */
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->free_space_ctl->free_space = free;
	cache->zone_unusable = unusable;

	/* Should not have any excluded extents. Just in case, though */
	btrfs_free_excluded_extents(cache);
}

void btrfs_redirty_list_add(struct btrfs_transaction *trans,
			    struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;

	if (!btrfs_is_zoned(fs_info) ||
	    btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) ||
	    !list_empty(&eb->release_list))
		return;

	set_extent_buffer_dirty(eb);
	set_extent_bits_nowait(&trans->dirty_pages, eb->start,
			       eb->start + eb->len - 1, EXTENT_DIRTY);
	memzero_extent_buffer(eb, 0, eb->len);
	set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);

	spin_lock(&trans->releasing_ebs_lock);
	list_add_tail(&eb->release_list, &trans->releasing_ebs);
	spin_unlock(&trans->releasing_ebs_lock);
	atomic_inc(&eb->refs);
}

void btrfs_free_redirty_list(struct btrfs_transaction *trans)
{
	spin_lock(&trans->releasing_ebs_lock);
	while (!list_empty(&trans->releasing_ebs)) {
		struct extent_buffer *eb;

		eb = list_first_entry(&trans->releasing_ebs,
				      struct extent_buffer, release_list);
		list_del_init(&eb->release_list);
		free_extent_buffer(eb);
	}
	spin_unlock(&trans->releasing_ebs_lock);
}
bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_block_group *cache;
	bool ret;

	if (!btrfs_is_zoned(fs_info))
		return false;

	if (!is_data_inode(&inode->vfs_inode))
		return false;

	/*
	 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
	 * extent layout the relocation code has.
	 * Furthermore we have set aside our own block group from which only the
	 * relocation "process" can allocate and make sure only one process at a
	 * time can add pages to an extent that gets relocated, so it's safe to
	 * use regular REQ_OP_WRITE for this special case.
	 */
	if (btrfs_is_data_reloc_root(inode->root))
		return false;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);
	if (!cache)
		return false;

	ret = cache->seq_zone;
	btrfs_put_block_group(cache);

	return ret;
}
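/*
 * With zone append, the device (not the filesystem) picks the final write
 * location inside the zone and reports it on bio completion. The two
 * helpers below close the loop: btrfs_record_physical_zoned() saves the
 * reported physical address on the ordered extent, and
 * btrfs_rewrite_logical_zoned() later remaps the ordered extent, the extent
 * map and the checksum entries to the logical address matching where the
 * data actually landed.
 */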
void btrfs_record_physical_zoned(struct inode *inode, u64 file_offset,
				 struct bio *bio)
{
	struct btrfs_ordered_extent *ordered;
	const u64 physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;

	if (bio_op(bio) != REQ_OP_ZONE_APPEND)
		return;

	ordered = btrfs_lookup_ordered_extent(BTRFS_I(inode), file_offset);
	if (WARN_ON(!ordered))
		return;

	ordered->physical = physical;
	ordered->bdev = bio->bi_bdev;

	btrfs_put_ordered_extent(ordered);
}
void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_ordered_sum *sum;
	u64 orig_logical = ordered->disk_bytenr;
	u64 *logical = NULL;
	int nr, stripe_len;

	/* Zoned devices should not have partitions. So, we can assume the offset is 0 */
	ASSERT(!bdev_is_partition(ordered->bdev));
	if (WARN_ON(!ordered->bdev))
		return;

	if (WARN_ON(btrfs_rmap_block(fs_info, orig_logical, ordered->bdev,
				     ordered->physical, &logical, &nr,
				     &stripe_len)))
		goto out;

	WARN_ON(nr != 1);

	if (orig_logical == *logical)
		goto out;

	ordered->disk_bytenr = *logical;

	em_tree = &inode->extent_tree;
	write_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, ordered->file_offset,
				   ordered->num_bytes);
	em->block_start = *logical;
	free_extent_map(em);
	write_unlock(&em_tree->lock);

	list_for_each_entry(sum, &ordered->list, list) {
		if (*logical < orig_logical)
			sum->bytenr -= orig_logical - *logical;
		else
			sum->bytenr += *logical - orig_logical;
	}

out:
	kfree(logical);
}
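/*
 * Metadata is still written with regular writes, so every extent buffer
 * must be submitted exactly at the current write pointer of its block
 * group. The check below enforces that ordering via meta_write_pointer and
 * makes the caller retry later when an out-of-order buffer shows up.
 */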
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb,
				    struct btrfs_block_group **cache_ret)
{
	struct btrfs_block_group *cache;
	bool ret = true;

	if (!btrfs_is_zoned(fs_info))
		return true;

	cache = *cache_ret;

	if (cache && (eb->start < cache->start ||
		      cache->start + cache->length <= eb->start)) {
		btrfs_put_block_group(cache);
		cache = NULL;
		*cache_ret = NULL;
	}

	if (!cache)
		cache = btrfs_lookup_block_group(fs_info, eb->start);

	if (cache) {
		if (cache->meta_write_pointer != eb->start) {
			btrfs_put_block_group(cache);
			cache = NULL;
			ret = false;
		} else {
			cache->meta_write_pointer = eb->start + eb->len;
		}

		*cache_ret = cache;
	}

	return ret;
}

void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
				     struct extent_buffer *eb)
{
	if (!btrfs_is_zoned(eb->fs_info) || !cache)
		return;

	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
	cache->meta_write_pointer = eb->start;
}

int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
{
	if (!btrfs_dev_is_sequential(device, physical))
		return -EOPNOTSUPP;

	return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
				    length >> SECTOR_SHIFT, GFP_NOFS, 0);
}

static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
			  struct blk_zone *zone)
{
	struct btrfs_bio *bbio = NULL;
	u64 mapped_length = PAGE_SIZE;
	unsigned int nofs_flag;
	int nmirrors;
	int i, ret;

	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			       &mapped_length, &bbio);
	if (ret || !bbio || mapped_length < PAGE_SIZE) {
		btrfs_put_bbio(bbio);
		return -EIO;
	}

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		return -EINVAL;

	nofs_flag = memalloc_nofs_save();
	nmirrors = (int)bbio->num_stripes;
	for (i = 0; i < nmirrors; i++) {
		u64 physical = bbio->stripes[i].physical;
		struct btrfs_device *dev = bbio->stripes[i].dev;

		/* Missing device */
		if (!dev->bdev)
			continue;

		ret = btrfs_get_dev_zone(dev, physical, zone);
		/* Failing device */
		if (ret == -EIO || ret == -EOPNOTSUPP)
			continue;
		break;
	}
	memalloc_nofs_restore(nofs_flag);

	return ret;
}
/*
 * Synchronize the write pointer in a zone at @physical_start on @tgt_dev, by
 * filling zeros from @physical_pos up to the write pointer of the
 * dev-replace source device.
 */
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				  u64 physical_start, u64 physical_pos)
{
	struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
	struct blk_zone zone;
	u64 wp;
	u64 length;
	int ret;

	if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
		return 0;

	ret = read_zone_info(fs_info, logical, &zone);
	if (ret)
		return ret;

	wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);

	if (physical_pos == wp)
		return 0;

	if (physical_pos > wp)
		return -EUCLEAN;

	length = wp - physical_pos;
	return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
}
struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
					    u64 logical, u64 length)
{
	struct btrfs_device *device;
	struct extent_map *em;
	struct map_lookup *map;

	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return ERR_CAST(em);

	map = em->map_lookup;
	/* We only support single profile for now */
	ASSERT(map->num_stripes == 1);
	device = map->stripes[0].dev;

	free_extent_map(em);

	return device;
}

void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->relocation_bg_lock);
	if (fs_info->data_reloc_bg == bg->start)
		fs_info->data_reloc_bg = 0;
	spin_unlock(&fs_info->relocation_bg_lock);
}

void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	if (!btrfs_is_zoned(fs_info))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->zone_info) {
			vfree(device->zone_info->zone_cache);
			device->zone_info->zone_cache = NULL;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}