// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"
#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME
/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralize block layer function to convert BLK_ZONE_COND_XXX
 * into string format. Useful in debugging and tracing zone conditions. For
 * an invalid BLK_ZONE_COND_XXX it returns the string "UNKNOWN".
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
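/*
 * Example (illustrative sketch): a report_zones callback can use
 * blk_zone_cond_str() to log the condition of each zone it sees. The
 * callback name below is hypothetical:
 *
 *	static int show_zone_cb(struct blk_zone *zone, unsigned int idx,
 *				void *data)
 *	{
 *		pr_info("zone %u at sector %llu: %s\n",
 *			idx, zone->start, blk_zone_cond_str(zone->cond));
 *		return 0;
 *	}
 */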
/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (!rq->q->disk->seq_zones_wlock)
		return false;

	if (bdev_op_is_zoned_write(rq->q->disk->part0, req_op(rq)))
		return blk_rq_zone_is_seq(rq);

	return false;
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);
bool blk_req_zone_write_trylock(struct request *rq)
{
	unsigned int zno = blk_rq_zone_no(rq);

	if (test_and_set_bit(zno, rq->q->disk->seq_zones_wlock))
		return false;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

	return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);
void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->disk->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);
void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->disk->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->disk->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
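/*
 * Example (illustrative sketch): the helpers above are meant to be used in
 * pairs by blk-mq and zone-aware I/O schedulers around the dispatch of a
 * sequential write, roughly:
 *
 *	if (blk_req_needs_zone_write_lock(rq)) {
 *		if (!blk_req_zone_write_trylock(rq))
 *			return false;
 *	}
 *
 * with the zone write lock released on request completion or requeue via
 * blk_req_zone_write_unlock().
 */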
/**
 * bdev_nr_zones - Get number of zones
 * @bdev: Target device
 *
 * Return the total number of zones of a zoned block device. For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int bdev_nr_zones(struct block_device *bdev)
{
	sector_t zone_sectors = bdev_zone_sectors(bdev);

	if (!bdev_is_zoned(bdev))
		return 0;
	return (bdev_nr_sectors(bdev) + zone_sectors - 1) >>
		ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(bdev_nr_zones);
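/*
 * Example (illustrative): for a device with 209715200 sectors (100 GiB) and
 * 524288-sector (256 MiB) zones, this evaluates to
 * (209715200 + 524288 - 1) >> ilog2(524288) = 400 zones.
 */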
/**
 * blkdev_report_zones - Get zones information
 * @bdev: Target block device
 * @sector: Sector from which to report zones
 * @nr_zones: Maximum number of zones to report
 * @cb: Callback function called for each reported zone
 * @data: Private data for the callback
 *
 * Description:
 * Get zone information starting from the zone containing @sector for at most
 * @nr_zones, and call @cb for each zone reported by the device.
 * To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 * constant can be passed to @nr_zones.
 * Returns the number of zones reported by the device, or a negative errno
 * value in case of failure.
 *
 * Note: The caller must use memalloc_noXX_save/restore() calls to control
 * memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
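/*
 * Example (illustrative sketch): an in-kernel caller typically provides a
 * callback that inspects or copies each zone and, as noted above, brackets
 * the call with memalloc_noio_save/restore(). The callback below is
 * hypothetical:
 *
 *	static int count_open_zones_cb(struct blk_zone *zone, unsigned int idx,
 *				       void *data)
 *	{
 *		unsigned int *nr_open = data;
 *
 *		if (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
 *		    zone->cond == BLK_ZONE_COND_EXP_OPEN)
 *			(*nr_open)++;
 *		return 0;
 *	}
 *
 *	noio_flag = memalloc_noio_save();
 *	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				  count_open_zones_cb, &nr_open);
 *	memalloc_noio_restore(noio_flag);
 */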
static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}
static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	/*
	 * For an all-zones reset, ignore conventional, empty, read-only
	 * and offline zones.
	 */
	switch (zone->cond) {
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_READONLY:
	case BLK_ZONE_COND_OFFLINE:
		return 0;
	default:
		set_bit(idx, (unsigned long *)data);
		return 0;
	}
}
static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
					  gfp_t gfp_mask)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	unsigned long *need_reset;
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;

	need_reset = blk_alloc_zone_bitmap(disk->queue->node, disk->nr_zones);
	if (!need_reset)
		return -ENOMEM;

	ret = disk->fops->report_zones(disk, 0, disk->nr_zones,
				       blk_zone_need_reset_cb, need_reset);
	if (ret < 0)
		goto out_free_need_reset;

	ret = 0;
	while (sector < capacity) {
		if (!test_bit(disk_zone_no(disk, sector), need_reset)) {
			sector += zone_sectors;
			continue;
		}

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out_free_need_reset:
	kfree(need_reset);
	return ret;
}
static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
	return submit_bio_wait(&bio);
}
/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev: Target block device
 * @op: Operation to be performed on the zones
 * @sector: Start sector of the first zone to operate on
 * @nr_sectors: Number of sectors, should be at least the length of one zone
 *		and must be zone size aligned.
 * @gfp_mask: Memory allocation flags (for bio_alloc)
 *
 * Description:
 * Perform the specified operation on the range of zones specified by
 * @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 * is valid, but the specified range should not contain conventional zones.
 * The operation to execute on each zone can be a zone reset, open, close
 * or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
		     sector_t sector, sector_t nr_sectors, gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = bdev_zone_sectors(bdev);
	sector_t capacity = bdev_nr_sectors(bdev);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret;

	if (!bdev_is_zoned(bdev))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones,
	 * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
	 * command. For other devices, we emulate this command behavior by
	 * identifying the zones needing a reset.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
		if (!blk_queue_zone_resetall(q))
			return blkdev_zone_reset_all_emulated(bdev, gfp_mask);
		return blkdev_zone_reset_all(bdev, gfp_mask);
	}

	while (sector < end_sector) {
		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
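/*
 * Example (illustrative): resetting the single zone starting at a
 * zone-aligned sector, and resetting every zone of the device:
 *
 *	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, sector,
 *			       bdev_zone_sectors(bdev), GFP_KERNEL);
 *
 *	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, 0,
 *			       bdev_nr_sectors(bdev), GFP_KERNEL);
 *
 * The second form takes the REQ_OP_ZONE_RESET_ALL (or emulated) path above.
 */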
struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}
/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct request_queue *q;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}
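/*
 * Example (illustrative sketch, userspace): BLKREPORTZONE expects a struct
 * blk_zone_report header immediately followed by room for nr_zones
 * struct blk_zone entries, as laid out in <linux/blkzoned.h>:
 *
 *	struct blk_zone_report *rep;
 *
 *	rep = calloc(1, sizeof(*rep) + 16 * sizeof(struct blk_zone));
 *	rep->sector = 0;
 *	rep->nr_zones = 16;
 *	if (!ioctl(fd, BLKREPORTZONE, rep))
 *		printf("%u zones reported\n", rep->nr_zones);
 */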
static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
				      const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}
/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_op op;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		filemap_invalidate_lock(bdev->bd_inode->i_mapping);
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			goto fail;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
			       GFP_KERNEL);

fail:
	if (cmd == BLKRESETZONE)
		filemap_invalidate_unlock(bdev->bd_inode->i_mapping);

	return ret;
}
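/*
 * Example (illustrative sketch, userspace): resetting one zone with
 * BLKRESETZONE on a device opened for writing:
 *
 *	struct blk_zone_range range = {
 *		.sector		= zone_start,
 *		.nr_sectors	= zone_len,
 *	};
 *
 *	ret = ioctl(fd, BLKRESETZONE, &range);
 *
 * BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE take the same argument.
 */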
void disk_free_zone_bitmaps(struct gendisk *disk)
{
	kfree(disk->conv_zones_bitmap);
	disk->conv_zones_bitmap = NULL;
	kfree(disk->seq_zones_wlock);
	disk->seq_zones_wlock = NULL;
}
struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*conv_zones_bitmap;
	unsigned long	*seq_zones_wlock;
	unsigned int	nr_zones;
	sector_t	zone_sectors;
	sector_t	sector;
};
/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the exception of an eventual
	 * smaller last zone.
	 */
	if (zone->start == 0) {
		if (zone->len == 0 || !is_power_of_2(zone->len)) {
			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
				disk->disk_name, zone->len);
			return -ENODEV;
		}

		args->zone_sectors = zone->len;
		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
	} else if (zone->start + args->zone_sectors < capacity) {
		if (zone->len != args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else {
		if (zone->len > args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with larger last zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	}

	/* Check for holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!args->conv_zones_bitmap) {
			args->conv_zones_bitmap =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, args->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (!args->seq_zones_wlock) {
			args->seq_zones_wlock =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->seq_zones_wlock)
				return -ENOMEM;
		}
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

	args->sector += zone->len;
	return 0;
}
/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk: Target disk
 * @update_driver_data: Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk's zone bitmaps. This function should normally be called within the
 * disk ->revalidate method for blk-mq based drivers. For BIO based drivers
 * only disk->nr_zones needs to be updated so that the sysfs exposed value
 * is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	struct blk_revalidate_zone_args args = {
		.disk		= disk,
	};
	unsigned int noio_flag;
	int ret;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	if (!get_capacity(disk))
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != get_capacity(disk)) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret > 0) {
		blk_queue_chunk_sectors(q, args.zone_sectors);
		disk->nr_zones = args.nr_zones;
		swap(disk->seq_zones_wlock, args.seq_zones_wlock);
		swap(disk->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		disk_free_zone_bitmaps(disk);
	}
	blk_mq_unfreeze_queue(q);

	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
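/*
 * Example (illustrative sketch): a blk-mq based zoned driver typically calls
 * this from its zone revalidation path; the driver structure and callback
 * below are hypothetical:
 *
 *	static void my_drv_update_zone_info(struct gendisk *disk)
 *	{
 *		struct my_drv *drv = disk->private_data;
 *
 *		drv->nr_zones = disk->nr_zones;
 *	}
 *
 *	ret = blk_revalidate_disk_zones(disk, my_drv_update_zone_info);
 *	if (ret)
 *		return ret;
 */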
void disk_clear_zone_settings(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	blk_mq_freeze_queue(q);

	disk_free_zone_bitmaps(disk);
	blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
	q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
	disk->max_open_zones = 0;
	disk->max_active_zones = 0;
	q->limits.chunk_sectors = 0;
	q->limits.zone_write_granularity = 0;
	q->limits.max_zone_append_sectors = 0;

	blk_mq_unfreeze_queue(q);
}