// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

static inline sector_t blk_zone_start(struct request_queue *q,
				      sector_t sector)
{
	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

	return sector & ~zone_mask;
}
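
/*
 * Illustrative example (not part of the original file): with 256 MiB zones
 * of 512 B sectors, blk_queue_zone_sectors(q) == 524288, a power of two, so
 * zone_mask == 0x7ffff and blk_zone_start(q, 524300) == 524288, the start
 * sector of the zone containing sector 524300.
 */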

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
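
/*
 * Illustrative sketch (not part of this file): dispatch code would normally
 * use the inline wrappers from <linux/blkdev.h> built on the two helpers
 * above, serializing writes to a sequential zone roughly as follows:
 *
 *	blk_req_zone_write_lock(rq);	// calls __blk_req_zone_write_lock()
 *					// only if the request needs locking
 *	// ... dispatch rq to the device ...
 *	blk_req_zone_write_unlock(rq);	// on completion or requeue
 */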

static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
					     sector_t nr_sectors)
{
	sector_t zone_sectors = blk_queue_zone_sectors(q);

	return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
}

/**
 * blkdev_nr_zones - Get number of zones
 * @bdev: Target block device
 *
 * Description:
 *    Return the total number of zones of a zoned block device.
 *    For a regular block device, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;

	return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
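
/*
 * Illustrative use (assumed caller context, not part of this file):
 *
 *	unsigned int nr_zones = blkdev_nr_zones(bdev);
 *
 *	if (!nr_zones)
 *		return -EINVAL;	// not a zoned block device
 */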

/*
 * Check that a zone report belongs to this partition, and if so, fix its start
 * sector and write pointer and return true. Return false otherwise.
 */
static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
{
	sector_t offset = get_start_sect(bdev);

	if (rep->start < offset)
		return false;

	rep->start -= offset;
	if (rep->start + rep->len > bdev->bd_part->nr_sects)
		return false;

	if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
		rep->wp = rep->start + rep->len;
	else
		rep->wp -= offset;
	return true;
}

static int blk_report_zones(struct gendisk *disk, sector_t sector,
			    struct blk_zone *zones, unsigned int *nr_zones)
{
	struct request_queue *q = disk->queue;
	unsigned int z = 0, n, nrz = *nr_zones;
	sector_t capacity = get_capacity(disk);
	int ret;

	while (z < nrz && sector < capacity) {
		n = nrz - z;
		ret = disk->fops->report_zones(disk, sector, &zones[z], &n);
		if (ret)
			return ret;
		if (!n)
			break;
		sector += blk_queue_zone_sectors(q) * n;
		z += n;
	}

	WARN_ON(z > *nr_zones);
	*nr_zones = z;

	return 0;
}

/**
 * blkdev_report_zones - Get zone information
 * @bdev: Target block device
 * @sector: Sector from which to report zones
 * @zones: Array of zone structures in which to return the zone information
 * @nr_zones: Number of zone structures in the zone array
 *
 * Description:
 *    Get zone information starting from the zone containing @sector.
 *    The number of zones reported may be less than the number requested
 *    by @nr_zones. The number of zones actually reported is returned
 *    in @nr_zones.
 *    The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function (zone array and command
 *    buffer allocation by the device driver).
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			struct blk_zone *zones, unsigned int *nr_zones)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int i, nrz;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	/*
	 * A block device that advertised itself as zoned must have a
	 * report_zones method. If it does not have one defined, the device
	 * driver has a bug. So warn about that.
	 */
	if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
		*nr_zones = 0;
		return 0;
	}

	nrz = min(*nr_zones,
		  __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
	ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
			       zones, &nrz);
	if (ret)
		return ret;

	for (i = 0; i < nrz; i++) {
		if (!blkdev_report_zone(bdev, zones))
			break;
		zones++;
	}

	*nr_zones = i;

	return 0;
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
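
/*
 * Illustrative caller sketch (not part of this file; the array size of 128
 * is an arbitrary assumption):
 *
 *	struct blk_zone *zones;
 *	unsigned int nr_zones = 128;
 *	int ret;
 *
 *	zones = kvcalloc(nr_zones, sizeof(*zones), GFP_KERNEL);
 *	if (!zones)
 *		return -ENOMEM;
 *	ret = blkdev_report_zones(bdev, 0, zones, &nr_zones);
 *	// on success, nr_zones now holds the number of zones reported,
 *	// which may be smaller than requested
 *	kvfree(zones);
 */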

static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
						sector_t sector,
						sector_t nr_sectors)
{
	if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
		return false;

	if (sector || nr_sectors != part_nr_sects_read(bdev->bd_part))
		return false;
	/*
	 * REQ_OP_ZONE_RESET_ALL can be executed only if the block device is
	 * the entire disk, that is, if the block device's start offset is 0
	 * and its capacity is the same as the entire disk's.
	 */
	return get_start_sect(bdev) == 0 &&
	       part_nr_sects_read(bdev->bd_part) == get_capacity(bdev->bd_disk);
}

/**
 * blkdev_reset_zones - Reset zones write pointer
 * @bdev: Target block device
 * @sector: Start sector of the first zone to reset
 * @nr_sectors: Number of sectors, at least the length of one zone
 * @gfp_mask: Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Reset the write pointer of the zones contained in the range
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 */
int blkdev_reset_zones(struct block_device *bdev,
		       sector_t sector, sector_t nr_sectors,
		       gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors;
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	zone_sectors = blk_queue_zone_sectors(q);
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) &&
	    end_sector != bdev->bd_part->nr_sects)
		return -EINVAL;

	blk_start_plug(&plug);
	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);

		/*
		 * Special case for the zone reset operation that resets all
		 * zones, this is useful for applications like mkfs.
		 */
		if (blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
			bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
			break;
		}

		bio->bi_opf = REQ_OP_ZONE_RESET;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_reset_zones);
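
/*
 * Illustrative caller sketch (not part of this file): resetting every zone
 * of a whole-disk block device, as an mkfs-style tool would:
 *
 *	ret = blkdev_reset_zones(bdev, 0, part_nr_sects_read(bdev->bd_part),
 *				 GFP_KERNEL);
 */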

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_report rep;
	struct blk_zone *zones;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);

	zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
			       GFP_KERNEL | __GFP_ZERO);
	if (!zones)
		return -ENOMEM;

	ret = blkdev_report_zones(bdev, rep.sector, zones, &rep.nr_zones);
	if (ret)
		goto out;

	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
		ret = -EFAULT;
		goto out;
	}

	if (rep.nr_zones) {
		if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
				 sizeof(struct blk_zone) * rep.nr_zones))
			ret = -EFAULT;
	}

out:
	kvfree(zones);

	return ret;
}
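
/*
 * Illustrative userspace counterpart (not part of this file): the report
 * buffer is a struct blk_zone_report (from <linux/blkzoned.h>) immediately
 * followed by the zone array, matching the copy_to_user() layout above:
 *
 *	struct blk_zone_report *rep;
 *
 *	rep = calloc(1, sizeof(*rep) + 16 * sizeof(struct blk_zone));
 *	rep->sector = 0;	// report from the first zone
 *	rep->nr_zones = 16;
 *	if (!ioctl(fd, BLKREPORTZONE, rep))
 *		// rep->nr_zones holds the number of zones returned
 */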

/*
 * BLKRESETZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
				  GFP_KERNEL);
}
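
/*
 * Illustrative userspace counterpart (not part of this file; assumes
 * zone_sectors was obtained beforehand, e.g. from the queue's sysfs
 * chunk_sectors attribute):
 *
 *	struct blk_zone_range zrange = {
 *		.sector = 0,		// first zone of the device
 *		.nr_sectors = zone_sectors,
 *	};
 *
 *	ioctl(fd, BLKRESETZONE, &zrange);	// needs a writable fd
 */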

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

/*
 * Allocate an array of struct blk_zone to get nr_zones zone information.
 * The allocated array may be smaller than nr_zones.
 */
static struct blk_zone *blk_alloc_zones(unsigned int *nr_zones)
{
	struct blk_zone *zones;
	size_t nrz = min(*nr_zones, BLK_ZONED_REPORT_MAX_ZONES);

	/*
	 * GFP_KERNEL here is meaningless as the caller task context has
	 * the PF_MEMALLOC_NOIO flag set in blk_revalidate_disk_zones()
	 * with memalloc_noio_save().
	 */
	zones = kvcalloc(nrz, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		*nr_zones = 0;
		return NULL;
	}

	*nr_zones = nrz;

	return zones;
}

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->seq_zones_bitmap);
	q->seq_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk: Target disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk request queue's zone bitmaps. This function should normally be called
 * within the disk ->revalidate method. For BIO based queues, no zone bitmap
 * is allocated.
 */
int blk_revalidate_disk_zones(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
	unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
	unsigned int i, rep_nr_zones = 0, z = 0, nrz;
	struct blk_zone *zones = NULL;
	unsigned int noio_flag;
	sector_t sector = 0;
	int ret = 0;

	/*
	 * BIO based queues do not use a scheduler so only q->nr_zones
	 * needs to be updated so that the sysfs exposed value is correct.
	 */
	if (!queue_is_mq(q)) {
		q->nr_zones = nr_zones;
		return 0;
	}

	/*
	 * Ensure that all memory allocations in this context are done as
	 * if GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();

	if (!blk_queue_is_zoned(q) || !nr_zones) {
		nr_zones = 0;
		goto update;
	}

	/* Allocate bitmaps */
	ret = -ENOMEM;
	seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_wlock)
		goto out;
	seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_bitmap)
		goto out;

	/* Get zone information and initialize seq_zones_bitmap */
	rep_nr_zones = nr_zones;
	zones = blk_alloc_zones(&rep_nr_zones);
	if (!zones)
		goto out;

	while (z < nr_zones) {
		nrz = min(nr_zones - z, rep_nr_zones);
		ret = blk_report_zones(disk, sector, zones, &nrz);
		if (ret)
			goto out;
		if (!nrz)
			break;
		for (i = 0; i < nrz; i++) {
			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
				set_bit(z, seq_zones_bitmap);
			z++;
		}
		sector += nrz * blk_queue_zone_sectors(q);
	}

	if (WARN_ON(z != nr_zones)) {
		ret = -EIO;
		goto out;
	}

update:
	/*
	 * Install the new bitmaps, making sure the queue is stopped and
	 * all I/Os are completed (i.e. a scheduler is not referencing the
	 * bitmaps).
	 */
	blk_mq_freeze_queue(q);
	q->nr_zones = nr_zones;
	swap(q->seq_zones_wlock, seq_zones_wlock);
	swap(q->seq_zones_bitmap, seq_zones_bitmap);
	blk_mq_unfreeze_queue(q);

out:
	memalloc_noio_restore(noio_flag);

	kvfree(zones);
	kfree(seq_zones_wlock);
	kfree(seq_zones_bitmap);

	if (ret) {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_mq_freeze_queue(q);
		blk_queue_free_zone_bitmaps(q);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
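
/*
 * Illustrative driver-side sketch (not part of this file): a low-level
 * driver would typically call this after the device zone configuration
 * may have changed, e.g. from its ->revalidate_disk method:
 *
 *	ret = blk_revalidate_disk_zones(disk);
 *	if (ret)
 *		// the queue zone bitmaps were freed; zone write locking
 *		// is unavailable until a later revalidation succeeds
 */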