]>
Commit | Line | Data |
---|---|---|
ca4b2a01 MB |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/vmalloc.h> | |
aa1c09cb | 3 | #include <linux/bitmap.h> |
ca4b2a01 MB |
4 | #include "null_blk.h" |
5 | ||
766c3297 CK |
6 | #define CREATE_TRACE_POINTS |
7 | #include "null_blk_trace.h" | |
8 | ||
ca4b2a01 MB |
9 | /* zone_size in MBs to sectors. */ |
10 | #define ZONE_SIZE_SHIFT 11 | |
11 | ||
/* Convert a sector to the index of its containing zone (zone size is a power of 2). */
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
16 | ||
d205bde7 | 17 | int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) |
ca4b2a01 MB |
18 | { |
19 | sector_t dev_size = (sector_t)dev->size * 1024 * 1024; | |
20 | sector_t sector = 0; | |
21 | unsigned int i; | |
22 | ||
23 | if (!is_power_of_2(dev->zone_size)) { | |
9c7eddf1 | 24 | pr_err("zone_size must be power-of-two\n"); |
ca4b2a01 MB |
25 | return -EINVAL; |
26 | } | |
e2748325 CK |
27 | if (dev->zone_size > dev->size) { |
28 | pr_err("Zone size larger than device capacity\n"); | |
29 | return -EINVAL; | |
30 | } | |
ca4b2a01 | 31 | |
089565fb AR |
32 | if (!dev->zone_capacity) |
33 | dev->zone_capacity = dev->zone_size; | |
34 | ||
35 | if (dev->zone_capacity > dev->zone_size) { | |
36 | pr_err("null_blk: zone capacity (%lu MB) larger than zone size (%lu MB)\n", | |
37 | dev->zone_capacity, dev->zone_size); | |
38 | return -EINVAL; | |
39 | } | |
40 | ||
ca4b2a01 MB |
41 | dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT; |
42 | dev->nr_zones = dev_size >> | |
43 | (SECTOR_SHIFT + ilog2(dev->zone_size_sects)); | |
44 | dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone), | |
45 | GFP_KERNEL | __GFP_ZERO); | |
46 | if (!dev->zones) | |
47 | return -ENOMEM; | |
48 | ||
e1777d09 DLM |
49 | /* |
50 | * With memory backing, the zone_lock spinlock needs to be temporarily | |
51 | * released to avoid scheduling in atomic context. To guarantee zone | |
52 | * information protection, use a bitmap to lock zones with | |
53 | * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing | |
54 | * implies that the queue is marked with BLK_MQ_F_BLOCKING. | |
55 | */ | |
56 | spin_lock_init(&dev->zone_lock); | |
57 | if (dev->memory_backed) { | |
58 | dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL); | |
59 | if (!dev->zone_locks) { | |
60 | kvfree(dev->zones); | |
61 | return -ENOMEM; | |
62 | } | |
aa1c09cb DLM |
63 | } |
64 | ||
ea2c18e1 MS |
65 | if (dev->zone_nr_conv >= dev->nr_zones) { |
66 | dev->zone_nr_conv = dev->nr_zones - 1; | |
9c7eddf1 | 67 | pr_info("changed the number of conventional zones to %u", |
ea2c18e1 MS |
68 | dev->zone_nr_conv); |
69 | } | |
70 | ||
dc4d137e NC |
71 | /* Max active zones has to be < nbr of seq zones in order to be enforceable */ |
72 | if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) { | |
73 | dev->zone_max_active = 0; | |
74 | pr_info("zone_max_active limit disabled, limit >= zone count\n"); | |
75 | } | |
76 | ||
77 | /* Max open zones has to be <= max active zones */ | |
78 | if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) { | |
79 | dev->zone_max_open = dev->zone_max_active; | |
80 | pr_info("changed the maximum number of open zones to %u\n", | |
81 | dev->nr_zones); | |
82 | } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) { | |
83 | dev->zone_max_open = 0; | |
84 | pr_info("zone_max_open limit disabled, limit >= zone count\n"); | |
85 | } | |
86 | ||
ea2c18e1 MS |
87 | for (i = 0; i < dev->zone_nr_conv; i++) { |
88 | struct blk_zone *zone = &dev->zones[i]; | |
89 | ||
90 | zone->start = sector; | |
91 | zone->len = dev->zone_size_sects; | |
82394db7 | 92 | zone->capacity = zone->len; |
ea2c18e1 MS |
93 | zone->wp = zone->start + zone->len; |
94 | zone->type = BLK_ZONE_TYPE_CONVENTIONAL; | |
95 | zone->cond = BLK_ZONE_COND_NOT_WP; | |
96 | ||
97 | sector += dev->zone_size_sects; | |
98 | } | |
99 | ||
100 | for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) { | |
ca4b2a01 MB |
101 | struct blk_zone *zone = &dev->zones[i]; |
102 | ||
103 | zone->start = zone->wp = sector; | |
104 | zone->len = dev->zone_size_sects; | |
089565fb | 105 | zone->capacity = dev->zone_capacity << ZONE_SIZE_SHIFT; |
ca4b2a01 MB |
106 | zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ; |
107 | zone->cond = BLK_ZONE_COND_EMPTY; | |
108 | ||
109 | sector += dev->zone_size_sects; | |
110 | } | |
111 | ||
d205bde7 DLM |
112 | q->limits.zoned = BLK_ZONED_HM; |
113 | blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q); | |
114 | blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE); | |
115 | ||
116 | return 0; | |
117 | } | |
118 | ||
/*
 * Finalize zoned-device registration on the request queue: revalidate zones
 * for blk-mq queues (or set chunk sectors / nr_zones directly for BIO-based
 * queues), then apply the zone append and open/active zone limits.
 * Returns 0 on success or the error from blk_revalidate_disk_zones().
 */
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	if (queue_is_mq(q)) {
		int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

		if (ret)
			return ret;
	} else {
		/* BIO-based queue: no revalidation; set the zone geometry by hand. */
		blk_queue_chunk_sectors(q, dev->zone_size_sects);
		q->nr_zones = blkdev_nr_zones(nullb->disk);
	}

	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	blk_queue_max_open_zones(q, dev->zone_max_open);
	blk_queue_max_active_zones(q, dev->zone_max_active);

	return 0;
}
140 | ||
d205bde7 | 141 | void null_free_zoned_dev(struct nullb_device *dev) |
ca4b2a01 | 142 | { |
aa1c09cb | 143 | bitmap_free(dev->zone_locks); |
ca4b2a01 MB |
144 | kvfree(dev->zones); |
145 | } | |
146 | ||
aa1c09cb DLM |
/*
 * Lock a zone for exclusive access. With memory backing, the per-zone bit
 * lock is taken first (sleeping is allowed there) so that the spinlock can
 * later be dropped around sleeping allocations while the zone stays
 * protected by the bit lock.
 */
static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
{
	if (dev->memory_backed)
		wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
	spin_lock_irq(&dev->zone_lock);
}
153 | ||
/*
 * Unlock a zone, releasing in the reverse order of null_lock_zone():
 * spinlock first, then (for memory-backed devices) the per-zone bit lock,
 * waking up any waiter blocked in wait_on_bit_lock_io().
 */
static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
{
	spin_unlock_irq(&dev->zone_lock);

	if (dev->memory_backed)
		clear_and_wake_up_bit(zno, dev->zone_locks);
}
161 | ||
/*
 * Report zone information for up to @nr_zones zones starting at @sector,
 * invoking @cb once per zone on a private copy of the zone descriptor.
 * Returns the number of zones reported, 0 if @sector is past the last zone,
 * or the (negative) error returned by the callback.
 */
int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i, zno;
	struct blk_zone zone;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	zno = first_zone;
	for (i = 0; i < nr_zones; i++, zno++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zno);
		memcpy(&zone, &dev->zones[zno], sizeof(struct blk_zone));
		null_unlock_zone(dev, zno);

		error = cb(&zone, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}
197 | ||
aa1c09cb DLM |
198 | /* |
199 | * This is called in the case of memory backing from null_process_cmd() | |
200 | * with the target zone already locked. | |
201 | */ | |
dd85b492 AJ |
202 | size_t null_zone_valid_read_len(struct nullb *nullb, |
203 | sector_t sector, unsigned int len) | |
204 | { | |
205 | struct nullb_device *dev = nullb->dev; | |
206 | struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)]; | |
207 | unsigned int nr_sectors = len >> SECTOR_SHIFT; | |
208 | ||
209 | /* Read must be below the write pointer position */ | |
210 | if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL || | |
211 | sector + nr_sectors <= zone->wp) | |
212 | return len; | |
213 | ||
214 | if (sector > zone->wp) | |
215 | return 0; | |
216 | ||
217 | return (zone->wp - sector) << SECTOR_SHIFT; | |
218 | } | |
219 | ||
dc4d137e NC |
/*
 * Close a sequential zone (REQ_OP_ZONE_CLOSE), moving it from an open
 * condition to closed (or back to empty if nothing was written), and
 * keeping the open/closed zone resource counters in sync.
 */
static blk_status_t null_close_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	/* An open zone with nothing written returns to empty, not closed. */
	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}
250 | ||
251 | static void null_close_first_imp_zone(struct nullb_device *dev) | |
252 | { | |
253 | unsigned int i; | |
254 | ||
255 | for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) { | |
256 | if (dev->zones[i].cond == BLK_ZONE_COND_IMP_OPEN) { | |
257 | null_close_zone(dev, &dev->zones[i]); | |
258 | return; | |
259 | } | |
260 | } | |
261 | } | |
262 | ||
fd78874b | 263 | static blk_status_t null_check_active(struct nullb_device *dev) |
dc4d137e NC |
264 | { |
265 | if (!dev->zone_max_active) | |
fd78874b KB |
266 | return BLK_STS_OK; |
267 | ||
268 | if (dev->nr_zones_exp_open + dev->nr_zones_imp_open + | |
269 | dev->nr_zones_closed < dev->zone_max_active) | |
270 | return BLK_STS_OK; | |
dc4d137e | 271 | |
fd78874b | 272 | return BLK_STS_ZONE_ACTIVE_RESOURCE; |
dc4d137e NC |
273 | } |
274 | ||
/*
 * Check whether another zone may be opened without exceeding zone_max_open
 * (zero means no limit). When the open limit is reached, an implicitly open
 * zone may be closed to make room — but only if the active-zone limit shows
 * that the subsequent open could actually succeed.
 */
static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			/* Free an open slot by closing an implicitly open zone. */
			null_close_first_imp_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}
292 | ||
/*
 * This function matches the manage open zone resources function in the ZBC standard,
 * with the addition of max active zones support (added in the ZNS standard).
 *
 * The function determines if a zone can transition to implicit open or explicit open,
 * while maintaining the max open zone (and max active zone) limit(s). It may close an
 * implicit open zone in order to make additional zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 *
 * Returns BLK_STS_OK if the zone may be opened, or the resource error from
 * the active/open checks. Must only be called for empty or closed zones.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev, struct blk_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* An empty zone consumes an active slot too: check that first. */
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		/* A closed zone is already active; only the open limit applies. */
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}
324 | ||
/*
 * Handle a regular write or zone append to the zone containing @sector.
 * Regular writes must land exactly at the zone write pointer; zone appends
 * are issued at the write pointer and the chosen position is returned to the
 * caller through the request/BIO sector. Updates the zone condition and the
 * open/closed resource counters accordingly.
 */
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	/* Conventional zones have no write pointer rules: process directly. */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);

	null_lock_zone(dev, zno);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* Cannot write to a full zone */
		ret = BLK_STS_IOERR;
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_CLOSED:
		/* Writing implicitly opens the zone: check resource limits. */
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
		break;
	default:
		/* Invalid zone condition */
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (cmd->bio)
			cmd->bio->bi_iter.bi_sector = sector;
		else
			cmd->rq->__sector = sector;
	} else if (sector != zone->wp) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/* The write must not cross the zone's usable capacity. */
	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/* Account for the transition to the implicitly open condition. */
	if (zone->cond == BLK_ZONE_COND_CLOSED) {
		dev->nr_zones_closed--;
		dev->nr_zones_imp_open++;
	} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
		dev->nr_zones_imp_open++;
	}
	if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
		zone->cond = BLK_ZONE_COND_IMP_OPEN;

	/*
	 * Memory backing allocation may sleep: release the zone_lock spinlock
	 * to avoid scheduling in atomic context. Zone operation atomicity is
	 * still guaranteed through the zone_locks bitmap.
	 */
	if (dev->memory_backed)
		spin_unlock_irq(&dev->zone_lock);
	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (dev->memory_backed)
		spin_lock_irq(&dev->zone_lock);

	if (ret != BLK_STS_OK)
		goto unlock;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		/* Reaching capacity: the zone becomes full and is no longer open. */
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
	}
	ret = BLK_STS_OK;

unlock:
	null_unlock_zone(dev, zno);

	return ret;
}
420 | ||
/*
 * Explicitly open a zone (REQ_OP_ZONE_OPEN), enforcing the open/active
 * resource limits and updating the per-condition zone counters.
 */
static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		/* Already open: just convert implicit to explicit below. */
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

	return BLK_STS_OK;
}
456 | ||
/*
 * Finish a zone (REQ_OP_ZONE_FINISH): move its write pointer to the end of
 * the zone and transition it to the full condition, updating the open/closed
 * zone counters as needed.
 */
static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
		/* Finishing an empty zone transiently activates it: check limits. */
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			return ret;
		dev->nr_zones_closed--;
		break;
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

	return BLK_STS_OK;
}
494 | ||
/*
 * Reset a zone (REQ_OP_ZONE_RESET): rewind its write pointer to the zone
 * start and return it to the empty condition, releasing any open/closed
 * resource slot it was holding.
 */
static blk_status_t null_reset_zone(struct nullb_device *dev, struct blk_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	return BLK_STS_OK;
}
524 | ||
da644b2c AJ |
/*
 * Execute a zone management operation (reset, reset-all, open, close,
 * finish) with the target zone locked. REQ_OP_ZONE_RESET_ALL walks every
 * sequential zone; the other operations act on the zone containing @sector.
 */
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct blk_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		/* Conventional zones cannot be reset: start at zone_nr_conv. */
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			null_lock_zone(dev, i);
			zone = &dev->zones[i];
			if (zone->cond != BLK_ZONE_COND_EMPTY) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, i);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone_no);

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	/* Trace the resulting condition only for successful operations. */
	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

	null_unlock_zone(dev, zone_no);

	return ret;
}
577 | ||
9dd44c7e DLM |
578 | blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op, |
579 | sector_t sector, sector_t nr_sectors) | |
fceb5d1b | 580 | { |
35bc10b2 | 581 | struct nullb_device *dev = cmd->nq->dev; |
aa1c09cb DLM |
582 | unsigned int zno = null_zone_no(dev, sector); |
583 | blk_status_t sts; | |
35bc10b2 | 584 | |
fceb5d1b CK |
585 | switch (op) { |
586 | case REQ_OP_WRITE: | |
35bc10b2 KJ |
587 | sts = null_zone_write(cmd, sector, nr_sectors, false); |
588 | break; | |
e0489ed5 | 589 | case REQ_OP_ZONE_APPEND: |
35bc10b2 KJ |
590 | sts = null_zone_write(cmd, sector, nr_sectors, true); |
591 | break; | |
fceb5d1b CK |
592 | case REQ_OP_ZONE_RESET: |
593 | case REQ_OP_ZONE_RESET_ALL: | |
da644b2c AJ |
594 | case REQ_OP_ZONE_OPEN: |
595 | case REQ_OP_ZONE_CLOSE: | |
596 | case REQ_OP_ZONE_FINISH: | |
35bc10b2 KJ |
597 | sts = null_zone_mgmt(cmd, op, sector); |
598 | break; | |
fceb5d1b | 599 | default: |
aa1c09cb | 600 | null_lock_zone(dev, zno); |
35bc10b2 | 601 | sts = null_process_cmd(cmd, op, sector, nr_sectors); |
aa1c09cb | 602 | null_unlock_zone(dev, zno); |
fceb5d1b | 603 | } |
35bc10b2 KJ |
604 | |
605 | return sts; | |
ca4b2a01 | 606 | } |