/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned"

#define DMZ_MIN_BIOS		8192

/*
 * Zone BIO context.
 */
struct dmz_bioctx {
	struct dmz_target	*target;
	struct dm_zone		*zone;
	struct bio		*bio;
	refcount_t		ref;
};
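
/*
 * Lifetime of a dmz_bioctx: the context lives in the per-BIO data of
 * the target BIO (see ti->per_io_data_size in dmz_ctr()). The reference
 * count is set to 1 in dmz_map(), incremented for each clone issued by
 * dmz_submit_bio(), and dropped through dmz_bio_endio(); the target BIO
 * completes when the count reaches zero.
 */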

/*
 * Chunk work descriptor.
 */
struct dm_chunk_work {
	struct work_struct	work;
	refcount_t		refcount;
	struct dmz_target	*target;
	unsigned int		chunk;
	struct bio_list		bio_list;
};
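
/*
 * Chunk works are indexed by chunk number in the chunk_rxtree radix
 * tree. Each BIO queued on a chunk work holds a reference on it, and an
 * extra reference is held while the work is queued on the chunk
 * workqueue. dmz_put_chunk_work() removes the work from the radix tree
 * and frees it once the last reference is dropped.
 */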

/*
 * Target descriptor.
 */
struct dmz_target {
	struct dm_dev		*ddev;

	unsigned long		flags;

	/* Zoned block device information */
	struct dmz_dev		*dev;

	/* For metadata handling */
	struct dmz_metadata	*metadata;

	/* For reclaim */
	struct dmz_reclaim	*reclaim;

	/* For chunk work */
	struct radix_tree_root	chunk_rxtree;
	struct workqueue_struct *chunk_wq;
	struct mutex		chunk_lock;

	/* For cloned BIOs to zones */
	struct bio_set		bio_set;

	/* For flush */
	spinlock_t		flush_lock;
	struct bio_list		flush_list;
	struct delayed_work	flush_work;
	struct workqueue_struct *flush_wq;
};

/*
 * Flush period (10 seconds, expressed in jiffies).
 */
#define DMZ_FLUSH_PERIOD	(10 * HZ)

/*
 * Target BIO completion.
 */
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));

	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
		bio->bi_status = status;

	if (refcount_dec_and_test(&bioctx->ref)) {
		struct dm_zone *zone = bioctx->zone;

		if (zone) {
			if (bio->bi_status != BLK_STS_OK &&
			    bio_op(bio) == REQ_OP_WRITE &&
			    dmz_is_seq(zone))
				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
			dmz_deactivate_zone(zone);
		}
		bio_endio(bio);
	}
}

/*
 * Completion callback for an internally cloned target BIO. This terminates the
 * target BIO when there are no more references to its context.
 */
static void dmz_clone_endio(struct bio *clone)
{
	struct dmz_bioctx *bioctx = clone->bi_private;
	blk_status_t status = clone->bi_status;

	bio_put(clone);
	dmz_bio_endio(bioctx->bio, status);
}

/*
 * Issue a clone of a target BIO. The clone may only partially process the
 * original target BIO.
 */
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
			  struct bio *bio, sector_t chunk_block,
			  unsigned int nr_blocks)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct bio *clone;

	clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
	if (!clone)
		return -ENOMEM;

	bio_set_dev(clone, dmz->dev->bdev);
	clone->bi_iter.bi_sector =
		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
	clone->bi_end_io = dmz_clone_endio;
	clone->bi_private = bioctx;

	bio_advance(bio, clone->bi_iter.bi_size);

	refcount_inc(&bioctx->ref);
	generic_make_request(clone);

	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
		zone->wp_block += nr_blocks;

	return 0;
}
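
/*
 * Note that dmz_submit_bio() advances zone->wp_block for writes to
 * sequential zones without additional locking: BIOs targeting the same
 * chunk are processed one at a time by that chunk's work item, so the
 * write pointer of the zones backing a chunk is never updated
 * concurrently.
 */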

/*
 * Zero out pages of discarded blocks accessed by a read BIO.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
				 sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

	/* Clear nr_blocks */
	swap(bio->bi_iter.bi_size, size);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, size);

	bio_advance(bio, size);
}

/*
 * Process a read BIO.
 */
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
			   struct bio *bio)
{
	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t end_block = chunk_block + nr_blocks;
	struct dm_zone *rzone, *bzone;
	int ret;

	/* Reads into unmapped chunks need only zero the BIO buffer */
	if (!zone) {
		zero_fill_bio(bio);
		return 0;
	}

	dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
		      dmz_id(dmz->metadata, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	/* Check block validity to determine the read location */
	bzone = zone->bzone;
	while (chunk_block < end_block) {
		nr_blocks = 0;
		if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
			/* Test block validity in the data zone */
			ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read data zone blocks */
				nr_blocks = ret;
				rzone = zone;
			}
		}

		/*
		 * No valid blocks found in the data zone.
		 * Check the buffer zone, if there is one.
		 */
		if (!nr_blocks && bzone) {
			ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read buffer zone blocks */
				nr_blocks = ret;
				rzone = bzone;
			}
		}

		if (nr_blocks) {
			/* Valid blocks found: read them */
			nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
			ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
			if (ret)
				return ret;
			chunk_block += nr_blocks;
		} else {
			/* No valid blocks: zero out the current BIO block */
			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
			chunk_block++;
		}
	}

	return 0;
}
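
/*
 * Example: for an 8-block read where blocks 0-2 are valid in the data
 * zone, blocks 3-4 are valid only in the buffer zone, and blocks 5-7
 * were never written, the loop above issues one clone for blocks 0-2
 * (data zone), one clone for blocks 3-4 (buffer zone), and zero-fills
 * blocks 5-7 one block at a time.
 */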

/*
 * Write blocks directly in a data zone, at the write pointer.
 * If a buffer zone is assigned, invalidate the blocks written
 * in place.
 */
static int dmz_handle_direct_write(struct dmz_target *dmz,
				   struct dm_zone *zone, struct bio *bio,
				   sector_t chunk_block,
				   unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone = zone->bzone;
	int ret;

	if (dmz_is_readonly(zone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the data zone and invalidate
	 * in the buffer zone, if there is one.
	 */
	ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && bzone)
		ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Write blocks in the buffer zone of @zone.
 * If no buffer zone is assigned yet, get one.
 * Called with @zone write locked.
 */
static int dmz_handle_buffered_write(struct dmz_target *dmz,
				     struct dm_zone *zone, struct bio *bio,
				     sector_t chunk_block,
				     unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone;
	int ret;

	/* Get the buffer zone. One will be allocated if needed */
	bzone = dmz_get_chunk_buffer(zmd, zone);
	if (!bzone)
		return -ENOSPC;

	if (dmz_is_readonly(bzone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the buffer zone
	 * and invalidate in the data zone.
	 */
	ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
	if (ret == 0 && chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);

	return ret;
}

/*
 * Process a write BIO.
 */
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
			    struct bio *bio)
{
	sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);

	if (!zone)
		return -ENOSPC;

	dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (dmz_is_rnd(zone) ? "RND" : "SEQ"),
		      dmz_id(dmz->metadata, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
		/*
		 * The zone is a random zone, or it is a sequential zone
		 * and the BIO is aligned with the zone write pointer:
		 * write directly to the zone.
		 */
		return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
	}

	/*
	 * This is an unaligned write in a sequential zone:
	 * use a buffered write.
	 */
	return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
}

/*
 * Process a discard BIO.
 */
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
			      struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t block = dmz_bio_block(bio);
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
	int ret = 0;

	/* For unmapped chunks, there is nothing to do */
	if (!zone)
		return 0;

	if (dmz_is_readonly(zone))
		return -EROFS;

	dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      dmz_id(zmd, zone),
		      (unsigned long long)chunk_block, nr_blocks);

	/*
	 * Invalidate blocks in the data zone and its
	 * buffer zone if one is mapped.
	 */
	if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && zone->bzone)
		ret = dmz_invalidate_blocks(zmd, zone->bzone,
					    chunk_block, nr_blocks);
	return ret;
}
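
/*
 * A discard only invalidates block mapping entries; no device I/O is
 * issued here. If all blocks of the data zone (and of its buffer zone)
 * end up invalid, the chunk mapping is released by
 * dmz_put_chunk_mapping() when dmz_handle_bio() completes, making the
 * zones available for reuse.
 */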

/*
 * Process a BIO.
 */
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
			   struct bio *bio)
{
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *zone;
	int ret;

	/*
	 * Write may trigger a zone allocation. So make sure the
	 * allocation can succeed.
	 */
	if (bio_op(bio) == REQ_OP_WRITE)
		dmz_schedule_reclaim(dmz->reclaim);

	dmz_lock_metadata(zmd);

	/*
	 * Get the data zone mapping the chunk. There may be no
	 * mapping for read and discard. If a mapping is obtained,
	 * the zone returned will be set to active state.
	 */
	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
				     bio_op(bio));
	if (IS_ERR(zone)) {
		ret = PTR_ERR(zone);
		goto out;
	}

	/* Process the BIO */
	if (zone) {
		dmz_activate_zone(zone);
		bioctx->zone = zone;
	}

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		ret = dmz_handle_read(dmz, zone, bio);
		break;
	case REQ_OP_WRITE:
		ret = dmz_handle_write(dmz, zone, bio);
		break;
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		ret = dmz_handle_discard(dmz, zone, bio);
		break;
	default:
		dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
			    bio_op(bio));
		ret = -EIO;
	}

	/*
	 * Release the chunk mapping. This will check that the mapping
	 * is still valid, that is, that the zone used still has valid blocks.
	 */
	if (zone)
		dmz_put_chunk_mapping(zmd, zone);
out:
	dmz_bio_endio(bio, errno_to_blk_status(ret));

	dmz_unlock_metadata(zmd);
}

/*
 * Increment a chunk work reference counter.
 */
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
	refcount_inc(&cw->refcount);
}

/*
 * Decrement a chunk work reference count and
 * free it if it becomes 0.
 */
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
	if (refcount_dec_and_test(&cw->refcount)) {
		WARN_ON(!bio_list_empty(&cw->bio_list));
		radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
		kfree(cw);
	}
}

/*
 * Chunk BIO work function.
 */
static void dmz_chunk_work(struct work_struct *work)
{
	struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
	struct dmz_target *dmz = cw->target;
	struct bio *bio;

	mutex_lock(&dmz->chunk_lock);

	/* Process the chunk BIOs */
	while ((bio = bio_list_pop(&cw->bio_list))) {
		mutex_unlock(&dmz->chunk_lock);
		dmz_handle_bio(dmz, cw, bio);
		mutex_lock(&dmz->chunk_lock);
		dmz_put_chunk_work(cw);
	}

	/* Queueing the work incremented the work refcount */
	dmz_put_chunk_work(cw);

	mutex_unlock(&dmz->chunk_lock);
}

/*
 * Flush work.
 */
static void dmz_flush_work(struct work_struct *work)
{
	struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
	struct bio *bio;
	int ret;

	/* Flush dirty metadata blocks */
	ret = dmz_flush_metadata(dmz->metadata);

	/* Process queued flush requests */
	while (1) {
		spin_lock(&dmz->flush_lock);
		bio = bio_list_pop(&dmz->flush_list);
		spin_unlock(&dmz->flush_lock);

		if (!bio)
			break;

		dmz_bio_endio(bio, errno_to_blk_status(ret));
	}

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
}
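
/*
 * Flush BIOs queued in dmz_map() are completed only after
 * dmz_flush_metadata() returns, so a flush completed by the target also
 * guarantees that the mapping and bitmap metadata written up to that
 * point are persistent.
 */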

/*
 * Get a chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
 */
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
	unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
	struct dm_chunk_work *cw;
	int ret = 0;

	mutex_lock(&dmz->chunk_lock);

	/* Get the BIO chunk work. If one is not active yet, create one */
	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
	if (cw) {
		dmz_get_chunk_work(cw);
	} else {
		/* Create a new chunk work */
		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
		if (unlikely(!cw)) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_WORK(&cw->work, dmz_chunk_work);
		/*
		 * Start at 1 for the BIO added below: refcount_inc()
		 * from zero is not allowed on a refcount_t.
		 */
		refcount_set(&cw->refcount, 1);
		cw->target = dmz;
		cw->chunk = chunk;
		bio_list_init(&cw->bio_list);

		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
		if (unlikely(ret)) {
			kfree(cw);
			goto out;
		}
	}

	bio_list_add(&cw->bio_list, bio);

	dmz_reclaim_bio_acc(dmz->reclaim);
	if (queue_work(dmz->chunk_wq, &cw->work))
		dmz_get_chunk_work(cw);
out:
	mutex_unlock(&dmz->chunk_lock);
	return ret;
}

/*
 * Process a new BIO.
 */
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = dmz->dev;
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	sector_t sector = bio->bi_iter.bi_sector;
	unsigned int nr_sectors = bio_sectors(bio);
	sector_t chunk_sector;
	int ret;

	dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
		      bio_op(bio), (unsigned long long)sector, nr_sectors,
		      (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
		      (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
		      (unsigned int)dmz_bio_blocks(bio));

	bio_set_dev(bio, dev->bdev);

	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
		return DM_MAPIO_REMAPPED;

	/* The BIO should be block aligned */
	if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
		return DM_MAPIO_KILL;

	/* Initialize the BIO context */
	bioctx->target = dmz;
	bioctx->zone = NULL;
	bioctx->bio = bio;
	refcount_set(&bioctx->ref, 1);

	/* Set the BIO pending in the flush list */
	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
		spin_lock(&dmz->flush_lock);
		bio_list_add(&dmz->flush_list, bio);
		spin_unlock(&dmz->flush_lock);
		mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
		return DM_MAPIO_SUBMITTED;
	}

	/* Split zone BIOs to fit entirely into a zone */
	chunk_sector = sector & (dev->zone_nr_sectors - 1);
	if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
		dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);

	/* Now ready to handle this BIO */
	ret = dmz_queue_chunk_work(dmz, bio);
	if (ret) {
		dmz_dev_debug(dmz->dev,
			      "BIO op %d, can't process chunk %llu, err %i\n",
			      bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
			      ret);
		return DM_MAPIO_REQUEUE;
	}

	return DM_MAPIO_SUBMITTED;
}
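
/*
 * A BIO crossing a chunk boundary is truncated with
 * dm_accept_partial_bio() above; the device-mapper core resubmits the
 * remainder as a new BIO, so a single call here never spans two chunks.
 */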

/*
 * Get zoned device information.
 */
static int dmz_get_zoned_device(struct dm_target *ti, char *path)
{
	struct dmz_target *dmz = ti->private;
	struct request_queue *q;
	struct dmz_dev *dev;
	sector_t aligned_capacity;
	int ret;

	/* Get the target device */
	ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
	if (ret) {
		ti->error = "Get target device failed";
		dmz->ddev = NULL;
		return ret;
	}

	dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto err;
	}

	dev->bdev = dmz->ddev->bdev;
	(void)bdevname(dev->bdev, dev->name);

	if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
		ti->error = "Not a zoned block device";
		ret = -EINVAL;
		goto err;
	}

	q = bdev_get_queue(dev->bdev);
	dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
	aligned_capacity = dev->capacity &
			   ~((sector_t)blk_queue_zone_sectors(q) - 1);
	if (ti->begin ||
	    ((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
		ti->error = "Partial mapping not supported";
		ret = -EINVAL;
		goto err;
	}

	dev->zone_nr_sectors = blk_queue_zone_sectors(q);
	dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);

	dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
	dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);

	dev->nr_zones = blkdev_nr_zones(dev->bdev);

	dmz->dev = dev;

	return 0;
err:
	dm_put_device(ti, dmz->ddev);
	kfree(dev);

	return ret;
}
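
/*
 * The aligned_capacity check above accepts a table covering either the
 * full device capacity or the capacity rounded down to a zone boundary.
 * This allows using devices whose last zone is a smaller runt zone by
 * simply leaving that zone out of the mapping.
 */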

/*
 * Cleanup zoned device information.
 */
static void dmz_put_zoned_device(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	dm_put_device(ti, dmz->ddev);
	kfree(dmz->dev);
	dmz->dev = NULL;
}

/*
 * Setup target.
 */
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dmz_target *dmz;
	struct dmz_dev *dev;
	int ret;

	/* Check arguments */
	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* Allocate and initialize the target descriptor */
	dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
	if (!dmz) {
		ti->error = "Unable to allocate the zoned target descriptor";
		return -ENOMEM;
	}
	ti->private = dmz;

	/* Get the target zoned block device */
	ret = dmz_get_zoned_device(ti, argv[0]);
	if (ret) {
		dmz->ddev = NULL;
		goto err;
	}

	/* Initialize metadata */
	dev = dmz->dev;
	ret = dmz_ctr_metadata(dev, &dmz->metadata);
	if (ret) {
		ti->error = "Metadata initialization failed";
		goto err_dev;
	}

	/* Set target (no write same support) */
	ti->max_io_len = dev->zone_nr_sectors;	/* max_io_len is in 512-byte sectors */
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->per_io_data_size = sizeof(struct dmz_bioctx);
	ti->flush_supported = true;
	ti->discards_supported = true;

	/* The exposed capacity is the number of chunks that can be mapped */
	ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;

	/* Zone BIO */
	ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
	if (ret) {
		ti->error = "Create BIO set failed";
		goto err_meta;
	}

	/* Chunk BIO work */
	mutex_init(&dmz->chunk_lock);
	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
					0, dev->name);
	if (!dmz->chunk_wq) {
		ti->error = "Create chunk workqueue failed";
		ret = -ENOMEM;
		goto err_bio;
	}

	/* Flush work */
	spin_lock_init(&dmz->flush_lock);
	bio_list_init(&dmz->flush_list);
	INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
	dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
						dev->name);
	if (!dmz->flush_wq) {
		ti->error = "Create flush workqueue failed";
		ret = -ENOMEM;
		goto err_cwq;
	}
	mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);

	/* Initialize reclaim */
	ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
	if (ret) {
		ti->error = "Zone reclaim initialization failed";
		goto err_fwq;
	}

	dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
		     (unsigned long long)ti->len,
		     (unsigned long long)dmz_sect2blk(ti->len));

	return 0;
err_fwq:
	destroy_workqueue(dmz->flush_wq);
err_cwq:
	destroy_workqueue(dmz->chunk_wq);
err_bio:
	mutex_destroy(&dmz->chunk_lock);
	bioset_exit(&dmz->bio_set);
err_meta:
	dmz_dtr_metadata(dmz->metadata);
err_dev:
	dmz_put_zoned_device(ti);
err:
	kfree(dmz);

	return ret;
}
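
/*
 * Example usage (a sketch; the device name is illustrative): map the
 * host-managed zoned disk /dev/sdf as a randomly writable device:
 *
 *   echo "0 `blockdev --getsz /dev/sdf` zoned /dev/sdf" | \
 *       dmsetup create dmz-sdf
 *
 * The table line is "<start> <length> zoned <device path>": start must
 * be 0 and the length must cover the whole device or its zone-aligned
 * capacity, as enforced by dmz_get_zoned_device().
 */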

/*
 * Cleanup target.
 */
static void dmz_dtr(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	flush_workqueue(dmz->chunk_wq);
	destroy_workqueue(dmz->chunk_wq);

	dmz_dtr_reclaim(dmz->reclaim);

	cancel_delayed_work_sync(&dmz->flush_work);
	destroy_workqueue(dmz->flush_wq);

	(void) dmz_flush_metadata(dmz->metadata);

	dmz_dtr_metadata(dmz->metadata);

	bioset_exit(&dmz->bio_set);

	dmz_put_zoned_device(ti);

	mutex_destroy(&dmz->chunk_lock);

	kfree(dmz);
}

/*
 * Setup target request queue limits.
 */
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dmz_target *dmz = ti->private;
	unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;

	limits->logical_block_size = DMZ_BLOCK_SIZE;
	limits->physical_block_size = DMZ_BLOCK_SIZE;

	blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
	blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);

	limits->discard_alignment = DMZ_BLOCK_SIZE;
	limits->discard_granularity = DMZ_BLOCK_SIZE;
	limits->max_discard_sectors = chunk_sectors;
	limits->max_hw_discard_sectors = chunk_sectors;
	limits->max_write_zeroes_sectors = chunk_sectors;

	/* FS hint to try to align to the device zone size */
	limits->chunk_sectors = chunk_sectors;
	limits->max_sectors = chunk_sectors;

	/* We are exposing a drive-managed zoned block device */
	limits->zoned = BLK_ZONED_NONE;
}

/*
 * Pass on ioctl to the backend device.
 */
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dmz_target *dmz = ti->private;

	*bdev = dmz->dev->bdev;

	return 0;
}

/*
 * Stop works on suspend.
 */
static void dmz_suspend(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	flush_workqueue(dmz->chunk_wq);
	dmz_suspend_reclaim(dmz->reclaim);
	cancel_delayed_work_sync(&dmz->flush_work);
}

/*
 * Restart works on resume or if suspend failed.
 */
static void dmz_resume(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
	dmz_resume_reclaim(dmz->reclaim);
}

static int dmz_iterate_devices(struct dm_target *ti,
			       iterate_devices_callout_fn fn, void *data)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = dmz->dev;
	sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);

	return fn(ti, dmz->ddev, 0, capacity, data);
}

static struct target_type dmz_type = {
	.name		 = "zoned",
	.version	 = {1, 0, 0},
	.features	 = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
	.module		 = THIS_MODULE,
	.ctr		 = dmz_ctr,
	.dtr		 = dmz_dtr,
	.map		 = dmz_map,
	.io_hints	 = dmz_io_hints,
	.prepare_ioctl	 = dmz_prepare_ioctl,
	.postsuspend	 = dmz_suspend,
	.resume		 = dmz_resume,
	.iterate_devices = dmz_iterate_devices,
};

static int __init dmz_init(void)
{
	return dm_register_target(&dmz_type);
}

static void __exit dmz_exit(void)
{
	dm_unregister_target(&dmz_type);
}

module_init(dmz_init);
module_exit(dmz_exit);

MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
MODULE_LICENSE("GPL");