// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/sched/mm.h>

#define DM_MSG_PREFIX		"zoned metadata"

/*
 * Metadata version.
 */
#define DMZ_META_VER	1

/*
 * On-disk super block magic.
 */
#define DMZ_MAGIC	((((unsigned int)('D')) << 24) | \
			 (((unsigned int)('Z')) << 16) | \
			 (((unsigned int)('B')) <<  8) | \
			 ((unsigned int)('D')))
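
/*
 * For illustration, the magic simply packs the ASCII bytes 'D', 'Z',
 * 'B', 'D' into a single 32-bit value, so the check performed by
 * dmz_check_sb() below is equivalent to (hypothetical sketch):
 *
 *	if (le32_to_cpu(sb->magic) != 0x445a4244)	// 'D' 'Z' 'B' 'D'
 *		return -ENXIO;
 */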
28 | ||
29 | /* | |
30 | * On disk super block. | |
31 | * This uses only 512 B but uses on disk a full 4KB block. This block is | |
32 | * followed on disk by the mapping table of chunks to zones and the bitmap | |
33 | * blocks indicating zone block validity. | |
34 | * The overall resulting metadata format is: | |
35 | * (1) Super block (1 block) | |
36 | * (2) Chunk mapping table (nr_map_blocks) | |
37 | * (3) Bitmap blocks (nr_bitmap_blocks) | |
ad1bd578 | 38 | * All metadata blocks are stored in conventional zones, starting from |
3b1a94c8 DLM |
39 | * the first conventional zone found on disk. |
40 | */ | |
struct dmz_super {
	/* Magic number */
	__le32		magic;			/*   4 */

	/* Metadata version number */
	__le32		version;		/*   8 */

	/* Generation number */
	__le64		gen;			/*  16 */

	/* This block number */
	__le64		sb_block;		/*  24 */

	/* The number of metadata blocks, including this super block */
	__le32		nr_meta_blocks;		/*  28 */

	/* The number of sequential zones reserved for reclaim */
	__le32		nr_reserved_seq;	/*  32 */

	/* The number of entries in the mapping table */
	__le32		nr_chunks;		/*  36 */

	/* The number of blocks used for the chunk mapping table */
	__le32		nr_map_blocks;		/*  40 */

	/* The number of blocks used for the block bitmaps */
	__le32		nr_bitmap_blocks;	/*  44 */

	/* Checksum */
	__le32		crc;			/*  48 */

	/* Padding to full 512B sector */
	u8		reserved[464];		/* 512 */
};
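
/*
 * Worked example (illustrative numbers only, assuming 256 MB zones and
 * the 4 KB metadata block size): a zone then holds 65536 blocks, so one
 * zone validity bitmap needs 65536 / 8 = 8 KB, i.e. 2 bitmap blocks per
 * zone. One mapping table block holds 512 chunk entries, so a device
 * with 4096 chunks needs 4096 / 512 = 8 map blocks. The layout described
 * above then gives nr_meta_blocks = 1 + nr_map_blocks + nr_bitmap_blocks.
 */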
75 | ||
76 | /* | |
77 | * Chunk mapping entry: entries are indexed by chunk number | |
78 | * and give the zone ID (dzone_id) mapping the chunk on disk. | |
79 | * This zone may be sequential or random. If it is a sequential | |
80 | * zone, a second zone (bzone_id) used as a write buffer may | |
81 | * also be specified. This second zone will always be a randomly | |
82 | * writeable zone. | |
83 | */ | |
84 | struct dmz_map { | |
85 | __le32 dzone_id; | |
86 | __le32 bzone_id; | |
87 | }; | |
88 | ||
/*
 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
 */
#define DMZ_MAP_ENTRIES		(DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
#define DMZ_MAP_ENTRIES_SHIFT	(ilog2(DMZ_MAP_ENTRIES))
#define DMZ_MAP_ENTRIES_MASK	(DMZ_MAP_ENTRIES - 1)
#define DMZ_MAP_UNMAPPED	UINT_MAX
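
/*
 * For illustration, locating the mapping entry of a chunk splits the
 * chunk number using the macros above (this is the exact pattern used
 * by dmz_set_chunk_mapping() and dmz_get_chunk_mapping() below):
 *
 *	mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
 *	dmap = (struct dmz_map *)mblk->data;
 *	dzone_id = le32_to_cpu(dmap[chunk & DMZ_MAP_ENTRIES_MASK].dzone_id);
 */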
96 | ||
97 | /* | |
98 | * Meta data block descriptor (for cached metadata blocks). | |
99 | */ | |
100 | struct dmz_mblock { | |
101 | struct rb_node node; | |
102 | struct list_head link; | |
103 | sector_t no; | |
33c2865f | 104 | unsigned int ref; |
3b1a94c8 DLM |
105 | unsigned long state; |
106 | struct page *page; | |
107 | void *data; | |
108 | }; | |
109 | ||
110 | /* | |
111 | * Metadata block state flags. | |
112 | */ | |
113 | enum { | |
114 | DMZ_META_DIRTY, | |
115 | DMZ_META_READING, | |
116 | DMZ_META_WRITING, | |
117 | DMZ_META_ERROR, | |
118 | }; | |
119 | ||
120 | /* | |
121 | * Super block information (one per metadata set). | |
122 | */ | |
123 | struct dmz_sb { | |
124 | sector_t block; | |
125 | struct dmz_mblock *mblk; | |
126 | struct dmz_super *sb; | |
127 | }; | |
128 | ||
129 | /* | |
130 | * In-memory metadata. | |
131 | */ | |
132 | struct dmz_metadata { | |
133 | struct dmz_dev *dev; | |
134 | ||
135 | sector_t zone_bitmap_size; | |
136 | unsigned int zone_nr_bitmap_blocks; | |
b3996295 | 137 | unsigned int zone_bits_per_mblk; |
3b1a94c8 DLM |
138 | |
139 | unsigned int nr_bitmap_blocks; | |
140 | unsigned int nr_map_blocks; | |
141 | ||
142 | unsigned int nr_useable_zones; | |
143 | unsigned int nr_meta_blocks; | |
144 | unsigned int nr_meta_zones; | |
145 | unsigned int nr_data_zones; | |
146 | unsigned int nr_rnd_zones; | |
147 | unsigned int nr_reserved_seq; | |
148 | unsigned int nr_chunks; | |
149 | ||
150 | /* Zone information array */ | |
151 | struct dm_zone *zones; | |
152 | ||
153 | struct dm_zone *sb_zone; | |
154 | struct dmz_sb sb[2]; | |
155 | unsigned int mblk_primary; | |
156 | u64 sb_gen; | |
157 | unsigned int min_nr_mblks; | |
158 | unsigned int max_nr_mblks; | |
159 | atomic_t nr_mblks; | |
160 | struct rw_semaphore mblk_sem; | |
161 | struct mutex mblk_flush_lock; | |
162 | spinlock_t mblk_lock; | |
163 | struct rb_root mblk_rbtree; | |
164 | struct list_head mblk_lru_list; | |
165 | struct list_head mblk_dirty_list; | |
166 | struct shrinker mblk_shrinker; | |
167 | ||
168 | /* Zone allocation management */ | |
169 | struct mutex map_lock; | |
170 | struct dmz_mblock **map_mblk; | |
171 | unsigned int nr_rnd; | |
172 | atomic_t unmap_nr_rnd; | |
173 | struct list_head unmap_rnd_list; | |
174 | struct list_head map_rnd_list; | |
175 | ||
176 | unsigned int nr_seq; | |
177 | atomic_t unmap_nr_seq; | |
178 | struct list_head unmap_seq_list; | |
179 | struct list_head map_seq_list; | |
180 | ||
181 | atomic_t nr_reserved_seq_zones; | |
182 | struct list_head reserved_seq_zones_list; | |
183 | ||
184 | wait_queue_head_t free_wq; | |
185 | }; | |
186 | ||
187 | /* | |
188 | * Various accessors | |
189 | */ | |
190 | unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone) | |
191 | { | |
192 | return ((unsigned int)(zone - zmd->zones)); | |
193 | } | |
194 | ||
195 | sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone) | |
196 | { | |
3908c983 | 197 | return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift; |
3b1a94c8 DLM |
198 | } |
199 | ||
200 | sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone) | |
201 | { | |
3908c983 | 202 | return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift; |
3b1a94c8 DLM |
203 | } |
204 | ||
205 | unsigned int dmz_nr_chunks(struct dmz_metadata *zmd) | |
206 | { | |
207 | return zmd->nr_chunks; | |
208 | } | |
209 | ||
210 | unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd) | |
211 | { | |
212 | return zmd->nr_rnd; | |
213 | } | |
214 | ||
215 | unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd) | |
216 | { | |
217 | return atomic_read(&zmd->unmap_nr_rnd); | |
218 | } | |
219 | ||
220 | /* | |
221 | * Lock/unlock mapping table. | |
222 | * The map lock also protects all the zone lists. | |
223 | */ | |
224 | void dmz_lock_map(struct dmz_metadata *zmd) | |
225 | { | |
226 | mutex_lock(&zmd->map_lock); | |
227 | } | |
228 | ||
229 | void dmz_unlock_map(struct dmz_metadata *zmd) | |
230 | { | |
231 | mutex_unlock(&zmd->map_lock); | |
232 | } | |
233 | ||
234 | /* | |
235 | * Lock/unlock metadata access. This is a "read" lock on a semaphore | |
236 | * that prevents metadata flush from running while metadata are being | |
237 | * modified. The actual metadata write mutual exclusion is achieved with | |
ad1bd578 | 238 | * the map lock and zone state management (active and reclaim state are |
3b1a94c8 DLM |
239 | * mutually exclusive). |
240 | */ | |
241 | void dmz_lock_metadata(struct dmz_metadata *zmd) | |
242 | { | |
243 | down_read(&zmd->mblk_sem); | |
244 | } | |
245 | ||
246 | void dmz_unlock_metadata(struct dmz_metadata *zmd) | |
247 | { | |
248 | up_read(&zmd->mblk_sem); | |
249 | } | |
250 | ||
251 | /* | |
252 | * Lock/unlock flush: prevent concurrent executions | |
253 | * of dmz_flush_metadata as well as metadata modification in reclaim | |
254 | * while flush is being executed. | |
255 | */ | |
256 | void dmz_lock_flush(struct dmz_metadata *zmd) | |
257 | { | |
258 | mutex_lock(&zmd->mblk_flush_lock); | |
259 | } | |
260 | ||
261 | void dmz_unlock_flush(struct dmz_metadata *zmd) | |
262 | { | |
263 | mutex_unlock(&zmd->mblk_flush_lock); | |
264 | } | |
265 | ||
266 | /* | |
267 | * Allocate a metadata block. | |
268 | */ | |
269 | static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd, | |
270 | sector_t mblk_no) | |
271 | { | |
272 | struct dmz_mblock *mblk = NULL; | |
273 | ||
274 | /* See if we can reuse cached blocks */ | |
275 | if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) { | |
276 | spin_lock(&zmd->mblk_lock); | |
277 | mblk = list_first_entry_or_null(&zmd->mblk_lru_list, | |
278 | struct dmz_mblock, link); | |
279 | if (mblk) { | |
280 | list_del_init(&mblk->link); | |
281 | rb_erase(&mblk->node, &zmd->mblk_rbtree); | |
282 | mblk->no = mblk_no; | |
283 | } | |
284 | spin_unlock(&zmd->mblk_lock); | |
285 | if (mblk) | |
286 | return mblk; | |
287 | } | |
288 | ||
289 | /* Allocate a new block */ | |
290 | mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO); | |
291 | if (!mblk) | |
292 | return NULL; | |
293 | ||
294 | mblk->page = alloc_page(GFP_NOIO); | |
295 | if (!mblk->page) { | |
296 | kfree(mblk); | |
297 | return NULL; | |
298 | } | |
299 | ||
300 | RB_CLEAR_NODE(&mblk->node); | |
301 | INIT_LIST_HEAD(&mblk->link); | |
33c2865f | 302 | mblk->ref = 0; |
3b1a94c8 DLM |
303 | mblk->state = 0; |
304 | mblk->no = mblk_no; | |
305 | mblk->data = page_address(mblk->page); | |
306 | ||
307 | atomic_inc(&zmd->nr_mblks); | |
308 | ||
309 | return mblk; | |
310 | } | |
311 | ||
312 | /* | |
313 | * Free a metadata block. | |
314 | */ | |
315 | static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) | |
316 | { | |
317 | __free_pages(mblk->page, 0); | |
318 | kfree(mblk); | |
319 | ||
320 | atomic_dec(&zmd->nr_mblks); | |
321 | } | |
322 | ||
323 | /* | |
324 | * Insert a metadata block in the rbtree. | |
325 | */ | |
326 | static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) | |
327 | { | |
328 | struct rb_root *root = &zmd->mblk_rbtree; | |
329 | struct rb_node **new = &(root->rb_node), *parent = NULL; | |
330 | struct dmz_mblock *b; | |
331 | ||
332 | /* Figure out where to put the new node */ | |
333 | while (*new) { | |
334 | b = container_of(*new, struct dmz_mblock, node); | |
335 | parent = *new; | |
336 | new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right); | |
337 | } | |
338 | ||
339 | /* Add new node and rebalance tree */ | |
340 | rb_link_node(&mblk->node, parent, new); | |
341 | rb_insert_color(&mblk->node, root); | |
342 | } | |
343 | ||
344 | /* | |
3d4e7383 DLM |
345 | * Lookup a metadata block in the rbtree. If the block is found, increment |
346 | * its reference count. | |
3b1a94c8 | 347 | */ |
3d4e7383 DLM |
348 | static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd, |
349 | sector_t mblk_no) | |
3b1a94c8 DLM |
350 | { |
351 | struct rb_root *root = &zmd->mblk_rbtree; | |
352 | struct rb_node *node = root->rb_node; | |
353 | struct dmz_mblock *mblk; | |
354 | ||
355 | while (node) { | |
356 | mblk = container_of(node, struct dmz_mblock, node); | |
3d4e7383 DLM |
357 | if (mblk->no == mblk_no) { |
358 | /* | |
359 | * If this is the first reference to the block, | |
360 | * remove it from the LRU list. | |
361 | */ | |
362 | mblk->ref++; | |
363 | if (mblk->ref == 1 && | |
364 | !test_bit(DMZ_META_DIRTY, &mblk->state)) | |
365 | list_del_init(&mblk->link); | |
3b1a94c8 | 366 | return mblk; |
3d4e7383 | 367 | } |
3b1a94c8 DLM |
368 | node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right; |
369 | } | |
370 | ||
371 | return NULL; | |
372 | } | |
373 | ||
374 | /* | |
375 | * Metadata block BIO end callback. | |
376 | */ | |
377 | static void dmz_mblock_bio_end_io(struct bio *bio) | |
378 | { | |
379 | struct dmz_mblock *mblk = bio->bi_private; | |
380 | int flag; | |
381 | ||
382 | if (bio->bi_status) | |
383 | set_bit(DMZ_META_ERROR, &mblk->state); | |
384 | ||
385 | if (bio_op(bio) == REQ_OP_WRITE) | |
386 | flag = DMZ_META_WRITING; | |
387 | else | |
388 | flag = DMZ_META_READING; | |
389 | ||
390 | clear_bit_unlock(flag, &mblk->state); | |
391 | smp_mb__after_atomic(); | |
392 | wake_up_bit(&mblk->state, flag); | |
393 | ||
394 | bio_put(bio); | |
395 | } | |
396 | ||
397 | /* | |
3d4e7383 | 398 | * Read an uncached metadata block from disk and add it to the cache. |
3b1a94c8 | 399 | */ |
3d4e7383 DLM |
400 | static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd, |
401 | sector_t mblk_no) | |
3b1a94c8 | 402 | { |
3d4e7383 | 403 | struct dmz_mblock *mblk, *m; |
3b1a94c8 DLM |
404 | sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; |
405 | struct bio *bio; | |
406 | ||
75d66ffb DF |
407 | if (dmz_bdev_is_dying(zmd->dev)) |
408 | return ERR_PTR(-EIO); | |
409 | ||
3d4e7383 | 410 | /* Get a new block and a BIO to read it */ |
3b1a94c8 DLM |
411 | mblk = dmz_alloc_mblock(zmd, mblk_no); |
412 | if (!mblk) | |
75d66ffb | 413 | return ERR_PTR(-ENOMEM); |
3b1a94c8 | 414 | |
3b1a94c8 DLM |
415 | bio = bio_alloc(GFP_NOIO, 1); |
416 | if (!bio) { | |
417 | dmz_free_mblock(zmd, mblk); | |
75d66ffb | 418 | return ERR_PTR(-ENOMEM); |
3b1a94c8 DLM |
419 | } |
420 | ||
3d4e7383 DLM |
421 | spin_lock(&zmd->mblk_lock); |
422 | ||
423 | /* | |
424 | * Make sure that another context did not start reading | |
425 | * the block already. | |
426 | */ | |
427 | m = dmz_get_mblock_fast(zmd, mblk_no); | |
428 | if (m) { | |
429 | spin_unlock(&zmd->mblk_lock); | |
430 | dmz_free_mblock(zmd, mblk); | |
431 | bio_put(bio); | |
432 | return m; | |
433 | } | |
434 | ||
435 | mblk->ref++; | |
436 | set_bit(DMZ_META_READING, &mblk->state); | |
437 | dmz_insert_mblock(zmd, mblk); | |
438 | ||
439 | spin_unlock(&zmd->mblk_lock); | |
440 | ||
441 | /* Submit read BIO */ | |
3b1a94c8 | 442 | bio->bi_iter.bi_sector = dmz_blk2sect(block); |
74d46992 | 443 | bio_set_dev(bio, zmd->dev->bdev); |
3b1a94c8 DLM |
444 | bio->bi_private = mblk; |
445 | bio->bi_end_io = dmz_mblock_bio_end_io; | |
446 | bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO); | |
447 | bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); | |
448 | submit_bio(bio); | |
449 | ||
450 | return mblk; | |
451 | } | |
452 | ||
453 | /* | |
454 | * Free metadata blocks. | |
455 | */ | |
456 | static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd, | |
457 | unsigned long limit) | |
458 | { | |
459 | struct dmz_mblock *mblk; | |
460 | unsigned long count = 0; | |
461 | ||
462 | if (!zmd->max_nr_mblks) | |
463 | return 0; | |
464 | ||
465 | while (!list_empty(&zmd->mblk_lru_list) && | |
466 | atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks && | |
467 | count < limit) { | |
468 | mblk = list_first_entry(&zmd->mblk_lru_list, | |
469 | struct dmz_mblock, link); | |
470 | list_del_init(&mblk->link); | |
471 | rb_erase(&mblk->node, &zmd->mblk_rbtree); | |
472 | dmz_free_mblock(zmd, mblk); | |
473 | count++; | |
474 | } | |
475 | ||
476 | return count; | |
477 | } | |
478 | ||
479 | /* | |
480 | * For mblock shrinker: get the number of unused metadata blocks in the cache. | |
481 | */ | |
482 | static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink, | |
483 | struct shrink_control *sc) | |
484 | { | |
485 | struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker); | |
486 | ||
487 | return atomic_read(&zmd->nr_mblks); | |
488 | } | |
489 | ||
490 | /* | |
491 | * For mblock shrinker: scan unused metadata blocks and shrink the cache. | |
492 | */ | |
493 | static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink, | |
494 | struct shrink_control *sc) | |
495 | { | |
496 | struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker); | |
497 | unsigned long count; | |
498 | ||
499 | spin_lock(&zmd->mblk_lock); | |
500 | count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan); | |
501 | spin_unlock(&zmd->mblk_lock); | |
502 | ||
503 | return count ? count : SHRINK_STOP; | |
504 | } | |
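
/*
 * These two callbacks plug into the memory shrinker core. The metadata
 * constructor (not part of this excerpt) wires them up roughly as
 * follows (sketch based on the fields used here):
 *
 *	zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
 *	zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
 *	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
 *	register_shrinker(&zmd->mblk_shrinker);
 */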
505 | ||
506 | /* | |
507 | * Release a metadata block. | |
508 | */ | |
509 | static void dmz_release_mblock(struct dmz_metadata *zmd, | |
510 | struct dmz_mblock *mblk) | |
511 | { | |
512 | ||
513 | if (!mblk) | |
514 | return; | |
515 | ||
516 | spin_lock(&zmd->mblk_lock); | |
517 | ||
33c2865f DLM |
518 | mblk->ref--; |
519 | if (mblk->ref == 0) { | |
3b1a94c8 DLM |
520 | if (test_bit(DMZ_META_ERROR, &mblk->state)) { |
521 | rb_erase(&mblk->node, &zmd->mblk_rbtree); | |
522 | dmz_free_mblock(zmd, mblk); | |
523 | } else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) { | |
524 | list_add_tail(&mblk->link, &zmd->mblk_lru_list); | |
525 | dmz_shrink_mblock_cache(zmd, 1); | |
526 | } | |
527 | } | |
528 | ||
529 | spin_unlock(&zmd->mblk_lock); | |
530 | } | |
531 | ||
532 | /* | |
533 | * Get a metadata block from the rbtree. If the block | |
534 | * is not present, read it from disk. | |
535 | */ | |
536 | static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd, | |
537 | sector_t mblk_no) | |
538 | { | |
539 | struct dmz_mblock *mblk; | |
540 | ||
541 | /* Check rbtree */ | |
542 | spin_lock(&zmd->mblk_lock); | |
3d4e7383 | 543 | mblk = dmz_get_mblock_fast(zmd, mblk_no); |
3b1a94c8 DLM |
544 | spin_unlock(&zmd->mblk_lock); |
545 | ||
546 | if (!mblk) { | |
547 | /* Cache miss: read the block from disk */ | |
3d4e7383 | 548 | mblk = dmz_get_mblock_slow(zmd, mblk_no); |
75d66ffb DF |
549 | if (IS_ERR(mblk)) |
550 | return mblk; | |
3b1a94c8 DLM |
551 | } |
552 | ||
553 | /* Wait for on-going read I/O and check for error */ | |
554 | wait_on_bit_io(&mblk->state, DMZ_META_READING, | |
555 | TASK_UNINTERRUPTIBLE); | |
556 | if (test_bit(DMZ_META_ERROR, &mblk->state)) { | |
557 | dmz_release_mblock(zmd, mblk); | |
e7fad909 | 558 | dmz_check_bdev(zmd->dev); |
3b1a94c8 DLM |
559 | return ERR_PTR(-EIO); |
560 | } | |
561 | ||
562 | return mblk; | |
563 | } | |
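
/*
 * Typical access pattern for cached metadata blocks (illustrative
 * sketch; dmz_load_mapping() below is a real caller):
 *
 *	mblk = dmz_get_mblock(zmd, mblk_no);
 *	if (IS_ERR(mblk))
 *		return PTR_ERR(mblk);
 *	// ... read or modify mblk->data, calling dmz_dirty_mblock()
 *	// after any modification ...
 *	dmz_release_mblock(zmd, mblk);
 */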
564 | ||
565 | /* | |
566 | * Mark a metadata block dirty. | |
567 | */ | |
568 | static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk) | |
569 | { | |
570 | spin_lock(&zmd->mblk_lock); | |
571 | if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state)) | |
572 | list_add_tail(&mblk->link, &zmd->mblk_dirty_list); | |
573 | spin_unlock(&zmd->mblk_lock); | |
574 | } | |
575 | ||
576 | /* | |
577 | * Issue a metadata block write BIO. | |
578 | */ | |
75d66ffb DF |
579 | static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, |
580 | unsigned int set) | |
3b1a94c8 DLM |
581 | { |
582 | sector_t block = zmd->sb[set].block + mblk->no; | |
583 | struct bio *bio; | |
584 | ||
75d66ffb DF |
585 | if (dmz_bdev_is_dying(zmd->dev)) |
586 | return -EIO; | |
587 | ||
3b1a94c8 DLM |
588 | bio = bio_alloc(GFP_NOIO, 1); |
589 | if (!bio) { | |
590 | set_bit(DMZ_META_ERROR, &mblk->state); | |
75d66ffb | 591 | return -ENOMEM; |
3b1a94c8 DLM |
592 | } |
593 | ||
594 | set_bit(DMZ_META_WRITING, &mblk->state); | |
595 | ||
596 | bio->bi_iter.bi_sector = dmz_blk2sect(block); | |
74d46992 | 597 | bio_set_dev(bio, zmd->dev->bdev); |
3b1a94c8 DLM |
598 | bio->bi_private = mblk; |
599 | bio->bi_end_io = dmz_mblock_bio_end_io; | |
600 | bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); | |
601 | bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); | |
602 | submit_bio(bio); | |
75d66ffb DF |
603 | |
604 | return 0; | |
3b1a94c8 DLM |
605 | } |
606 | ||
607 | /* | |
608 | * Read/write a metadata block. | |
609 | */ | |
610 | static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block, | |
611 | struct page *page) | |
612 | { | |
613 | struct bio *bio; | |
614 | int ret; | |
615 | ||
75d66ffb DF |
616 | if (dmz_bdev_is_dying(zmd->dev)) |
617 | return -EIO; | |
618 | ||
3b1a94c8 DLM |
619 | bio = bio_alloc(GFP_NOIO, 1); |
620 | if (!bio) | |
621 | return -ENOMEM; | |
622 | ||
623 | bio->bi_iter.bi_sector = dmz_blk2sect(block); | |
74d46992 | 624 | bio_set_dev(bio, zmd->dev->bdev); |
3b1a94c8 DLM |
625 | bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO); |
626 | bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0); | |
627 | ret = submit_bio_wait(bio); | |
628 | bio_put(bio); | |
629 | ||
e7fad909 DF |
630 | if (ret) |
631 | dmz_check_bdev(zmd->dev); | |
3b1a94c8 DLM |
632 | return ret; |
633 | } | |
634 | ||
635 | /* | |
636 | * Write super block of the specified metadata set. | |
637 | */ | |
638 | static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set) | |
639 | { | |
640 | sector_t block = zmd->sb[set].block; | |
641 | struct dmz_mblock *mblk = zmd->sb[set].mblk; | |
642 | struct dmz_super *sb = zmd->sb[set].sb; | |
643 | u64 sb_gen = zmd->sb_gen + 1; | |
644 | int ret; | |
645 | ||
646 | sb->magic = cpu_to_le32(DMZ_MAGIC); | |
647 | sb->version = cpu_to_le32(DMZ_META_VER); | |
648 | ||
649 | sb->gen = cpu_to_le64(sb_gen); | |
650 | ||
651 | sb->sb_block = cpu_to_le64(block); | |
652 | sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks); | |
653 | sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq); | |
654 | sb->nr_chunks = cpu_to_le32(zmd->nr_chunks); | |
655 | ||
656 | sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks); | |
657 | sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks); | |
658 | ||
659 | sb->crc = 0; | |
660 | sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE)); | |
661 | ||
662 | ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page); | |
663 | if (ret == 0) | |
4218a955 | 664 | ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); |
3b1a94c8 DLM |
665 | |
666 | return ret; | |
667 | } | |
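
/*
 * Note the CRC convention, mirrored by dmz_check_sb() below: the crc
 * field is zeroed first and crc32_le() is then computed over the whole
 * 4KB block, seeded with the super block generation. A checker thus
 * recomputes it the same way (illustrative sketch):
 *
 *	stored_crc = le32_to_cpu(sb->crc);
 *	sb->crc = 0;
 *	crc = crc32_le(le64_to_cpu(sb->gen), (unsigned char *)sb,
 *		       DMZ_BLOCK_SIZE);
 *	valid = (crc == stored_crc);
 */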
668 | ||
669 | /* | |
670 | * Write dirty metadata blocks to the specified set. | |
671 | */ | |
672 | static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd, | |
673 | struct list_head *write_list, | |
674 | unsigned int set) | |
675 | { | |
676 | struct dmz_mblock *mblk; | |
677 | struct blk_plug plug; | |
75d66ffb | 678 | int ret = 0, nr_mblks_submitted = 0; |
3b1a94c8 DLM |
679 | |
680 | /* Issue writes */ | |
681 | blk_start_plug(&plug); | |
75d66ffb DF |
682 | list_for_each_entry(mblk, write_list, link) { |
683 | ret = dmz_write_mblock(zmd, mblk, set); | |
684 | if (ret) | |
685 | break; | |
686 | nr_mblks_submitted++; | |
687 | } | |
3b1a94c8 DLM |
688 | blk_finish_plug(&plug); |
689 | ||
690 | /* Wait for completion */ | |
691 | list_for_each_entry(mblk, write_list, link) { | |
75d66ffb DF |
692 | if (!nr_mblks_submitted) |
693 | break; | |
3b1a94c8 DLM |
694 | wait_on_bit_io(&mblk->state, DMZ_META_WRITING, |
695 | TASK_UNINTERRUPTIBLE); | |
696 | if (test_bit(DMZ_META_ERROR, &mblk->state)) { | |
697 | clear_bit(DMZ_META_ERROR, &mblk->state); | |
e7fad909 | 698 | dmz_check_bdev(zmd->dev); |
3b1a94c8 DLM |
699 | ret = -EIO; |
700 | } | |
75d66ffb | 701 | nr_mblks_submitted--; |
3b1a94c8 DLM |
702 | } |
703 | ||
704 | /* Flush drive cache (this will also sync data) */ | |
705 | if (ret == 0) | |
4218a955 | 706 | ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); |
3b1a94c8 DLM |
707 | |
708 | return ret; | |
709 | } | |
710 | ||
711 | /* | |
712 | * Log dirty metadata blocks. | |
713 | */ | |
714 | static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd, | |
715 | struct list_head *write_list) | |
716 | { | |
717 | unsigned int log_set = zmd->mblk_primary ^ 0x1; | |
718 | int ret; | |
719 | ||
720 | /* Write dirty blocks to the log */ | |
721 | ret = dmz_write_dirty_mblocks(zmd, write_list, log_set); | |
722 | if (ret) | |
723 | return ret; | |
724 | ||
725 | /* | |
726 | * No error so far: now validate the log by updating the | |
727 | * log index super block generation. | |
728 | */ | |
729 | ret = dmz_write_sb(zmd, log_set); | |
730 | if (ret) | |
731 | return ret; | |
732 | ||
733 | return 0; | |
734 | } | |
735 | ||
736 | /* | |
737 | * Flush dirty metadata blocks. | |
738 | */ | |
739 | int dmz_flush_metadata(struct dmz_metadata *zmd) | |
740 | { | |
741 | struct dmz_mblock *mblk; | |
742 | struct list_head write_list; | |
743 | int ret; | |
744 | ||
745 | if (WARN_ON(!zmd)) | |
746 | return 0; | |
747 | ||
748 | INIT_LIST_HEAD(&write_list); | |
749 | ||
750 | /* | |
751 | * Make sure that metadata blocks are stable before logging: take | |
752 | * the write lock on the metadata semaphore to prevent target BIOs | |
753 | * from modifying metadata. | |
754 | */ | |
755 | down_write(&zmd->mblk_sem); | |
756 | ||
757 | /* | |
758 | * This is called from the target flush work and reclaim work. | |
759 | * Concurrent execution is not allowed. | |
760 | */ | |
761 | dmz_lock_flush(zmd); | |
762 | ||
75d66ffb DF |
763 | if (dmz_bdev_is_dying(zmd->dev)) { |
764 | ret = -EIO; | |
765 | goto out; | |
766 | } | |
767 | ||
3b1a94c8 DLM |
768 | /* Get dirty blocks */ |
769 | spin_lock(&zmd->mblk_lock); | |
770 | list_splice_init(&zmd->mblk_dirty_list, &write_list); | |
771 | spin_unlock(&zmd->mblk_lock); | |
772 | ||
773 | /* If there are no dirty metadata blocks, just flush the device cache */ | |
774 | if (list_empty(&write_list)) { | |
4218a955 | 775 | ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); |
e7fad909 | 776 | goto err; |
3b1a94c8 DLM |
777 | } |
778 | ||
779 | /* | |
780 | * The primary metadata set is still clean. Keep it this way until | |
781 | * all updates are successful in the secondary set. That is, use | |
782 | * the secondary set as a log. | |
783 | */ | |
784 | ret = dmz_log_dirty_mblocks(zmd, &write_list); | |
785 | if (ret) | |
e7fad909 | 786 | goto err; |
3b1a94c8 DLM |
787 | |
788 | /* | |
789 | * The log is on disk. It is now safe to update in place | |
790 | * in the primary metadata set. | |
791 | */ | |
792 | ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary); | |
793 | if (ret) | |
e7fad909 | 794 | goto err; |
3b1a94c8 DLM |
795 | |
796 | ret = dmz_write_sb(zmd, zmd->mblk_primary); | |
797 | if (ret) | |
e7fad909 | 798 | goto err; |
3b1a94c8 DLM |
799 | |
800 | while (!list_empty(&write_list)) { | |
801 | mblk = list_first_entry(&write_list, struct dmz_mblock, link); | |
802 | list_del_init(&mblk->link); | |
803 | ||
804 | spin_lock(&zmd->mblk_lock); | |
805 | clear_bit(DMZ_META_DIRTY, &mblk->state); | |
33c2865f | 806 | if (mblk->ref == 0) |
3b1a94c8 DLM |
807 | list_add_tail(&mblk->link, &zmd->mblk_lru_list); |
808 | spin_unlock(&zmd->mblk_lock); | |
809 | } | |
810 | ||
811 | zmd->sb_gen++; | |
812 | out: | |
3b1a94c8 DLM |
813 | dmz_unlock_flush(zmd); |
814 | up_write(&zmd->mblk_sem); | |
815 | ||
816 | return ret; | |
e7fad909 DF |
817 | |
818 | err: | |
819 | if (!list_empty(&write_list)) { | |
820 | spin_lock(&zmd->mblk_lock); | |
821 | list_splice(&write_list, &zmd->mblk_dirty_list); | |
822 | spin_unlock(&zmd->mblk_lock); | |
823 | } | |
824 | if (!dmz_check_bdev(zmd->dev)) | |
825 | ret = -EIO; | |
826 | goto out; | |
3b1a94c8 DLM |
827 | } |
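
/*
 * Resulting crash-consistent update ordering (summary): dirty blocks and
 * a new-generation super block are first made durable in the secondary
 * (log) set, and only then rewritten in place in the primary set. A
 * crash between the two phases leaves the secondary set holding the
 * highest valid generation, which dmz_load_sb() will then select.
 */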
828 | ||
829 | /* | |
830 | * Check super block. | |
831 | */ | |
832 | static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb) | |
833 | { | |
834 | unsigned int nr_meta_zones, nr_data_zones; | |
835 | struct dmz_dev *dev = zmd->dev; | |
836 | u32 crc, stored_crc; | |
837 | u64 gen; | |
838 | ||
839 | gen = le64_to_cpu(sb->gen); | |
840 | stored_crc = le32_to_cpu(sb->crc); | |
841 | sb->crc = 0; | |
842 | crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE); | |
843 | if (crc != stored_crc) { | |
844 | dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)", | |
845 | crc, stored_crc); | |
846 | return -ENXIO; | |
847 | } | |
848 | ||
849 | if (le32_to_cpu(sb->magic) != DMZ_MAGIC) { | |
850 | dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)", | |
851 | DMZ_MAGIC, le32_to_cpu(sb->magic)); | |
852 | return -ENXIO; | |
853 | } | |
854 | ||
855 | if (le32_to_cpu(sb->version) != DMZ_META_VER) { | |
856 | dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)", | |
857 | DMZ_META_VER, le32_to_cpu(sb->version)); | |
858 | return -ENXIO; | |
859 | } | |
860 | ||
861 | nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1) | |
862 | >> dev->zone_nr_blocks_shift; | |
863 | if (!nr_meta_zones || | |
864 | nr_meta_zones >= zmd->nr_rnd_zones) { | |
865 | dmz_dev_err(dev, "Invalid number of metadata blocks"); | |
866 | return -ENXIO; | |
867 | } | |
868 | ||
869 | if (!le32_to_cpu(sb->nr_reserved_seq) || | |
870 | le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) { | |
871 | dmz_dev_err(dev, "Invalid number of reserved sequential zones"); | |
872 | return -ENXIO; | |
873 | } | |
874 | ||
875 | nr_data_zones = zmd->nr_useable_zones - | |
876 | (nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq)); | |
877 | if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) { | |
878 | dmz_dev_err(dev, "Invalid number of chunks %u / %u", | |
879 | le32_to_cpu(sb->nr_chunks), nr_data_zones); | |
880 | return -ENXIO; | |
881 | } | |
882 | ||
883 | /* OK */ | |
884 | zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks); | |
885 | zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq); | |
886 | zmd->nr_chunks = le32_to_cpu(sb->nr_chunks); | |
887 | zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks); | |
888 | zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks); | |
889 | zmd->nr_meta_zones = nr_meta_zones; | |
890 | zmd->nr_data_zones = nr_data_zones; | |
891 | ||
892 | return 0; | |
893 | } | |
894 | ||
895 | /* | |
896 | * Read the first or second super block from disk. | |
897 | */ | |
898 | static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set) | |
899 | { | |
900 | return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block, | |
901 | zmd->sb[set].mblk->page); | |
902 | } | |
903 | ||
904 | /* | |
905 | * Determine the position of the secondary super blocks on disk. | |
906 | * This is used only if a corruption of the primary super block | |
907 | * is detected. | |
908 | */ | |
909 | static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd) | |
910 | { | |
911 | unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks; | |
912 | struct dmz_mblock *mblk; | |
913 | int i; | |
914 | ||
915 | /* Allocate a block */ | |
916 | mblk = dmz_alloc_mblock(zmd, 0); | |
917 | if (!mblk) | |
918 | return -ENOMEM; | |
919 | ||
920 | zmd->sb[1].mblk = mblk; | |
921 | zmd->sb[1].sb = mblk->data; | |
922 | ||
923 | /* Bad first super block: search for the second one */ | |
924 | zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks; | |
925 | for (i = 0; i < zmd->nr_rnd_zones - 1; i++) { | |
926 | if (dmz_read_sb(zmd, 1) != 0) | |
927 | break; | |
928 | if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC) | |
929 | return 0; | |
930 | zmd->sb[1].block += zone_nr_blocks; | |
931 | } | |
932 | ||
933 | dmz_free_mblock(zmd, mblk); | |
934 | zmd->sb[1].mblk = NULL; | |
935 | ||
936 | return -EIO; | |
937 | } | |
938 | ||
939 | /* | |
940 | * Read the first or second super block from disk. | |
941 | */ | |
942 | static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set) | |
943 | { | |
944 | struct dmz_mblock *mblk; | |
945 | int ret; | |
946 | ||
947 | /* Allocate a block */ | |
948 | mblk = dmz_alloc_mblock(zmd, 0); | |
949 | if (!mblk) | |
950 | return -ENOMEM; | |
951 | ||
952 | zmd->sb[set].mblk = mblk; | |
953 | zmd->sb[set].sb = mblk->data; | |
954 | ||
955 | /* Read super block */ | |
956 | ret = dmz_read_sb(zmd, set); | |
957 | if (ret) { | |
958 | dmz_free_mblock(zmd, mblk); | |
959 | zmd->sb[set].mblk = NULL; | |
960 | return ret; | |
961 | } | |
962 | ||
963 | return 0; | |
964 | } | |
965 | ||
966 | /* | |
967 | * Recover a metadata set. | |
968 | */ | |
969 | static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set) | |
970 | { | |
971 | unsigned int src_set = dst_set ^ 0x1; | |
972 | struct page *page; | |
973 | int i, ret; | |
974 | ||
975 | dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set); | |
976 | ||
977 | if (dst_set == 0) | |
978 | zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone); | |
979 | else { | |
980 | zmd->sb[1].block = zmd->sb[0].block + | |
981 | (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift); | |
982 | } | |
983 | ||
4218a955 | 984 | page = alloc_page(GFP_NOIO); |
3b1a94c8 DLM |
985 | if (!page) |
986 | return -ENOMEM; | |
987 | ||
988 | /* Copy metadata blocks */ | |
989 | for (i = 1; i < zmd->nr_meta_blocks; i++) { | |
990 | ret = dmz_rdwr_block(zmd, REQ_OP_READ, | |
991 | zmd->sb[src_set].block + i, page); | |
992 | if (ret) | |
993 | goto out; | |
994 | ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, | |
995 | zmd->sb[dst_set].block + i, page); | |
996 | if (ret) | |
997 | goto out; | |
998 | } | |
999 | ||
1000 | /* Finalize with the super block */ | |
1001 | if (!zmd->sb[dst_set].mblk) { | |
1002 | zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0); | |
1003 | if (!zmd->sb[dst_set].mblk) { | |
1004 | ret = -ENOMEM; | |
1005 | goto out; | |
1006 | } | |
1007 | zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data; | |
1008 | } | |
1009 | ||
1010 | ret = dmz_write_sb(zmd, dst_set); | |
1011 | out: | |
1012 | __free_pages(page, 0); | |
1013 | ||
1014 | return ret; | |
1015 | } | |
1016 | ||
1017 | /* | |
1018 | * Get super block from disk. | |
1019 | */ | |
1020 | static int dmz_load_sb(struct dmz_metadata *zmd) | |
1021 | { | |
1022 | bool sb_good[2] = {false, false}; | |
1023 | u64 sb_gen[2] = {0, 0}; | |
1024 | int ret; | |
1025 | ||
1026 | /* Read and check the primary super block */ | |
1027 | zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone); | |
1028 | ret = dmz_get_sb(zmd, 0); | |
1029 | if (ret) { | |
1030 | dmz_dev_err(zmd->dev, "Read primary super block failed"); | |
1031 | return ret; | |
1032 | } | |
1033 | ||
1034 | ret = dmz_check_sb(zmd, zmd->sb[0].sb); | |
1035 | ||
1036 | /* Read and check secondary super block */ | |
1037 | if (ret == 0) { | |
1038 | sb_good[0] = true; | |
1039 | zmd->sb[1].block = zmd->sb[0].block + | |
1040 | (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift); | |
1041 | ret = dmz_get_sb(zmd, 1); | |
1042 | } else | |
1043 | ret = dmz_lookup_secondary_sb(zmd); | |
1044 | ||
1045 | if (ret) { | |
1046 | dmz_dev_err(zmd->dev, "Read secondary super block failed"); | |
1047 | return ret; | |
1048 | } | |
1049 | ||
1050 | ret = dmz_check_sb(zmd, zmd->sb[1].sb); | |
1051 | if (ret == 0) | |
1052 | sb_good[1] = true; | |
1053 | ||
1054 | /* Use highest generation sb first */ | |
1055 | if (!sb_good[0] && !sb_good[1]) { | |
1056 | dmz_dev_err(zmd->dev, "No valid super block found"); | |
1057 | return -EIO; | |
1058 | } | |
1059 | ||
1060 | if (sb_good[0]) | |
1061 | sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen); | |
1062 | else | |
1063 | ret = dmz_recover_mblocks(zmd, 0); | |
1064 | ||
1065 | if (sb_good[1]) | |
1066 | sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen); | |
1067 | else | |
1068 | ret = dmz_recover_mblocks(zmd, 1); | |
1069 | ||
1070 | if (ret) { | |
1071 | dmz_dev_err(zmd->dev, "Recovery failed"); | |
1072 | return -EIO; | |
1073 | } | |
1074 | ||
1075 | if (sb_gen[0] >= sb_gen[1]) { | |
1076 | zmd->sb_gen = sb_gen[0]; | |
1077 | zmd->mblk_primary = 0; | |
1078 | } else { | |
1079 | zmd->sb_gen = sb_gen[1]; | |
1080 | zmd->mblk_primary = 1; | |
1081 | } | |
1082 | ||
1083 | dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)", | |
1084 | zmd->mblk_primary, zmd->sb_gen); | |
1085 | ||
1086 | return 0; | |
1087 | } | |
1088 | ||
1089 | /* | |
1090 | * Initialize a zone descriptor. | |
1091 | */ | |
d4100351 | 1092 | static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data) |
3b1a94c8 | 1093 | { |
d4100351 CH |
1094 | struct dmz_metadata *zmd = data; |
1095 | struct dm_zone *zone = &zmd->zones[idx]; | |
3b1a94c8 DLM |
1096 | struct dmz_dev *dev = zmd->dev; |
1097 | ||
1098 | /* Ignore the eventual last runt (smaller) zone */ | |
1099 | if (blkz->len != dev->zone_nr_sectors) { | |
1100 | if (blkz->start + blkz->len == dev->capacity) | |
1101 | return 0; | |
1102 | return -ENXIO; | |
1103 | } | |
1104 | ||
1105 | INIT_LIST_HEAD(&zone->link); | |
1106 | atomic_set(&zone->refcount, 0); | |
1107 | zone->chunk = DMZ_MAP_UNMAPPED; | |
1108 | ||
d4100351 CH |
1109 | switch (blkz->type) { |
1110 | case BLK_ZONE_TYPE_CONVENTIONAL: | |
3b1a94c8 | 1111 | set_bit(DMZ_RND, &zone->flags); |
d4100351 CH |
1112 | break; |
1113 | case BLK_ZONE_TYPE_SEQWRITE_REQ: | |
1114 | case BLK_ZONE_TYPE_SEQWRITE_PREF: | |
3b1a94c8 | 1115 | set_bit(DMZ_SEQ, &zone->flags); |
d4100351 CH |
1116 | break; |
1117 | default: | |
3b1a94c8 | 1118 | return -ENXIO; |
d4100351 | 1119 | } |
3b1a94c8 DLM |
1120 | |
1121 | if (dmz_is_rnd(zone)) | |
1122 | zone->wp_block = 0; | |
1123 | else | |
1124 | zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start); | |
1125 | ||
d4100351 CH |
1126 | if (blkz->cond == BLK_ZONE_COND_OFFLINE) |
1127 | set_bit(DMZ_OFFLINE, &zone->flags); | |
1128 | else if (blkz->cond == BLK_ZONE_COND_READONLY) | |
1129 | set_bit(DMZ_READ_ONLY, &zone->flags); | |
1130 | else { | |
3b1a94c8 DLM |
1131 | zmd->nr_useable_zones++; |
1132 | if (dmz_is_rnd(zone)) { | |
1133 | zmd->nr_rnd_zones++; | |
1134 | if (!zmd->sb_zone) { | |
1135 | /* Super block zone */ | |
1136 | zmd->sb_zone = zone; | |
1137 | } | |
1138 | } | |
1139 | } | |
1140 | ||
1141 | return 0; | |
1142 | } | |
1143 | ||
1144 | /* | |
1145 | * Free zones descriptors. | |
1146 | */ | |
1147 | static void dmz_drop_zones(struct dmz_metadata *zmd) | |
1148 | { | |
1149 | kfree(zmd->zones); | |
1150 | zmd->zones = NULL; | |
1151 | } | |

/*
 * Allocate and initialize zone descriptors using the zone
 * information from disk.
 */
static int dmz_init_zones(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	int ret;

	/* Init */
	zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
	zmd->zone_nr_bitmap_blocks =
		max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
	zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
					DMZ_BLOCK_SIZE_BITS);

	/* Allocate zone array */
	zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
	if (!zmd->zones)
		return -ENOMEM;

	dmz_dev_info(dev, "Using %zu B for zone information",
		     sizeof(struct dm_zone) * dev->nr_zones);

	/*
	 * Get zone information and initialize zone descriptors. At the same
	 * time, determine where the super block should be: first block of the
	 * first randomly writable zone.
	 */
	ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, dmz_init_zone,
				  zmd);
	if (ret < 0) {
		dmz_drop_zones(zmd);
		return ret;
	}

	return 0;
}

static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
			      void *data)
{
	struct dm_zone *zone = data;

	clear_bit(DMZ_OFFLINE, &zone->flags);
	clear_bit(DMZ_READ_ONLY, &zone->flags);
	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_seq(zone))
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
	else
		zone->wp_block = 0;
	return 0;
}
1210 | ||
1211 | /* | |
1212 | * Update a zone information. | |
1213 | */ | |
1214 | static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone) | |
1215 | { | |
bd976e52 | 1216 | unsigned int noio_flag; |
3b1a94c8 DLM |
1217 | int ret; |
1218 | ||
bd976e52 DLM |
1219 | /* |
1220 | * Get zone information from disk. Since blkdev_report_zones() uses | |
1221 | * GFP_KERNEL by default for memory allocations, set the per-task | |
1222 | * PF_MEMALLOC_NOIO flag so that all allocations are done as if | |
1223 | * GFP_NOIO was specified. | |
1224 | */ | |
1225 | noio_flag = memalloc_noio_save(); | |
d4100351 CH |
1226 | ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1, |
1227 | dmz_update_zone_cb, zone); | |
bd976e52 | 1228 | memalloc_noio_restore(noio_flag); |
d4100351 CH |
1229 | |
1230 | if (ret == 0) | |
7aedf75f | 1231 | ret = -EIO; |
d4100351 | 1232 | if (ret < 0) { |
3b1a94c8 DLM |
1233 | dmz_dev_err(zmd->dev, "Get zone %u report failed", |
1234 | dmz_id(zmd, zone)); | |
e7fad909 | 1235 | dmz_check_bdev(zmd->dev); |
3b1a94c8 DLM |
1236 | return ret; |
1237 | } | |
1238 | ||
3b1a94c8 DLM |
1239 | return 0; |
1240 | } | |
1241 | ||
1242 | /* | |
1243 | * Check a zone write pointer position when the zone is marked | |
1244 | * with the sequential write error flag. | |
1245 | */ | |
1246 | static int dmz_handle_seq_write_err(struct dmz_metadata *zmd, | |
1247 | struct dm_zone *zone) | |
1248 | { | |
1249 | unsigned int wp = 0; | |
1250 | int ret; | |
1251 | ||
1252 | wp = zone->wp_block; | |
1253 | ret = dmz_update_zone(zmd, zone); | |
1254 | if (ret) | |
1255 | return ret; | |
1256 | ||
1257 | dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)", | |
1258 | dmz_id(zmd, zone), zone->wp_block, wp); | |
1259 | ||
1260 | if (zone->wp_block < wp) { | |
1261 | dmz_invalidate_blocks(zmd, zone, zone->wp_block, | |
1262 | wp - zone->wp_block); | |
1263 | } | |
1264 | ||
1265 | return 0; | |
1266 | } | |
1267 | ||
1268 | static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id) | |
1269 | { | |
1270 | return &zmd->zones[zone_id]; | |
1271 | } | |
1272 | ||
1273 | /* | |
1274 | * Reset a zone write pointer. | |
1275 | */ | |
1276 | static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone) | |
1277 | { | |
1278 | int ret; | |
1279 | ||
1280 | /* | |
1281 | * Ignore offline zones, read only zones, | |
1282 | * and conventional zones. | |
1283 | */ | |
1284 | if (dmz_is_offline(zone) || | |
1285 | dmz_is_readonly(zone) || | |
1286 | dmz_is_rnd(zone)) | |
1287 | return 0; | |
1288 | ||
1289 | if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) { | |
1290 | struct dmz_dev *dev = zmd->dev; | |
1291 | ||
6c1b1da5 AJ |
1292 | ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET, |
1293 | dmz_start_sect(zmd, zone), | |
1294 | dev->zone_nr_sectors, GFP_NOIO); | |
3b1a94c8 DLM |
1295 | if (ret) { |
1296 | dmz_dev_err(dev, "Reset zone %u failed %d", | |
1297 | dmz_id(zmd, zone), ret); | |
1298 | return ret; | |
1299 | } | |
1300 | } | |
1301 | ||
1302 | /* Clear write error bit and rewind write pointer position */ | |
1303 | clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags); | |
1304 | zone->wp_block = 0; | |
1305 | ||
1306 | return 0; | |
1307 | } | |
1308 | ||
1309 | static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone); | |
1310 | ||
/*
 * Initialize chunk mapping.
 */
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *dzone, *bzone;
	struct dmz_mblock *dmap_mblk = NULL;
	struct dmz_map *dmap;
	unsigned int i = 0, e = 0, chunk = 0;
	unsigned int dzone_id;
	unsigned int bzone_id;

	/* Metadata block array for the chunk mapping table */
	zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
				sizeof(struct dmz_mblock *), GFP_KERNEL);
	if (!zmd->map_mblk)
		return -ENOMEM;

	/* Get chunk mapping table blocks and initialize zone mapping */
	while (chunk < zmd->nr_chunks) {
		if (!dmap_mblk) {
			/* Get mapping block */
			dmap_mblk = dmz_get_mblock(zmd, i + 1);
			if (IS_ERR(dmap_mblk))
				return PTR_ERR(dmap_mblk);
			zmd->map_mblk[i] = dmap_mblk;
			dmap = (struct dmz_map *) dmap_mblk->data;
			i++;
			e = 0;
		}

		/* Check data zone */
		dzone_id = le32_to_cpu(dmap[e].dzone_id);
		if (dzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (dzone_id >= dev->nr_zones) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
				    chunk, dzone_id);
			return -EIO;
		}

		dzone = dmz_get(zmd, dzone_id);
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = chunk;
		dmz_get_zone_weight(zmd, dzone);

		if (dmz_is_rnd(dzone))
			list_add_tail(&dzone->link, &zmd->map_rnd_list);
		else
			list_add_tail(&dzone->link, &zmd->map_seq_list);

		/* Check buffer zone */
		bzone_id = le32_to_cpu(dmap[e].bzone_id);
		if (bzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (bzone_id >= dev->nr_zones) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
				    chunk, bzone_id);
			return -EIO;
		}

		bzone = dmz_get(zmd, bzone_id);
		if (!dmz_is_rnd(bzone)) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u",
				    chunk, bzone_id);
			return -EIO;
		}

		set_bit(DMZ_DATA, &bzone->flags);
		set_bit(DMZ_BUF, &bzone->flags);
		bzone->chunk = chunk;
		bzone->bzone = dzone;
		dzone->bzone = bzone;
		dmz_get_zone_weight(zmd, bzone);
		list_add_tail(&bzone->link, &zmd->map_rnd_list);
next:
		chunk++;
		e++;
		if (e >= DMZ_MAP_ENTRIES)
			dmap_mblk = NULL;
	}

	/*
	 * At this point, only meta zones and mapped data zones were
	 * fully initialized. All remaining zones are unmapped data
	 * zones. Finish initializing those here.
	 */
	for (i = 0; i < dev->nr_zones; i++) {
		dzone = dmz_get(zmd, i);
		if (dmz_is_meta(dzone))
			continue;

		if (dmz_is_rnd(dzone))
			zmd->nr_rnd++;
		else
			zmd->nr_seq++;

		if (dmz_is_data(dzone)) {
			/* Already initialized */
			continue;
		}

		/* Unmapped data zone */
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = DMZ_MAP_UNMAPPED;
		if (dmz_is_rnd(dzone)) {
			list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
			atomic_inc(&zmd->unmap_nr_rnd);
		} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
			list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
			atomic_inc(&zmd->nr_reserved_seq_zones);
			zmd->nr_seq--;
		} else {
			list_add_tail(&dzone->link, &zmd->unmap_seq_list);
			atomic_inc(&zmd->unmap_nr_seq);
		}
	}

	return 0;
}
1434 | ||
1435 | /* | |
1436 | * Set a data chunk mapping. | |
1437 | */ | |
1438 | static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, | |
1439 | unsigned int dzone_id, unsigned int bzone_id) | |
1440 | { | |
1441 | struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT]; | |
1442 | struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data; | |
1443 | int map_idx = chunk & DMZ_MAP_ENTRIES_MASK; | |
1444 | ||
1445 | dmap[map_idx].dzone_id = cpu_to_le32(dzone_id); | |
1446 | dmap[map_idx].bzone_id = cpu_to_le32(bzone_id); | |
1447 | dmz_dirty_mblock(zmd, dmap_mblk); | |
1448 | } | |
1449 | ||
1450 | /* | |
1451 | * The list of mapped zones is maintained in LRU order. | |
1452 | * This rotates a zone at the end of its map list. | |
1453 | */ | |
1454 | static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone) | |
1455 | { | |
1456 | if (list_empty(&zone->link)) | |
1457 | return; | |
1458 | ||
1459 | list_del_init(&zone->link); | |
1460 | if (dmz_is_seq(zone)) { | |
1461 | /* LRU rotate sequential zone */ | |
1462 | list_add_tail(&zone->link, &zmd->map_seq_list); | |
1463 | } else { | |
1464 | /* LRU rotate random zone */ | |
1465 | list_add_tail(&zone->link, &zmd->map_rnd_list); | |
1466 | } | |
1467 | } | |
1468 | ||
1469 | /* | |
1470 | * The list of mapped random zones is maintained | |
1471 | * in LRU order. This rotates a zone at the end of the list. | |
1472 | */ | |
1473 | static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone) | |
1474 | { | |
1475 | __dmz_lru_zone(zmd, zone); | |
1476 | if (zone->bzone) | |
1477 | __dmz_lru_zone(zmd, zone->bzone); | |
1478 | } | |
1479 | ||
1480 | /* | |
1481 | * Wait for any zone to be freed. | |
1482 | */ | |
1483 | static void dmz_wait_for_free_zones(struct dmz_metadata *zmd) | |
1484 | { | |
1485 | DEFINE_WAIT(wait); | |
1486 | ||
1487 | prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE); | |
1488 | dmz_unlock_map(zmd); | |
1489 | dmz_unlock_metadata(zmd); | |
1490 | ||
1491 | io_schedule_timeout(HZ); | |
1492 | ||
1493 | dmz_lock_metadata(zmd); | |
1494 | dmz_lock_map(zmd); | |
1495 | finish_wait(&zmd->free_wq, &wait); | |
1496 | } | |
1497 | ||
1498 | /* | |
1499 | * Lock a zone for reclaim (set the zone RECLAIM bit). | |
1500 | * Returns false if the zone cannot be locked or if it is already locked | |
1501 | * and 1 otherwise. | |
1502 | */ | |
1503 | int dmz_lock_zone_reclaim(struct dm_zone *zone) | |
1504 | { | |
1505 | /* Active zones cannot be reclaimed */ | |
1506 | if (dmz_is_active(zone)) | |
1507 | return 0; | |
1508 | ||
1509 | return !test_and_set_bit(DMZ_RECLAIM, &zone->flags); | |
1510 | } | |
1511 | ||
1512 | /* | |
1513 | * Clear a zone reclaim flag. | |
1514 | */ | |
1515 | void dmz_unlock_zone_reclaim(struct dm_zone *zone) | |
1516 | { | |
1517 | WARN_ON(dmz_is_active(zone)); | |
1518 | WARN_ON(!dmz_in_reclaim(zone)); | |
1519 | ||
1520 | clear_bit_unlock(DMZ_RECLAIM, &zone->flags); | |
1521 | smp_mb__after_atomic(); | |
1522 | wake_up_bit(&zone->flags, DMZ_RECLAIM); | |
1523 | } | |
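
/*
 * Illustrative use of the reclaim lock (see the zone selection helpers
 * below for real callers):
 *
 *	if (dmz_lock_zone_reclaim(zone)) {
 *		// ... relocate the zone's valid blocks ...
 *		dmz_unlock_zone_reclaim(zone);
 *	}
 */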
1524 | ||
1525 | /* | |
1526 | * Wait for a zone reclaim to complete. | |
1527 | */ | |
1528 | static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone) | |
1529 | { | |
1530 | dmz_unlock_map(zmd); | |
1531 | dmz_unlock_metadata(zmd); | |
1532 | wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ); | |
1533 | dmz_lock_metadata(zmd); | |
1534 | dmz_lock_map(zmd); | |
1535 | } | |
1536 | ||
1537 | /* | |
1538 | * Select a random write zone for reclaim. | |
1539 | */ | |
1540 | static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd) | |
1541 | { | |
1542 | struct dm_zone *dzone = NULL; | |
1543 | struct dm_zone *zone; | |
1544 | ||
1545 | if (list_empty(&zmd->map_rnd_list)) | |
b234c6d7 | 1546 | return ERR_PTR(-EBUSY); |
3b1a94c8 DLM |
1547 | |
1548 | list_for_each_entry(zone, &zmd->map_rnd_list, link) { | |
1549 | if (dmz_is_buf(zone)) | |
1550 | dzone = zone->bzone; | |
1551 | else | |
1552 | dzone = zone; | |
1553 | if (dmz_lock_zone_reclaim(dzone)) | |
1554 | return dzone; | |
1555 | } | |
1556 | ||
b234c6d7 | 1557 | return ERR_PTR(-EBUSY); |
3b1a94c8 DLM |
1558 | } |
1559 | ||
1560 | /* | |
1561 | * Select a buffered sequential zone for reclaim. | |
1562 | */ | |
1563 | static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd) | |
1564 | { | |
1565 | struct dm_zone *zone; | |
1566 | ||
1567 | if (list_empty(&zmd->map_seq_list)) | |
e0702d90 | 1568 | return ERR_PTR(-EBUSY); |
3b1a94c8 DLM |
1569 | |
1570 | list_for_each_entry(zone, &zmd->map_seq_list, link) { | |
1571 | if (!zone->bzone) | |
1572 | continue; | |
1573 | if (dmz_lock_zone_reclaim(zone)) | |
1574 | return zone; | |
1575 | } | |
1576 | ||
e0702d90 | 1577 | return ERR_PTR(-EBUSY); |
3b1a94c8 DLM |
1578 | } |
1579 | ||
1580 | /* | |
1581 | * Select a zone for reclaim. | |
1582 | */ | |
1583 | struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd) | |
1584 | { | |
1585 | struct dm_zone *zone; | |
1586 | ||
1587 | /* | |
1588 | * Search for a zone candidate to reclaim: 2 cases are possible. | |
1589 | * (1) There is no free sequential zones. Then a random data zone | |
1590 | * cannot be reclaimed. So choose a sequential zone to reclaim so | |
1591 | * that afterward a random zone can be reclaimed. | |
1592 | * (2) At least one free sequential zone is available, then choose | |
1593 | * the oldest random zone (data or buffer) that can be locked. | |
1594 | */ | |
1595 | dmz_lock_map(zmd); | |
1596 | if (list_empty(&zmd->reserved_seq_zones_list)) | |
1597 | zone = dmz_get_seq_zone_for_reclaim(zmd); | |
1598 | else | |
1599 | zone = dmz_get_rnd_zone_for_reclaim(zmd); | |
1600 | dmz_unlock_map(zmd); | |
1601 | ||
1602 | return zone; | |
1603 | } | |
1604 | ||
3b1a94c8 DLM |
1605 | /* |
1606 | * Get the zone mapping a chunk, if the chunk is mapped already. | |
1607 | * If no mapping exist and the operation is WRITE, a zone is | |
1608 | * allocated and used to map the chunk. | |
1609 | * The zone returned will be set to the active state. | |
1610 | */ | |
1611 | struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op) | |
1612 | { | |
1613 | struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT]; | |
1614 | struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data; | |
1615 | int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK; | |
1616 | unsigned int dzone_id; | |
1617 | struct dm_zone *dzone = NULL; | |
1618 | int ret = 0; | |
1619 | ||
1620 | dmz_lock_map(zmd); | |
1621 | again: | |
1622 | /* Get the chunk mapping */ | |
1623 | dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id); | |
1624 | if (dzone_id == DMZ_MAP_UNMAPPED) { | |
1625 | /* | |
1626 | * Read or discard in unmapped chunks are fine. But for | |
1627 | * writes, we need a mapping, so get one. | |
1628 | */ | |
1629 | if (op != REQ_OP_WRITE) | |
1630 | goto out; | |
1631 | ||
ad1bd578 | 1632 | /* Allocate a random zone */ |
3b1a94c8 DLM |
1633 | dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); |
1634 | if (!dzone) { | |
75d66ffb DF |
1635 | if (dmz_bdev_is_dying(zmd->dev)) { |
1636 | dzone = ERR_PTR(-EIO); | |
1637 | goto out; | |
1638 | } | |
3b1a94c8 DLM |
1639 | dmz_wait_for_free_zones(zmd); |
1640 | goto again; | |
1641 | } | |
1642 | ||
1643 | dmz_map_zone(zmd, dzone, chunk); | |
1644 | ||
1645 | } else { | |
1646 | /* The chunk is already mapped: get the mapping zone */ | |
1647 | dzone = dmz_get(zmd, dzone_id); | |
1648 | if (dzone->chunk != chunk) { | |
1649 | dzone = ERR_PTR(-EIO); | |
1650 | goto out; | |
1651 | } | |
1652 | ||
1653 | /* Repair write pointer if the sequential dzone has error */ | |
1654 | if (dmz_seq_write_err(dzone)) { | |
1655 | ret = dmz_handle_seq_write_err(zmd, dzone); | |
1656 | if (ret) { | |
1657 | dzone = ERR_PTR(-EIO); | |
1658 | goto out; | |
1659 | } | |
1660 | clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags); | |
1661 | } | |
1662 | } | |
1663 | ||
1664 | /* | |
1665 | * If the zone is being reclaimed, the chunk mapping may change | |
1666 | * to a different zone. So wait for reclaim and retry. Otherwise, | |
1667 | * activate the zone (this will prevent reclaim from touching it). | |
1668 | */ | |
1669 | if (dmz_in_reclaim(dzone)) { | |
1670 | dmz_wait_for_reclaim(zmd, dzone); | |
1671 | goto again; | |
1672 | } | |
1673 | dmz_activate_zone(dzone); | |
1674 | dmz_lru_zone(zmd, dzone); | |
1675 | out: | |
1676 | dmz_unlock_map(zmd); | |
1677 | ||
1678 | return dzone; | |
1679 | } | |
1680 | ||
1681 | /* | |
1682 | * Write and discard change the block validity of data zones and their buffer | |
1683 | * zones. Check here that valid blocks are still present. If all blocks are | |
1684 | * invalid, the zones can be unmapped on the fly without waiting for reclaim | |
1685 | * to do it. | |
1686 | */ | |
1687 | void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone) | |
1688 | { | |
1689 | struct dm_zone *bzone; | |
1690 | ||
1691 | dmz_lock_map(zmd); | |
1692 | ||
1693 | bzone = dzone->bzone; | |
1694 | if (bzone) { | |
1695 | if (dmz_weight(bzone)) | |
1696 | dmz_lru_zone(zmd, bzone); | |
1697 | else { | |
1698 | /* Empty buffer zone: reclaim it */ | |
1699 | dmz_unmap_zone(zmd, bzone); | |
1700 | dmz_free_zone(zmd, bzone); | |
1701 | bzone = NULL; | |
1702 | } | |
1703 | } | |
1704 | ||
1705 | /* Deactivate the data zone */ | |
1706 | dmz_deactivate_zone(dzone); | |
1707 | if (dmz_is_active(dzone) || bzone || dmz_weight(dzone)) | |
1708 | dmz_lru_zone(zmd, dzone); | |
1709 | else { | |
1710 | /* Unbuffered inactive empty data zone: reclaim it */ | |
1711 | dmz_unmap_zone(zmd, dzone); | |
1712 | dmz_free_zone(zmd, dzone); | |
1713 | } | |
1714 | ||
1715 | dmz_unlock_map(zmd); | |
1716 | } | |
1717 | ||
1718 | /* | |
1719 | * Allocate and map a random zone to buffer a chunk | |
1720 | * already mapped to a sequential zone. | |
1721 | */ | |
struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
				     struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);
again:
	bzone = dzone->bzone;
	if (bzone)
		goto out;

	/* Allocate a random zone */
	bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
	if (!bzone) {
		if (dmz_bdev_is_dying(zmd->dev)) {
			bzone = ERR_PTR(-EIO);
			goto out;
		}
		dmz_wait_for_free_zones(zmd);
		goto again;
	}

	/* Update the chunk mapping */
	dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone),
			      dmz_id(zmd, bzone));

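	/*
	 * Mark bzone as a buffer zone and cross-link it with its data
	 * zone: each points to the other through the bzone field, so
	 * either zone can be reached from its peer during reclaim and
	 * unmap.
	 */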
	set_bit(DMZ_BUF, &bzone->flags);
	bzone->chunk = dzone->chunk;
	bzone->bzone = dzone;
	dzone->bzone = bzone;
	list_add_tail(&bzone->link, &zmd->map_rnd_list);
out:
	dmz_unlock_map(zmd);

	return bzone;
}

/*
 * Get an unmapped (free) zone.
 * This must be called with the mapping lock held.
 */
struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
{
	struct list_head *list;
	struct dm_zone *zone;

	if (flags & DMZ_ALLOC_RND)
		list = &zmd->unmap_rnd_list;
	else
		list = &zmd->unmap_seq_list;
again:
	if (list_empty(list)) {
		/*
		 * No free zone: if this is for reclaim, allow using the
		 * reserved sequential zones.
		 */
		if (!(flags & DMZ_ALLOC_RECLAIM) ||
		    list_empty(&zmd->reserved_seq_zones_list))
			return NULL;

		zone = list_first_entry(&zmd->reserved_seq_zones_list,
					struct dm_zone, link);
		list_del_init(&zone->link);
		atomic_dec(&zmd->nr_reserved_seq_zones);
		return zone;
	}

	zone = list_first_entry(list, struct dm_zone, link);
	list_del_init(&zone->link);

	if (dmz_is_rnd(zone))
		atomic_dec(&zmd->unmap_nr_rnd);
	else
		atomic_dec(&zmd->unmap_nr_seq);

	if (dmz_is_offline(zone)) {
		dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone));
		zone = NULL;
		goto again;
	}

	return zone;
}
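
/*
 * Illustrative sketch: a reclaim caller passes DMZ_ALLOC_RECLAIM so
 * that, when the regular free lists are empty, the allocation can fall
 * back to the reserved sequential zones, e.g.:
 *
 *	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
 *
 * A NULL return then means that no zone of any kind is available.
 */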

/*
 * Free a zone.
 * This must be called with the mapping lock held.
 */
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	/* If this is a sequential zone, reset it */
	if (dmz_is_seq(zone))
		dmz_reset_zone(zmd, zone);

	/* Return the zone to the unmap list matching its type */
	if (dmz_is_rnd(zone)) {
		list_add_tail(&zone->link, &zmd->unmap_rnd_list);
		atomic_inc(&zmd->unmap_nr_rnd);
	} else if (atomic_read(&zmd->nr_reserved_seq_zones) <
		   zmd->nr_reserved_seq) {
		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
		atomic_inc(&zmd->nr_reserved_seq_zones);
	} else {
		list_add_tail(&zone->link, &zmd->unmap_seq_list);
		atomic_inc(&zmd->unmap_nr_seq);
	}

	wake_up_all(&zmd->free_wq);
}

/*
 * Map a chunk to a zone.
 * This must be called with the mapping lock held.
 */
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
		  unsigned int chunk)
{
	/* Set the chunk mapping */
	dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
			      DMZ_MAP_UNMAPPED);
	dzone->chunk = chunk;
	if (dmz_is_rnd(dzone))
		list_add_tail(&dzone->link, &zmd->map_rnd_list);
	else
		list_add_tail(&dzone->link, &zmd->map_seq_list);
}

/*
 * Unmap a zone.
 * This must be called with the mapping lock held.
 */
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int chunk = zone->chunk;
	unsigned int dzone_id;

	if (chunk == DMZ_MAP_UNMAPPED) {
		/* Already unmapped */
		return;
	}

	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
		/*
		 * Unmapping the chunk buffer zone: clear only
		 * the chunk buffer mapping
		 */
		dzone_id = dmz_id(zmd, zone->bzone);
		zone->bzone->bzone = NULL;
		zone->bzone = NULL;

	} else {
		/*
		 * Unmapping the chunk data zone: the zone must
		 * not be buffered.
		 */
		if (WARN_ON(zone->bzone)) {
			zone->bzone->bzone = NULL;
			zone->bzone = NULL;
		}
		dzone_id = DMZ_MAP_UNMAPPED;
	}

	dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);

	zone->chunk = DMZ_MAP_UNMAPPED;
	list_del_init(&zone->link);
}

/*
 * Set @nr_bits bits in @bitmap starting from @bit.
 * Return the number of bits changed from 0 to 1.
 */
static unsigned int dmz_set_bits(unsigned long *bitmap,
				 unsigned int bit, unsigned int nr_bits)
{
	unsigned long *addr;
	unsigned int end = bit + nr_bits;
	unsigned int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to set the whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == 0) {
				*addr = ULONG_MAX;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (!test_and_set_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
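
/*
 * Worked example (hypothetical numbers, assuming BITS_PER_LONG == 64):
 * dmz_set_bits(bitmap, 64, 130) first sees bit 64 word-aligned with 130
 * bits remaining, so if words 1 and 2 are all-zero they are set whole
 * (n += 128); the last 2 bits are then set one at a time with
 * test_and_set_bit(). Bits that were already set are not counted, so
 * the return value reflects only 0-to-1 transitions.
 */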

/*
 * Get the bitmap block storing the bit for chunk_block in zone.
 */
static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
					 struct dm_zone *zone,
					 sector_t chunk_block)
{
	sector_t bitmap_block = 1 + zmd->nr_map_blocks +
		(sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) +
		(chunk_block >> DMZ_BLOCK_SHIFT_BITS);

	return dmz_get_mblock(zmd, bitmap_block);
}
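
/*
 * Worked example (hypothetical numbers): a 4KB bitmap block holds
 * 32768 bits, so DMZ_BLOCK_SHIFT_BITS is 15. With nr_map_blocks == 8
 * and zone_nr_bitmap_blocks == 2, the bit for block 40000 of zone 3
 * lives in metadata block 1 + 8 + 3 * 2 + (40000 >> 15) = 16, that is,
 * past the super block and the chunk mapping table.
 */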

/*
 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
 */
int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			  struct dm_zone *to_zone)
{
	struct dmz_mblock *from_mblk, *to_mblk;
	sector_t chunk_block = 0;

	/* Get the zones' bitmap blocks */
	while (chunk_block < zmd->dev->zone_nr_blocks) {
		from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
		if (IS_ERR(from_mblk))
			return PTR_ERR(from_mblk);
		to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
		if (IS_ERR(to_mblk)) {
			dmz_release_mblock(zmd, from_mblk);
			return PTR_ERR(to_mblk);
		}

		memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
		dmz_dirty_mblock(zmd, to_mblk);

		dmz_release_mblock(zmd, to_mblk);
		dmz_release_mblock(zmd, from_mblk);

		chunk_block += zmd->zone_bits_per_mblk;
	}

	to_zone->weight = from_zone->weight;

	return 0;
}

/*
 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
 * starting from chunk_block.
 */
int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			   struct dm_zone *to_zone, sector_t chunk_block)
{
	unsigned int nr_blocks;
	int ret;

	/* Walk the zones' bitmap blocks */
	while (chunk_block < zmd->dev->zone_nr_blocks) {
		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
		if (ret <= 0)
			return ret;

		nr_blocks = ret;
		ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
		if (ret)
			return ret;

		chunk_block += nr_blocks;
	}

	return 0;
}

/*
 * Validate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks",
		      dmz_id(zmd, zone), (unsigned long long)chunk_block,
		      nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Set bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (likely(zone->weight + n <= zone_nr_blocks))
		zone->weight += n;
	else {
		dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u",
			     dmz_id(zmd, zone), zone->weight,
			     zone_nr_blocks - n);
		zone->weight = zone_nr_blocks;
	}

	return 0;
}

/*
 * Clear nr_bits bits in bitmap starting from bit.
 * Return the number of bits cleared.
 */
static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to clear the whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				*addr = 0;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_and_clear_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}

/*
 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
		      dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Clear bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_clear_bits((unsigned long *)mblk->data,
				       bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (zone->weight >= n)
		zone->weight -= n;
	else {
		dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
			     dmz_id(zmd, zone), zone->weight, n);
		zone->weight = 0;
	}

	return 0;
}

/*
 * Get the validity bit value of a block.
 */
static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block)
{
	struct dmz_mblock *mblk;
	int ret;

	WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks);

	/* Get bitmap block */
	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
	if (IS_ERR(mblk))
		return PTR_ERR(mblk);

	/* Test the bit at the block offset */
	ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
		       (unsigned long *) mblk->data) != 0;

	dmz_release_mblock(zmd, mblk);

	return ret;
}

/*
 * Return the number of blocks from chunk_block to the first block with a bit
 * value specified by set. Search at most nr_blocks blocks from chunk_block.
 */
static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
				 sector_t chunk_block, unsigned int nr_blocks,
				 int set)
{
	struct dmz_mblock *mblk;
	unsigned int bit, set_bit, nr_bits;
	unsigned int zone_bits = zmd->zone_bits_per_mblk;
	unsigned long *bitmap;
	int n = 0;

	WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Search this bitmap block from the block offset */
		bitmap = (unsigned long *) mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zone_bits - bit);
		if (set)
			set_bit = find_next_bit(bitmap, zone_bits, bit);
		else
			set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
		dmz_release_mblock(zmd, mblk);

		n += set_bit - bit;
		if (set_bit < zone_bits)
			break;

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	return n;
}

/*
 * Test if chunk_block is valid. If it is, the number of consecutive
 * valid blocks from chunk_block will be returned.
 */
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
		    sector_t chunk_block)
{
	int valid;

	valid = dmz_test_block(zmd, zone, chunk_block);
	if (valid <= 0)
		return valid;

	/* The block is valid: get the number of valid blocks from block */
	return dmz_to_next_set_block(zmd, zone, chunk_block,
				     zmd->dev->zone_nr_blocks - chunk_block, 0);
}

/*
 * Find the first valid block from @chunk_block in @zone.
 * If such a block is found, its number is returned using
 * @chunk_block and the total number of valid blocks from @chunk_block
 * is returned.
 */
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t *chunk_block)
{
	sector_t start_block = *chunk_block;
	int ret;

	ret = dmz_to_next_set_block(zmd, zone, start_block,
				    zmd->dev->zone_nr_blocks - start_block, 1);
	if (ret < 0)
		return ret;

	start_block += ret;
	*chunk_block = start_block;

	return dmz_to_next_set_block(zmd, zone, start_block,
				     zmd->dev->zone_nr_blocks - start_block, 0);
}
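
/*
 * Worked example (hypothetical bitmap): with a validity bitmap starting
 * 0011 1000... and *chunk_block == 0, the first dmz_to_next_set_block()
 * call (set == 1) skips the 2 invalid blocks, so *chunk_block becomes 2;
 * the second call (set == 0) then counts the 3 consecutive valid blocks
 * and 3 is returned.
 */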

/*
 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
 */
static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			addr = (unsigned long *)bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}

/*
 * Compute a zone's weight, i.e. the number of its valid blocks.
 */
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_mblock *mblk;
	sector_t chunk_block = 0;
	unsigned int bit, nr_bits;
	unsigned int nr_blocks = zmd->dev->zone_nr_blocks;
	void *bitmap;
	int n = 0;

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk)) {
			n = 0;
			break;
		}

		/* Count bits in this block */
		bitmap = mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
		n += dmz_count_bits(bitmap, bit, nr_bits);

		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	zone->weight = n;
}

/*
 * Clean up the zoned metadata resources.
 */
static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
{
	struct rb_root *root;
	struct dmz_mblock *mblk, *next;
	int i;

	/* Release zone mapping resources */
	if (zmd->map_mblk) {
		for (i = 0; i < zmd->nr_map_blocks; i++)
			dmz_release_mblock(zmd, zmd->map_mblk[i]);
		kfree(zmd->map_mblk);
		zmd->map_mblk = NULL;
	}

	/* Release super blocks */
	for (i = 0; i < 2; i++) {
		if (zmd->sb[i].mblk) {
			dmz_free_mblock(zmd, zmd->sb[i].mblk);
			zmd->sb[i].mblk = NULL;
		}
	}

	/* Free cached blocks */
	while (!list_empty(&zmd->mblk_dirty_list)) {
		mblk = list_first_entry(&zmd->mblk_dirty_list,
					struct dmz_mblock, link);
		dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
			     (u64)mblk->no, mblk->ref);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	while (!list_empty(&zmd->mblk_lru_list)) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	/* Sanity check: the mblock rbtree should now be empty */
	root = &zmd->mblk_rbtree;
	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
		dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
			     (u64)mblk->no, mblk->ref);
		mblk->ref = 0;
		dmz_free_mblock(zmd, mblk);
	}

	/* Free the zone descriptors */
	dmz_drop_zones(zmd);

	mutex_destroy(&zmd->mblk_flush_lock);
	mutex_destroy(&zmd->map_lock);
}

/*
 * Initialize the zoned metadata.
 */
int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
{
	struct dmz_metadata *zmd;
	unsigned int i, zid;
	struct dm_zone *zone;
	int ret;

	zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
	if (!zmd)
		return -ENOMEM;

	zmd->dev = dev;
	zmd->mblk_rbtree = RB_ROOT;
	init_rwsem(&zmd->mblk_sem);
	mutex_init(&zmd->mblk_flush_lock);
	spin_lock_init(&zmd->mblk_lock);
	INIT_LIST_HEAD(&zmd->mblk_lru_list);
	INIT_LIST_HEAD(&zmd->mblk_dirty_list);

	mutex_init(&zmd->map_lock);
	atomic_set(&zmd->unmap_nr_rnd, 0);
	INIT_LIST_HEAD(&zmd->unmap_rnd_list);
	INIT_LIST_HEAD(&zmd->map_rnd_list);

	atomic_set(&zmd->unmap_nr_seq, 0);
	INIT_LIST_HEAD(&zmd->unmap_seq_list);
	INIT_LIST_HEAD(&zmd->map_seq_list);

	atomic_set(&zmd->nr_reserved_seq_zones, 0);
	INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);

	init_waitqueue_head(&zmd->free_wq);

	/* Initialize zone descriptors */
	ret = dmz_init_zones(zmd);
	if (ret)
		goto err;

	/* Get super block */
	ret = dmz_load_sb(zmd);
	if (ret)
		goto err;

	/* Mark the metadata zones, starting from sb_zone (two metadata sets) */
	zid = dmz_id(zmd, zmd->sb_zone);
	for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
		zone = dmz_get(zmd, zid + i);
		if (!dmz_is_rnd(zone)) {
			ret = -ENXIO;
			goto err;
		}
		set_bit(DMZ_META, &zone->flags);
	}

	/* Load mapping table */
	ret = dmz_load_mapping(zmd);
	if (ret)
		goto err;

	/*
	 * Cache size boundaries: allow at least 2 super blocks, the chunk map
	 * blocks and enough blocks to be able to cache the bitmap blocks of
	 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
	 * the cache to add 512 more metadata blocks.
	 */
	zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
	zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
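
	/*
	 * Worked example (hypothetical geometry): with 256MB zones made of
	 * 65536 4KB blocks, each zone needs 65536 / 32768 = 2 bitmap blocks,
	 * so with nr_map_blocks == 8 this gives min_nr_mblks = 2 + 8 +
	 * 2 * 16 = 42 and max_nr_mblks = 554 cached metadata blocks.
	 */
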
	zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
	zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;

	/* Metadata cache shrinker */
	ret = register_shrinker(&zmd->mblk_shrinker);
	if (ret) {
		dmz_dev_err(dev, "Register metadata cache shrinker failed");
		goto err;
	}

	dmz_dev_info(dev, "Host-%s zoned block device",
		     bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
		     "aware" : "managed");
	dmz_dev_info(dev, "  %llu 512-byte logical sectors",
		     (u64)dev->capacity);
	dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors",
		     dev->nr_zones, (u64)dev->zone_nr_sectors);
	dmz_dev_info(dev, "  %u metadata zones",
		     zmd->nr_meta_zones * 2);
	dmz_dev_info(dev, "  %u data zones for %u chunks",
		     zmd->nr_data_zones, zmd->nr_chunks);
	dmz_dev_info(dev, "  %u random zones (%u unmapped)",
		     zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
	dmz_dev_info(dev, "  %u sequential zones (%u unmapped)",
		     zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
	dmz_dev_info(dev, "  %u reserved sequential data zones",
		     zmd->nr_reserved_seq);

	dmz_dev_debug(dev, "Format:");
	dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)",
		      zmd->nr_meta_blocks, zmd->max_nr_mblks);
	dmz_dev_debug(dev, "  %u data zone mapping blocks",
		      zmd->nr_map_blocks);
	dmz_dev_debug(dev, "  %u bitmap blocks",
		      zmd->nr_bitmap_blocks);

	*metadata = zmd;

	return 0;
err:
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
	*metadata = NULL;

	return ret;
}

/*
 * Clean up the zoned metadata resources.
 */
void dmz_dtr_metadata(struct dmz_metadata *zmd)
{
	unregister_shrinker(&zmd->mblk_shrinker);
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
}

/*
 * Check zone information on resume.
 */
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *zone;
	sector_t wp_block;
	unsigned int i;
	int ret;

	/* Check zones */
	for (i = 0; i < dev->nr_zones; i++) {
		zone = dmz_get(zmd, i);
		if (!zone) {
			dmz_dev_err(dev, "Unable to get zone %u", i);
			return -EIO;
		}

		wp_block = zone->wp_block;

		ret = dmz_update_zone(zmd, zone);
		if (ret) {
			dmz_dev_err(dev, "Broken zone %u", i);
			return ret;
		}

		if (dmz_is_offline(zone)) {
			dmz_dev_warn(dev, "Zone %u is offline", i);
			continue;
		}

		/* Check write pointer */
		if (!dmz_is_seq(zone))
			zone->wp_block = 0;
		else if (zone->wp_block != wp_block) {
			dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)",
				    i, (u64)zone->wp_block, (u64)wp_block);
			zone->wp_block = wp_block;
			dmz_invalidate_blocks(zmd, zone, zone->wp_block,
					      dev->zone_nr_blocks - zone->wp_block);
		}
	}

	return 0;
}