/* drivers/md/dm-zoned-reclaim.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned reclaim"

struct dmz_reclaim {
	struct dmz_metadata	*metadata;
	struct dmz_dev		*dev;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};

/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};

/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD		(10UL * HZ)
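/*
 * Note: HZ is the number of jiffies per second, so this evaluates to
 * ten seconds expressed in jiffies.
 */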

/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_RND	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_RND	50
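
/*
 * Taken together (see dmz_should_reclaim()): while the target is busy,
 * reclaim runs only once the share of unmapped random zones has dropped
 * to DMZ_RECLAIM_LOW_UNMAP_RND or below; when the target is idle, it
 * runs as long as any random zone is still mapped.
 */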

/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zero out the space between the write
	 * pointer and the requested position.
	 */
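	/*
	 * For example, with the write pointer at block 8 and the next valid
	 * block to copy at block 12, blocks 8..11 are zeroed so that the
	 * copy at block 12 is still a sequential write.
	 */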
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(zrc->dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(zrc->dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    zone->id, (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(zrc->dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}

/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

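	/*
	 * Publish kc_err before waking up the waiter: clear_bit_unlock()
	 * releases the "copy in flight" bit, and the barrier orders the
	 * clear before the wake_up_bit() that the wait_on_bit_io() call
	 * in dmz_reclaim_copy() is waiting on.
	 */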
	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}

/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = zrc->dev;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dmz_zone_nr_blocks(zmd);
	src_zone_block = dmz_start_block(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);

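	/*
	 * DM_KCOPYD_WRITE_SEQ makes kcopyd issue its writes strictly in
	 * order, which a sequential (write-pointer managed) destination
	 * zone requires.
	 */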
	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		if (dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential, so zero out any hole
		 * between writes.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
			       dmz_reclaim_kcopy_end, zrc);

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}

/*
 * Move valid blocks of dzone's buffer zone into dzone (after its write
 * pointer) and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		      dzone->chunk, bzone->id, dmz_weight(bzone),
		      dzone->id, dmz_weight(dzone));

	/* Flush the buffer zone into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		      chunk, dzone->id, dmz_weight(dzone),
		      bzone->id, dmz_weight(bzone));

	/* Flush data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Move valid blocks of the random data zone dzone into a free sequential zone.
 * Once blocks are moved, remap the zone chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	/* Get a free sequential zone */
	dmz_lock_map(zmd);
	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
	dmz_unlock_map(zmd);
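	/*
	 * With DMZ_ALLOC_RECLAIM the allocator can fall back to the
	 * sequential zones reserved for reclaim; if even those are
	 * exhausted, there is nothing to do.
	 */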
	if (!szone)
		return -ENOSPC;

	dmz_dev_debug(zrc->dev,
		      "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
		      chunk, dzone->id, dmz_weight(dzone), szone->id);

	/* Flush the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}

/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd);
	if (IS_ERR(dzone))
		return PTR_ERR(dzone);

	start = jiffies;

	if (dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
		rzone = dzone;

	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
			rzone = dzone;
		}
	}
out:
	if (ret) {
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		dmz_dev_debug(zrc->dev,
			      "Metadata flush for zone %u failed, err %d",
			      rzone->id, ret);
		return ret;
	}

	dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
		      rzone->id, jiffies_to_msecs(jiffies - start));
	return 0;
}

/*
 * Test if the target device is idle, i.e. if no BIO has hit the target
 * for at least DMZ_IDLE_PERIOD.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}

/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
	unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
		return true;

	/* If there are still plenty of random zones, do not reclaim */
	if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
		return false;

	/*
	 * If the percentage of unmapped random zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
}

/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd, nr_unmap_rnd;
	unsigned int p_unmap_rnd;
	int ret;

	if (dmz_bdev_is_dying(zrc->dev))
		return;

	if (!dmz_should_reclaim(zrc)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling so that reclaim runs at full speed when free random
	 * zones are very low, and more slowly while some remain, to limit
	 * the impact on the user workload as much as possible.
	 */
	nr_rnd = dmz_nr_rnd_zones(zmd);
	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
	if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy but we still have some free random zones: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
	}
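	/*
	 * For example, at p_unmap_rnd = 20% the throttle is
	 * min(75, 100 - 20 / 2) = 75%, while below 15% (half of
	 * DMZ_RECLAIM_LOW_UNMAP_RND) reclaim runs unthrottled.
	 */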

	DMDEBUG("(%s): Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
		dmz_metadata_label(zmd),
		zrc->kc_throttle.throttle,
		(dmz_target_idle(zrc) ? "Idle" : "Busy"),
		p_unmap_rnd, nr_unmap_rnd, nr_rnd);

	ret = dmz_do_reclaim(zrc);
	if (ret) {
		DMDEBUG("(%s): Reclaim error %d",
			dmz_metadata_label(zmd), ret);
		if (!dmz_check_bdev(zrc->dev))
			return;
	}

	dmz_schedule_reclaim(zrc);
}

/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->dev = dev;
	zrc->metadata = zmd;
	zrc->atime = jiffies;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
					  dmz_metadata_label(zmd));
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

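	/*
	 * The ordered workqueue runs at most one reclaim work item at a
	 * time, and WQ_MEM_RECLAIM guarantees forward progress under
	 * memory pressure. Kick off a first reclaim run immediately.
	 */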
	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}

/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
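	/*
	 * Note: the first run after resume is delayed by DMZ_IDLE_PERIOD,
	 * presumably to let user I/O restart before reclaim competes for
	 * the drive.
	 */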
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	if (dmz_should_reclaim(zrc))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}