/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-gc.c - pblk's garbage collector
 */

#include "pblk.h"
#include <linux/delay.h>

static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
        if (gc_rq->data)
                vfree(gc_rq->data);
        kfree(gc_rq);
}

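/*
 * Drain the queued GC write requests: detach the whole w_list under the
 * lock, then replay each request into the write cache. Returns 1 when
 * there was nothing to do, so the writer kthread can go back to sleep.
 */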
static int pblk_gc_write(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_gc_rq *gc_rq, *tgc_rq;
        LIST_HEAD(w_list);

        spin_lock(&gc->w_lock);
        if (list_empty(&gc->w_list)) {
                spin_unlock(&gc->w_lock);
                return 1;
        }

        list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
        gc->w_entries = 0;
        spin_unlock(&gc->w_lock);

        list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
                pblk_write_gc_to_cache(pblk, gc_rq);
                list_del(&gc_rq->list);
                kref_put(&gc_rq->line->ref, pblk_line_put);
                pblk_gc_free_gc_rq(gc_rq);
        }

        return 0;
}

static void pblk_gc_writer_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_writer_ts);
}

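/*
 * GC on this line failed: mark it closed again and put it back on the
 * GC group list that pblk_line_gc_list() selects for it, so it can be
 * chosen as a victim again later.
 */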
static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_CLOSED;
        move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                list_add_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

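/*
 * Work item for one GC read request: read the request's valid sectors
 * from the victim line into a buffer and queue the data on the GC
 * writer's list. The gc_sem slot taken in pblk_gc_line_prepare_ws() is
 * released as soon as this work item starts running.
 */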
static void pblk_gc_line_ws(struct work_struct *work)
{
        struct pblk_line_ws *gc_rq_ws = container_of(work,
                                                struct pblk_line_ws, ws);
        struct pblk *pblk = gc_rq_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line = gc_rq_ws->line;
        struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
        int ret;

        up(&gc->gc_sem);

        gc_rq->data = vmalloc(gc_rq->nr_secs * geo->sec_size);
        if (!gc_rq->data) {
                pr_err("pblk: could not GC line:%d (%d/%d)\n",
                                        line->id, *line->vsc, gc_rq->nr_secs);
                goto out;
        }

        /* Read from GC victim block */
        ret = pblk_submit_read_gc(pblk, gc_rq);
        if (ret) {
                pr_err("pblk: failed GC read in line:%d (err:%d)\n",
                                        line->id, ret);
                goto out;
        }

        if (!gc_rq->secs_to_gc)
                goto out;

retry:
        spin_lock(&gc->w_lock);
        if (gc->w_entries >= PBLK_GC_RQ_QD) {
                spin_unlock(&gc->w_lock);
                pblk_gc_writer_kick(&pblk->gc);
                usleep_range(128, 256);
                goto retry;
        }
        gc->w_entries++;
        list_add_tail(&gc_rq->list, &gc->w_list);
        spin_unlock(&gc->w_lock);

        pblk_gc_writer_kick(&pblk->gc);

        kfree(gc_rq_ws);
        return;

out:
        pblk_gc_free_gc_rq(gc_rq);
        kref_put(&line->ref, pblk_line_put);
        kfree(gc_rq_ws);
}

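/*
 * Work item that prepares a victim line for GC: read the line's emeta to
 * recover the lba list, snapshot the invalid sector bitmap, and split the
 * remaining valid sectors into pblk_gc_rq chunks of at most max_write_pgs
 * sectors, each queued as a separate read work item.
 */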
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_gc *gc = &pblk->gc;
        struct line_emeta *emeta_buf;
        struct pblk_line_ws *gc_rq_ws;
        struct pblk_gc_rq *gc_rq;
        __le64 *lba_list;
        unsigned long *invalid_bitmap;
        int sec_left, nr_secs, bit;
        int ret;

        invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!invalid_bitmap) {
                pr_err("pblk: could not allocate GC invalid bitmap\n");
                goto fail_free_ws;
        }

        emeta_buf = pblk_malloc(lm->emeta_len[0], l_mg->emeta_alloc_type,
                                                                GFP_KERNEL);
        if (!emeta_buf) {
                pr_err("pblk: cannot use GC emeta\n");
                goto fail_free_bitmap;
        }

        ret = pblk_line_read_emeta(pblk, line, emeta_buf);
        if (ret) {
                pr_err("pblk: line %d read emeta failed (%d)\n", line->id, ret);
                goto fail_free_emeta;
        }

        /* If this read fails, it means that emeta is corrupted. For now, leave
         * the line untouched. TODO: Implement a recovery routine that scans and
         * moves all sectors on the line.
         */
        lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
        if (!lba_list) {
                pr_err("pblk: could not interpret emeta (line %d)\n", line->id);
                goto fail_free_emeta;
        }

        spin_lock(&line->lock);
        bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
        sec_left = pblk_line_vsc(line);
        spin_unlock(&line->lock);

        if (sec_left < 0) {
                pr_err("pblk: corrupted GC line (%d)\n", line->id);
                goto fail_free_emeta;
        }

        bit = -1;
next_rq:
        gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
        if (!gc_rq)
                goto fail_free_emeta;

        nr_secs = 0;
        do {
                bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
                                                                bit + 1);
                if (bit > line->emeta_ssec)
                        break;

                gc_rq->paddr_list[nr_secs] = bit;
                gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
        } while (nr_secs < pblk->max_write_pgs);

        if (unlikely(!nr_secs)) {
                kfree(gc_rq);
                goto out;
        }

        gc_rq->nr_secs = nr_secs;
        gc_rq->line = line;

        gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!gc_rq_ws)
                goto fail_free_gc_rq;

        gc_rq_ws->pblk = pblk;
        gc_rq_ws->line = line;
        gc_rq_ws->priv = gc_rq;
        /* The write GC path can be much slower than the read GC path due to
         * the budget imposed by the rate-limiter. Throttle here in case we
         * get back pressure from the write GC path.
         */
        while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
                io_schedule();

        kref_get(&line->ref);

        INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
        queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);

        sec_left -= nr_secs;
        if (sec_left > 0)
                goto next_rq;

out:
        pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
        kfree(line_ws);
        kfree(invalid_bitmap);

        kref_put(&line->ref, pblk_line_put);
        atomic_dec(&gc->read_inflight_gc);

        return;

fail_free_gc_rq:
        kfree(gc_rq);
fail_free_emeta:
        pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
fail_free_bitmap:
        kfree(invalid_bitmap);
fail_free_ws:
        kfree(line_ws);

        pblk_put_line_back(pblk, line);
        kref_put(&line->ref, pblk_line_put);
        atomic_dec(&gc->read_inflight_gc);

        pr_err("pblk: Failed to GC line %d\n", line->id);
}

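/* Queue a victim line on the GC reader workqueue for preparation */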
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line_ws *line_ws;

        pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id);

        line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!line_ws)
                return -ENOMEM;

        line_ws->pblk = pblk;
        line_ws->line = line;

        atomic_inc(&gc->pipeline_gc);
        INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
        queue_work(gc->gc_reader_wq, &line_ws->ws);

        return 0;
}

static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_reader_ts);
}

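/*
 * Wake up all GC kthreads and re-arm the GC timer, so that GC keeps
 * running for as long as it is enabled.
 */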
static void pblk_gc_kick(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        pblk_gc_writer_kick(gc);
        pblk_gc_reader_kick(gc);

        /* If we're shutting down GC, let's not start it up again */
        if (gc->gc_enabled) {
                wake_up_process(gc->gc_ts);
                mod_timer(&gc->gc_timer,
                          jiffies + msecs_to_jiffies(GC_TIME_MSECS));
        }
}

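/*
 * Take the next victim line off the reader list and start GC on it.
 * Returns 1 when the list is empty.
 */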
static int pblk_gc_read(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;

        spin_lock(&gc->r_lock);
        if (list_empty(&gc->r_list)) {
                spin_unlock(&gc->r_lock);
                return 1;
        }

        line = list_first_entry(&gc->r_list, struct pblk_line, list);
        list_del(&line->list);
        spin_unlock(&gc->r_lock);

        pblk_gc_kick(pblk);

        if (pblk_gc_line(pblk, line))
                pr_err("pblk: failed to GC line %d\n", line->id);

        return 0;
}

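/* Pick the line with the fewest valid sectors as the next GC victim */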
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
                                                 struct list_head *group_list)
{
        struct pblk_line *line, *victim;
        int line_vsc, victim_vsc;

        victim = list_first_entry(group_list, struct pblk_line, list);
        list_for_each_entry(line, group_list, list) {
                line_vsc = le32_to_cpu(*line->vsc);
                victim_vsc = le32_to_cpu(*victim->vsc);
                if (line_vsc < victim_vsc)
                        victim = line;
        }

        return victim;
}

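/*
 * GC should run when it is active and the number of free blocks has
 * dropped below the rate-limiter's high threshold.
 */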
static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
{
        unsigned int nr_blocks_free, nr_blocks_need;

        nr_blocks_need = pblk_rl_high_thrs(rl);
        nr_blocks_free = pblk_rl_nr_free_blks(rl);

        /* This is not critical, no need to take lock here */
        return ((gc->gc_active) && (nr_blocks_need > nr_blocks_free));
}

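/*
 * Lines on gc_full_list hold no valid sectors, so they can be reclaimed
 * without moving any data: putting the line's reference lets it be
 * erased and returned to the free list directly.
 */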
void pblk_gc_free_full_lines(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;

        do {
                spin_lock(&l_mg->gc_lock);
                if (list_empty(&l_mg->gc_full_list)) {
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }

                line = list_first_entry(&l_mg->gc_full_list,
                                        struct pblk_line, list);

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                atomic_inc(&gc->pipeline_gc);
                kref_put(&line->ref, pblk_line_put);
        } while (1);
}

/*
 * Lines with no valid sectors will be returned to the free list immediately. If
 * GC is activated - either because the free block count is under the determined
 * threshold, or because it is being forced from user space - only lines with a
 * high count of invalid sectors will be recycled.
 */
static void pblk_gc_run(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;
        struct list_head *group_list;
        bool run_gc;
        int read_inflight_gc, gc_group = 0, prev_group = 0;

        pblk_gc_free_full_lines(pblk);

        run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
        if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
                return;

next_gc_group:
        group_list = l_mg->gc_lists[gc_group++];

        do {
                spin_lock(&l_mg->gc_lock);
                if (list_empty(group_list)) {
                        spin_unlock(&l_mg->gc_lock);
                        break;
                }

                line = pblk_gc_get_victim_line(pblk, group_list);

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                spin_lock(&gc->r_lock);
                list_add_tail(&line->list, &gc->r_list);
                spin_unlock(&gc->r_lock);

                read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
                pblk_gc_reader_kick(gc);

                prev_group = 1;

                /* No need to queue up more GC lines than we can handle */
                run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
                if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
                        break;
        } while (1);

        if (!prev_group && pblk->rl.rb_state > gc_group &&
                                                gc_group < PBLK_GC_NR_LISTS)
                goto next_gc_group;
}

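/* Timer callback: periodically kick the GC kthreads */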
static void pblk_gc_timer(struct timer_list *t)
{
        struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);

        pblk_gc_kick(pblk);
}

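/* Main GC kthread: selects victim lines whenever GC should run */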
static int pblk_gc_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                pblk_gc_run(pblk);
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

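/* GC writer kthread: re-writes the valid data collected by the readers */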
static int pblk_gc_writer_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_gc_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

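/*
 * GC reader kthread: feeds victim lines to the line preparation path.
 * On shutdown it waits until the whole GC pipeline has drained.
 */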
static int pblk_gc_reader_ts(void *data)
{
        struct pblk *pblk = data;
        struct pblk_gc *gc = &pblk->gc;

        while (!kthread_should_stop()) {
                if (!pblk_gc_read(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

#ifdef CONFIG_NVM_DEBUG
        pr_info("pblk: flushing gc pipeline, %d lines left\n",
                atomic_read(&gc->pipeline_gc));
#endif

        do {
                if (!atomic_read(&gc->pipeline_gc))
                        break;

                schedule();
        } while (1);

        return 0;
}

static void pblk_gc_start(struct pblk *pblk)
{
        pblk->gc.gc_active = 1;
        pr_debug("pblk: gc start\n");
}

void pblk_gc_should_start(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_enabled && !gc->gc_active) {
                pblk_gc_start(pblk);
                pblk_gc_kick(pblk);
        }
}

/*
 * If flush_wq == 1 then no lock should be held by the caller since
 * flush_workqueue can sleep
 */
static void pblk_gc_stop(struct pblk *pblk, int flush_wq)
{
        pblk->gc.gc_active = 0;
        pr_debug("pblk: gc stop\n");
}

void pblk_gc_should_stop(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_active && !gc->gc_forced)
                pblk_gc_stop(pblk, 0);
}

void pblk_gc_should_kick(struct pblk *pblk)
{
        pblk_rl_update_rates(&pblk->rl);
}

void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
                              int *gc_active)
{
        struct pblk_gc *gc = &pblk->gc;

        spin_lock(&gc->lock);
        *gc_enabled = gc->gc_enabled;
        *gc_active = gc->gc_active;
        spin_unlock(&gc->lock);
}

int pblk_gc_sysfs_force(struct pblk *pblk, int force)
{
        struct pblk_gc *gc = &pblk->gc;

        if (force < 0 || force > 1)
                return -EINVAL;

        spin_lock(&gc->lock);
        gc->gc_forced = force;

        if (force)
                gc->gc_enabled = 1;
        else
                gc->gc_enabled = 0;
        spin_unlock(&gc->lock);

        pblk_gc_should_start(pblk);

        return 0;
}

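/*
 * Set up the GC machinery: the main, reader and writer kthreads, the
 * periodic GC timer, and the two workqueues (line preparation and
 * per-request reads). gc_sem caps the number of outstanding GC read
 * work items at PBLK_GC_RQ_QD.
 */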
int pblk_gc_init(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        int ret;

        gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
        if (IS_ERR(gc->gc_ts)) {
                pr_err("pblk: could not allocate GC main kthread\n");
                return PTR_ERR(gc->gc_ts);
        }

        gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
                                                        "pblk-gc-writer-ts");
        if (IS_ERR(gc->gc_writer_ts)) {
                pr_err("pblk: could not allocate GC writer kthread\n");
                ret = PTR_ERR(gc->gc_writer_ts);
                goto fail_free_main_kthread;
        }

        gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
                                                        "pblk-gc-reader-ts");
        if (IS_ERR(gc->gc_reader_ts)) {
                pr_err("pblk: could not allocate GC reader kthread\n");
                ret = PTR_ERR(gc->gc_reader_ts);
                goto fail_free_writer_kthread;
        }

        timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
        mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));

        gc->gc_active = 0;
        gc->gc_forced = 0;
        gc->gc_enabled = 1;
        gc->w_entries = 0;
        atomic_set(&gc->read_inflight_gc, 0);
        atomic_set(&gc->pipeline_gc, 0);

        /* Workqueue that reads valid sectors from a line and submits them to
         * the GC writer to be recycled.
         */
        gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
        if (!gc->gc_line_reader_wq) {
                pr_err("pblk: could not allocate GC line reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_kthread;
        }

        /* Workqueue that prepares lines for GC */
        gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
                                        WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
        if (!gc->gc_reader_wq) {
                pr_err("pblk: could not allocate GC reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_line_wq;
        }

        spin_lock_init(&gc->lock);
        spin_lock_init(&gc->w_lock);
        spin_lock_init(&gc->r_lock);

        sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);

        INIT_LIST_HEAD(&gc->w_list);
        INIT_LIST_HEAD(&gc->r_list);

        return 0;

fail_free_reader_line_wq:
        destroy_workqueue(gc->gc_line_reader_wq);
fail_free_reader_kthread:
        kthread_stop(gc->gc_reader_ts);
fail_free_writer_kthread:
        kthread_stop(gc->gc_writer_ts);
fail_free_main_kthread:
        kthread_stop(gc->gc_ts);

        return ret;
}

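/*
 * Tear down GC: disable it, stop the kthreads, and flush and destroy the
 * workqueues so that no GC work is left running.
 */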
void pblk_gc_exit(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        gc->gc_enabled = 0;
        del_timer_sync(&gc->gc_timer);
        pblk_gc_stop(pblk, 1);

        if (gc->gc_ts)
                kthread_stop(gc->gc_ts);

        if (gc->gc_reader_ts)
                kthread_stop(gc->gc_reader_ts);

        if (gc->gc_reader_wq) {
                flush_workqueue(gc->gc_reader_wq);
                destroy_workqueue(gc->gc_reader_wq);
        }

        if (gc->gc_line_reader_wq) {
                flush_workqueue(gc->gc_line_reader_wq);
                destroy_workqueue(gc->gc_line_reader_wq);
        }

        if (gc->gc_writer_ts)
                kthread_stop(gc->gc_writer_ts);
}