// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-gc.c - pblk's garbage collector
 */

#include "pblk.h"
#include "pblk-trace.h"
#include <linux/delay.h>


static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
	vfree(gc_rq->data);
	kfree(gc_rq);
}

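/* Drain the GC write list: move the buffered GC requests into the write
 * cache, dropping the line reference taken when each request was queued.
 * Returns 1 if the list was empty, 0 otherwise.
 */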
static int pblk_gc_write(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_gc_rq *gc_rq, *tgc_rq;
	LIST_HEAD(w_list);

	spin_lock(&gc->w_lock);
	if (list_empty(&gc->w_list)) {
		spin_unlock(&gc->w_lock);
		return 1;
	}

	list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
	gc->w_entries = 0;
	spin_unlock(&gc->w_lock);

	list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
		pblk_write_gc_to_cache(pblk, gc_rq);
		list_del(&gc_rq->list);
		kref_put(&gc_rq->line->ref, pblk_line_put);
		pblk_gc_free_gc_rq(gc_rq);
	}

	return 0;
}

static void pblk_gc_writer_kick(struct pblk_gc *gc)
{
	wake_up_process(gc->gc_writer_ts);
}

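/* Return a line that failed GC to the closed state and put it back on the
 * appropriate GC list, so it can be picked as a victim again later.
 */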
void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list;

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_CLOSED;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);

	/* Reset gc_group so that pblk_line_gc_list() returns the proper
	 * move_list, since right now the line is not on any of the
	 * gc lists.
	 */
	line->gc_group = PBLK_LINEGC_NONE;
	move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);
	list_add_tail(&line->list, move_list);
	spin_unlock(&l_mg->gc_lock);
}

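/* Work item: read the valid sectors of a GC victim line and queue the
 * request on the GC write list. If the writer is backed up beyond
 * PBLK_GC_RQ_QD entries, kick it and sleep briefly before retrying.
 */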
static void pblk_gc_line_ws(struct work_struct *work)
{
	struct pblk_line_ws *gc_rq_ws = container_of(work,
						struct pblk_line_ws, ws);
	struct pblk *pblk = gc_rq_ws->pblk;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line = gc_rq_ws->line;
	struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
	int ret;

	up(&gc->gc_sem);

	/* Read from GC victim block */
	ret = pblk_submit_read_gc(pblk, gc_rq);
	if (ret) {
		line->w_err_gc->has_gc_err = 1;
		goto out;
	}

	if (!gc_rq->secs_to_gc)
		goto out;

retry:
	spin_lock(&gc->w_lock);
	if (gc->w_entries >= PBLK_GC_RQ_QD) {
		spin_unlock(&gc->w_lock);
		pblk_gc_writer_kick(&pblk->gc);
		usleep_range(128, 256);
		goto retry;
	}
	gc->w_entries++;
	list_add_tail(&gc_rq->list, &gc->w_list);
	spin_unlock(&gc->w_lock);

	pblk_gc_writer_kick(&pblk->gc);

	kfree(gc_rq_ws);
	return;

out:
	pblk_gc_free_gc_rq(gc_rq);
	kref_put(&line->ref, pblk_line_put);
	kfree(gc_rq_ws);
}

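/* Read the line's emeta footer and return a copy of its LBA list, or NULL
 * if the emeta cannot be read or is inconsistent. The caller owns the
 * returned buffer and must free it with kvfree().
 */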
static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
				       struct pblk_line *line)
{
	struct line_emeta *emeta_buf;
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned int lba_list_size = lm->emeta_len[2];
	__le64 *lba_list;
	int ret;

	emeta_buf = kvmalloc(lm->emeta_len[0], GFP_KERNEL);
	if (!emeta_buf)
		return NULL;

	ret = pblk_line_emeta_read(pblk, line, emeta_buf);
	if (ret) {
		pblk_err(pblk, "line %d read emeta failed (%d)\n",
				line->id, ret);
		kvfree(emeta_buf);
		return NULL;
	}

	/* If this check fails, it means that emeta is corrupted.
	 * For now, leave the line untouched.
	 * TODO: Implement a recovery routine that scans and moves
	 * all sectors on the line.
	 */

	ret = pblk_recov_check_emeta(pblk, emeta_buf);
	if (ret) {
		pblk_err(pblk, "inconsistent emeta (line %d)\n",
				line->id);
		kvfree(emeta_buf);
		return NULL;
	}

	lba_list = kvmalloc(lba_list_size, GFP_KERNEL);

	if (lba_list)
		memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);

	kvfree(emeta_buf);

	return lba_list;
}

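/* Work item: split a victim line's valid sectors into GC requests of at
 * most max_write_pgs sectors each and queue them on the GC reader
 * workqueue. Valid sectors are the zero bits in a snapshot of the line's
 * invalid bitmap.
 */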
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;
	struct pblk_line_meta *lm = &pblk->lm;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line_ws *gc_rq_ws;
	struct pblk_gc_rq *gc_rq;
	__le64 *lba_list;
	unsigned long *invalid_bitmap;
	int sec_left, nr_secs, bit;

	invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!invalid_bitmap)
		goto fail_free_ws;

	if (line->w_err_gc->has_write_err) {
		lba_list = line->w_err_gc->lba_list;
		line->w_err_gc->lba_list = NULL;
	} else {
		lba_list = get_lba_list_from_emeta(pblk, line);
		if (!lba_list) {
			pblk_err(pblk, "could not interpret emeta (line %d)\n",
					line->id);
			goto fail_free_invalid_bitmap;
		}
	}

	spin_lock(&line->lock);
	bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
	sec_left = pblk_line_vsc(line);
	spin_unlock(&line->lock);

	if (sec_left < 0) {
		pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
		goto fail_free_lba_list;
	}

	bit = -1;
next_rq:
	gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
	if (!gc_rq)
		goto fail_free_lba_list;

	nr_secs = 0;
	do {
		bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
								bit + 1);
		if (bit > line->emeta_ssec)
			break;

		gc_rq->paddr_list[nr_secs] = bit;
		gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
	} while (nr_secs < pblk->max_write_pgs);

	if (unlikely(!nr_secs)) {
		kfree(gc_rq);
		goto out;
	}

	gc_rq->nr_secs = nr_secs;
	gc_rq->line = line;

	gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
	if (!gc_rq->data)
		goto fail_free_gc_rq;

	gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
	if (!gc_rq_ws)
		goto fail_free_gc_data;

	gc_rq_ws->pblk = pblk;
	gc_rq_ws->line = line;
	gc_rq_ws->priv = gc_rq;

	/* The write GC path can be much slower than the read GC one due to
	 * the budget imposed by the rate-limiter. Balance here in case we
	 * get back pressure from the write GC path.
	 */
	while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
		io_schedule();

	kref_get(&line->ref);

	INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
	queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);

	sec_left -= nr_secs;
	if (sec_left > 0)
		goto next_rq;

out:
	kvfree(lba_list);
	kfree(line_ws);
	kfree(invalid_bitmap);

	kref_put(&line->ref, pblk_line_put);
	atomic_dec(&gc->read_inflight_gc);

	return;

fail_free_gc_data:
	vfree(gc_rq->data);
fail_free_gc_rq:
	kfree(gc_rq);
fail_free_lba_list:
	kvfree(lba_list);
fail_free_invalid_bitmap:
	kfree(invalid_bitmap);
fail_free_ws:
	kfree(line_ws);

	/* The line goes back to the closed state, so we must not release the
	 * extra line reference here; that reference is only dropped on the
	 * GC-to-free line state transition.
	 */
	pblk_put_line_back(pblk, line);
	atomic_dec(&gc->read_inflight_gc);

	pblk_err(pblk, "failed to GC line %d\n", line->id);
}

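/* Queue up a line for GC by scheduling its preparation work item */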
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line_ws *line_ws;

	pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);

	line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
	if (!line_ws)
		return -ENOMEM;

	line_ws->pblk = pblk;
	line_ws->line = line;

	atomic_inc(&gc->pipeline_gc);
	INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
	queue_work(gc->gc_reader_wq, &line_ws->ws);

	return 0;
}

static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
	wake_up_process(gc->gc_reader_ts);
}

static void pblk_gc_kick(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	pblk_gc_writer_kick(gc);
	pblk_gc_reader_kick(gc);

	/* If we're shutting down GC, let's not start it up again */
	if (gc->gc_enabled) {
		wake_up_process(gc->gc_ts);
		mod_timer(&gc->gc_timer,
			  jiffies + msecs_to_jiffies(GC_TIME_MSECS));
	}
}

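/* Take the next line off the GC read list and start GC on it. On failure,
 * roll back by returning the line to the read list. Returns 1 if the list
 * was empty, 0 otherwise.
 */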
static int pblk_gc_read(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	spin_lock(&gc->r_lock);
	if (list_empty(&gc->r_list)) {
		spin_unlock(&gc->r_lock);
		return 1;
	}

	line = list_first_entry(&gc->r_list, struct pblk_line, list);
	list_del(&line->list);
	spin_unlock(&gc->r_lock);

	pblk_gc_kick(pblk);

	if (pblk_gc_line(pblk, line)) {
		pblk_err(pblk, "failed to GC line %d\n", line->id);
		/* rollback */
		spin_lock(&gc->r_lock);
		list_add_tail(&line->list, &gc->r_list);
		spin_unlock(&gc->r_lock);
	}

	return 0;
}

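/* Pick the line with the fewest valid sectors from a GC group list,
 * skipping lines that still have sectors pending an update. Returns NULL
 * if no eligible victim is found.
 */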
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
						 struct list_head *group_list)
{
	struct pblk_line *line, *victim;
	unsigned int line_vsc = ~0x0L, victim_vsc = ~0x0L;

	victim = list_first_entry(group_list, struct pblk_line, list);

	list_for_each_entry(line, group_list, list) {
		if (!atomic_read(&line->sec_to_update))
			line_vsc = le32_to_cpu(*line->vsc);
		if (line_vsc < victim_vsc) {
			victim = line;
			victim_vsc = le32_to_cpu(*victim->vsc);
		}
	}

	if (victim_vsc == ~0x0)
		return NULL;

	return victim;
}

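/* GC should run when there are write-error lines to recycle, or when GC is
 * active and the free block count has dropped below the rate-limiter's
 * high threshold.
 */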
static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
{
	unsigned int nr_blocks_free, nr_blocks_need;
	unsigned int werr_lines = atomic_read(&rl->werr_lines);

	nr_blocks_need = pblk_rl_high_thrs(rl);
	nr_blocks_free = pblk_rl_nr_free_blks(rl);

	/* This is not critical, no need to take lock here */
	return ((werr_lines > 0) ||
		((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
}

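/* Immediately release lines on the gc_full_list: they contain no valid
 * sectors, so dropping the line reference frees them without any data
 * movement.
 */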
void pblk_gc_free_full_lines(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;

	do {
		spin_lock(&l_mg->gc_lock);
		if (list_empty(&l_mg->gc_full_list)) {
			spin_unlock(&l_mg->gc_lock);
			return;
		}

		line = list_first_entry(&l_mg->gc_full_list,
							struct pblk_line, list);

		spin_lock(&line->lock);
		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
		line->state = PBLK_LINESTATE_GC;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_del(&line->list);
		spin_unlock(&l_mg->gc_lock);

		atomic_inc(&gc->pipeline_gc);
		kref_put(&line->ref, pblk_line_put);
	} while (1);
}

/*
 * Lines with no valid sectors will be returned to the free list immediately. If
 * GC is activated - either because the free block count is under the determined
 * threshold, or because it is being forced from user space - only lines with a
 * high count of invalid sectors will be recycled.
 */
static void pblk_gc_run(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;
	struct pblk_line *line;
	struct list_head *group_list;
	bool run_gc;
	int read_inflight_gc, gc_group = 0, prev_group = 0;

	pblk_gc_free_full_lines(pblk);

	run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
	if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
		return;

next_gc_group:
	group_list = l_mg->gc_lists[gc_group++];

	do {
		spin_lock(&l_mg->gc_lock);

		line = pblk_gc_get_victim_line(pblk, group_list);
		if (!line) {
			spin_unlock(&l_mg->gc_lock);
			break;
		}

		spin_lock(&line->lock);
		WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
		line->state = PBLK_LINESTATE_GC;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_del(&line->list);
		spin_unlock(&l_mg->gc_lock);

		spin_lock(&gc->r_lock);
		list_add_tail(&line->list, &gc->r_list);
		spin_unlock(&gc->r_lock);

		read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
		pblk_gc_reader_kick(gc);

		prev_group = 1;

		/* No need to queue up more GC lines than we can handle */
		run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
		if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
			break;
	} while (1);

	if (!prev_group && pblk->rl.rb_state > gc_group &&
						gc_group < PBLK_GC_NR_LISTS)
		goto next_gc_group;
}

static void pblk_gc_timer(struct timer_list *t)
{
	struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);

	pblk_gc_kick(pblk);
}

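/* Main GC kthread: selects victim lines. The writer and reader kthreads
 * below drain the GC write and read lists respectively; all three sleep
 * until kicked.
 */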
static int pblk_gc_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		pblk_gc_run(pblk);
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}

static int pblk_gc_writer_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_gc_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}

static int pblk_gc_reader_ts(void *data)
{
	struct pblk *pblk = data;
	struct pblk_gc *gc = &pblk->gc;

	while (!kthread_should_stop()) {
		if (!pblk_gc_read(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
		atomic_read(&gc->pipeline_gc));
#endif

	do {
		if (!atomic_read(&gc->pipeline_gc))
			break;

		schedule();
	} while (1);

	return 0;
}

static void pblk_gc_start(struct pblk *pblk)
{
	pblk->gc.gc_active = 1;
	pblk_debug(pblk, "gc start\n");
}

void pblk_gc_should_start(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	if (gc->gc_enabled && !gc->gc_active) {
		pblk_gc_start(pblk);
		pblk_gc_kick(pblk);
	}
}

void pblk_gc_should_stop(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;

	if (gc->gc_active && !gc->gc_forced)
		gc->gc_active = 0;
}

void pblk_gc_should_kick(struct pblk *pblk)
{
	pblk_rl_update_rates(&pblk->rl);
}

void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
			      int *gc_active)
{
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&gc->lock);
	*gc_enabled = gc->gc_enabled;
	*gc_active = gc->gc_active;
	spin_unlock(&gc->lock);
}

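/* Force GC from user space: forced GC stays active regardless of the free
 * block thresholds until it is explicitly disabled again.
 */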
int pblk_gc_sysfs_force(struct pblk *pblk, int force)
{
	struct pblk_gc *gc = &pblk->gc;

	if (force < 0 || force > 1)
		return -EINVAL;

	spin_lock(&gc->lock);
	gc->gc_forced = force;

	if (force)
		gc->gc_enabled = 1;
	else
		gc->gc_enabled = 0;
	spin_unlock(&gc->lock);

	pblk_gc_should_start(pblk);

	return 0;
}

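/* Set up the GC kthreads, timer and workqueues. The gc_sem semaphore bounds
 * the number of in-flight GC requests to PBLK_GC_RQ_QD.
 */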
int pblk_gc_init(struct pblk *pblk)
{
	struct pblk_gc *gc = &pblk->gc;
	int ret;

	gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
	if (IS_ERR(gc->gc_ts)) {
		pblk_err(pblk, "could not allocate GC main kthread\n");
		return PTR_ERR(gc->gc_ts);
	}

	gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
							"pblk-gc-writer-ts");
	if (IS_ERR(gc->gc_writer_ts)) {
		pblk_err(pblk, "could not allocate GC writer kthread\n");
		ret = PTR_ERR(gc->gc_writer_ts);
		goto fail_free_main_kthread;
	}

	gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
							"pblk-gc-reader-ts");
	if (IS_ERR(gc->gc_reader_ts)) {
		pblk_err(pblk, "could not allocate GC reader kthread\n");
		ret = PTR_ERR(gc->gc_reader_ts);
		goto fail_free_writer_kthread;
	}

	timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
	mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));

	gc->gc_active = 0;
	gc->gc_forced = 0;
	gc->gc_enabled = 1;
	gc->w_entries = 0;
	atomic_set(&gc->read_inflight_gc, 0);
	atomic_set(&gc->pipeline_gc, 0);

	/* Workqueue that reads valid sectors from a line and submits them to
	 * the GC writer to be recycled.
	 */
	gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
	if (!gc->gc_line_reader_wq) {
		pblk_err(pblk, "could not allocate GC line reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_kthread;
	}

	/* Workqueue that prepares lines for GC */
	gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!gc->gc_reader_wq) {
		pblk_err(pblk, "could not allocate GC reader workqueue\n");
		ret = -ENOMEM;
		goto fail_free_reader_line_wq;
	}

	spin_lock_init(&gc->lock);
	spin_lock_init(&gc->w_lock);
	spin_lock_init(&gc->r_lock);

	sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);

	INIT_LIST_HEAD(&gc->w_list);
	INIT_LIST_HEAD(&gc->r_list);

	return 0;

fail_free_reader_line_wq:
	destroy_workqueue(gc->gc_line_reader_wq);
fail_free_reader_kthread:
	kthread_stop(gc->gc_reader_ts);
fail_free_writer_kthread:
	kthread_stop(gc->gc_writer_ts);
fail_free_main_kthread:
	kthread_stop(gc->gc_ts);

	return ret;
}

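/* Tear down GC. If @graceful, flush outstanding GC work so that in-flight
 * requests complete before the workqueues are destroyed.
 */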
void pblk_gc_exit(struct pblk *pblk, bool graceful)
{
	struct pblk_gc *gc = &pblk->gc;

	gc->gc_enabled = 0;
	del_timer_sync(&gc->gc_timer);
	gc->gc_active = 0;

	if (gc->gc_ts)
		kthread_stop(gc->gc_ts);

	if (gc->gc_reader_ts)
		kthread_stop(gc->gc_reader_ts);

	if (graceful) {
		flush_workqueue(gc->gc_reader_wq);
		flush_workqueue(gc->gc_line_reader_wq);
	}

	destroy_workqueue(gc->gc_reader_wq);
	destroy_workqueue(gc->gc_line_reader_wq);

	if (gc->gc_writer_ts)
		kthread_stop(gc->gc_writer_ts);
}