/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

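/* Complete the user bios attached to each valid write buffer entry covered
 * by this request, release the request's metadata DMA buffer and advance
 * the buffer's sync pointer. Returns the new sync position in the ring.
 */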
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct bio *original_bio;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;

		w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	if (rqd->meta_list)
		nvm_dev_dma_free(dev->parent, rqd->meta_list,
				 rqd->dma_meta_list);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

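/* Write completions must be processed in the order in which their entries
 * sit in the write buffer. If this request is not at the current sync
 * position, park it on pblk->compl_list; otherwise complete it and drain
 * any parked completions that become contiguous behind it.
 */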
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* When a write fails, we are not sure whether the block has grown bad or a
 * page range is more susceptible to write errors. If a high number of pages
 * fail, we assume that the block is bad and we mark it accordingly. In all
 * cases, we remap and resubmit the failed entries as fast as possible; if
 * a flush is waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_rec_ctx *recovery;
	struct ppa_addr *ppa_list = rqd->ppa_list;
	int nr_ppas = rqd->nr_ppas;
	unsigned int c_entries;
	int bit, ret;

	if (unlikely(nr_ppas == 1))
		ppa_list = &rqd->ppa_addr;

	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pr_err("pblk: could not allocate recovery context\n");
		return;
	}
	INIT_LIST_HEAD(&recovery->failed);

	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		struct pblk_rb_entry *entry;
		struct ppa_addr ppa;

		/* Logic error */
		if (bit > c_ctx->nr_valid) {
			WARN_ONCE(1, "pblk: corrupted write request\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		ppa = ppa_list[bit];
		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
		if (!entry) {
			pr_err("pblk: could not scan entry on write failure\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		/* The list is filled first and emptied afterwards. No need for
		 * protecting it with a lock
		 */
		list_add_tail(&entry->index, &recovery->failed);
	}

	c_entries = find_first_bit(comp_bits, nr_ppas);
	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
	if (ret) {
		pr_err("pblk: could not recover from write failure\n");
		mempool_free(recovery, pblk->rec_pool);
		goto out;
	}

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);

out:
	pblk_complete_write(pblk, rqd, c_ctx);
}

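/* Completion handler for user data writes. Errored requests are diverted to
 * the recovery path; successful ones are completed in write buffer order.
 */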
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		return pblk_end_w_fail(pblk, rqd);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

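/* Completion handler for line metadata (emeta) writes. Once all sectors of
 * the line's emeta have synced, schedule the work that closes the line.
 */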
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int pos = pblk_ppa_to_pos(geo, rqd->ppa_list[0]);
	struct pblk_lun *rlun = &pblk->luns[pos];
	int sync;

	up(&rlun->wr_sem);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pr_err("pblk: metadata I/O failed\n");
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws,
				 pblk->close_wq);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, READ);

	atomic_dec(&pblk->inflight_io);
}

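/* Initialize a write request and allocate its out-of-band metadata buffer.
 * For multi-sector requests, the ppa list is carved out of the same DMA
 * allocation, right after the metadata area.
 */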
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
					   &rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	if (unlikely(nr_secs == 1))
		return 0;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}

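/* Set up a user data write: allocate the LUN bitmap used for per-LUN
 * serialization and map the request's sectors onto the current data line.
 * If the next line still has blocks left to erase, map through the erase
 * path so an erase can be scheduled alongside this write.
 */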
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret = 0;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
				  valid, erase_ppa);

	return 0;
}

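/* Set up a recovery write that remaps and resubmits the entries of a failed
 * write request; the caller provides rqd with nr_ppas already set.
 */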
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx)
{
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;

	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
	if (ret)
		return ret;

	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

	rqd->ppa_status = (u64)0;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);

	return ret;
}

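/* Wrapper around pblk_calc_secs() that sanity-checks the result when
 * debugging is enabled: a flush must always produce work, and without one
 * we must never sync more sectors than are available in the buffer.
 */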
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

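/* Decide whether a metadata I/O for @meta_line can be scheduled now without
 * contending for the same LUNs as the in-flight data I/O described by
 * @ppa_list. Returns 1 when the metadata write is safe to submit.
 */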
static inline int pblk_valid_meta_ppa(struct pblk *pblk,
				      struct pblk_line *meta_line,
				      struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *data_line;
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int i;

	data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);

	if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
		return 1;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in a LUN deadlock. In this case,
	 * modify the distance so that it is not optimal, but still allows
	 * metadata I/Os to succeed.
	 */
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	if (unlikely(ppa_opt.ppa == ppa.ppa)) {
		data_line->meta_distance--;
		return 0;
	}

	for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
		if (ppa_list[i].g.ch == ppa_opt.g.ch &&
					ppa_list[i].g.lun == ppa_opt.g.lun)
			return 1;

	if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
		for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
			if (ppa_list[i].g.ch == ppa.g.ch &&
						ppa_list[i].g.lun == ppa.g.lun)
				return 0;

		return 1;
	}

	return 0;
}

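/* Build and submit the next chunk of end-of-line metadata (emeta) for
 * @meta_line. Once the whole emeta buffer has been mapped, the line is
 * taken off the emeta list and is closed from the completion path.
 */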
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct pblk_lun *rlun;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return PTR_ERR(rqd);
	}
	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->sec_size;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	rlun = &pblk->luns[pblk_ppa_to_pos(geo, rqd->ppa_list[0])];
	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
	if (ret) {
		pr_err("pblk: lun semaphore timed out (%d)\n", ret);
		goto fail_free_bio;
	}

	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0]) {
		spin_lock(&l_mg->close_lock);
		list_del(&meta_line->list);
		WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt meta line %d\n", meta_line->id);
		spin_unlock(&l_mg->close_lock);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	list_add(&meta_line->list, &meta_line->list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
		bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}

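/* If a previous data line still has outstanding emeta, and writing it now
 * cannot conflict with the data I/O that was just mapped, submit the next
 * emeta chunk for that line.
 */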
static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
			      int prev_n)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
retry:
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return 0;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
		goto retry;
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
		return 0;

	return pblk_submit_meta_io(pblk, meta_line);
}

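/* Submit a fully set-up write request together with the work that can be
 * pipelined with it: either a metadata write for a previous line, or, when
 * one is pending, an erase for the next data line. If the asynchronous
 * erase cannot be submitted, the block is re-armed so it is retried later.
 */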
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr erase_ppa;
	int err;

	ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
	if (err) {
		pr_err("pblk: could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	if (likely(ppa_empty(erase_ppa))) {
		/* Submit metadata write for previous data line */
		err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
		if (err) {
			pr_err("pblk: metadata I/O submission failed: %d\n",
									err);
			return NVM_IO_ERR;
		}

		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}
	} else {
		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}

		/* Submit available erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	return NVM_IO_OK;
}

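/* Free the pages that were added to pad the write bio up to the minimal
 * write size.
 */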
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
}

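/* Take as many sectors as possible from the write buffer (at least the
 * minimal write size, unless a flush forces padding), pack them into one
 * write request and submit the resulting I/O set. Returns 0 if a write was
 * submitted, 1 if there was nothing to do or submission failed.
 */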
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;

	/* If there are no sectors in the cache, flushes (bios without data)
	 * will be cleared on the cache threads
	 */
	secs_avail = pblk_rb_read_count(&pblk->rwb);
	if (!secs_avail)
		return 1;

	secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
		return 1;

	rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return 1;
	}

	bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
	if (!bio) {
		pr_err("pblk: cannot allocate write bio\n");
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
	if (secs_to_sync > pblk->max_write_pgs) {
		pr_err("pblk: bad buffer sync calculation\n");
		goto fail_put_bio;
	}

	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
								secs_avail)) {
		pr_err("pblk: corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, WRITE);

	return 1;
}

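/* Write thread main loop: keep submitting writes while there is work in the
 * write buffer; otherwise sleep until woken up again.
 */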
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}