drivers/lightnvm/pblk-write.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
#include "pblk-trace.h"

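/*
 * Complete the write buffer entries backing a finished write request:
 * clear flush flags, end the bios waiting on those flush points, free
 * padded pages and advance the buffer's sync pointer.
 */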
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

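/*
 * Completions must be processed in the order in which entries were
 * committed to the write buffer. If this request does not start at the
 * current sync position, queue it on compl_list until the preceding
 * requests have completed.
 */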
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
	pblk_up_rq(pblk, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
			       int rqd_ppas)
{
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
	__le64 *lba_list;
	u64 paddr;
	int done = 0;
	int n = 0;

	line = pblk_ppa_to_line(pblk, *ppa);
	lba_list = emeta_to_lbas(pblk, line->emeta->buf);

	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (n < rqd_ppas && lba_list[paddr] != addr_empty)
			line->nr_valid_lbas--;

		lba_list[paddr] = addr_empty;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);

		n++;
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}

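/*
 * Prepare failed write buffer entries for resubmission: drop LBAs that
 * have since been overwritten, mark the entries as submittable again
 * and release the line references taken when they were first mapped.
 */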
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		if (w_ctx->lba != ADDR_EMPTY) {
			ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
			if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
				w_ctx->lba = ADDR_EMPTY;
		}

		/* Mark up the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);

		/* Decrease the reference count to the line as we will
		 * re-map these entries
		 */
		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		kref_put(&line->ref, pblk_line_put);
	}
	spin_unlock(&pblk->trans_lock);
}

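/* Queue the failed entries so that the write thread picks them up again */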
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}

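/*
 * Write error recovery work: log the error, invalidate the remaining
 * sectors of the failed chunk and queue the affected write buffer
 * entries for resubmission.
 */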
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	pblk_log_write_err(pblk, rqd);

	pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
}

static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pblk_err(pblk, "could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}

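/* Completion path for user data writes; kick off recovery on error */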
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif
	}

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

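/*
 * Completion path for emeta writes. Once all emeta sectors of the line
 * have synced, schedule the line close work.
 */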
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int sync;

	pblk_up_chunk(pblk, ppa_list[0]);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}

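/* Set up the common fields of a write request and allocate its metadata */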
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->is_seq = 1;
	rqd->private = pblk;
	rqd->end_io = end_io;

	return pblk_alloc_rqd_meta(pblk, rqd);
}

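/*
 * Map the buffered sectors to physical addresses on the current data
 * line. If the line to be erased next still has blocks pending erase,
 * use the erase-aware mapping so an erase can be scheduled alongside
 * this write.
 */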
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, 0);
	else
		ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return ret;
}

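/* Decide how many sectors to write in this pass; sanity-check in debug builds */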
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

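/*
 * Write the next chunk of end-of-line metadata (emeta) for the given
 * line. The line is removed from the emeta list once all of its emeta
 * has been submitted.
 */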
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct ppa_addr *ppa_list;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "failed to map emeta io\n");
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	spin_lock(&l_mg->close_lock);
	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0])
		list_del(&meta_line->list);
	spin_unlock(&l_mg->close_lock);

	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_chunk(pblk, ppa_list[0]);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* If the line was taken off the emeta list above, put it back */
	if (emeta->mem >= lm->emeta_len[0])
		list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regard to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance to not be optimal, but move the
	 * optimal in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}

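/*
 * Return the line whose emeta should be written next, or NULL if there
 * is no emeta pending or if a metadata write would collide with the
 * data I/O on the same LUNs.
 */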
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}

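/*
 * Submit the data write and, when applicable, an erase for the next
 * data line and an emeta write for a previous line.
 */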
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pblk_err(pblk, "could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		pblk_err(pblk, "data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pblk_err(pblk, "metadata I/O submission failed: %d\n",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}

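/*
 * Build a write request from the write buffer (or from entries queued
 * for resubmission after a write error) and send it to the device.
 */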
static int pblk_submit_write(struct pblk *pblk, int *secs_left)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush, packed_meta_pgs;
	unsigned long pos;
	unsigned int resubmit;

	*secs_left = 0;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;

		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache,
		 * flushes (bios without data) will be cleared on
		 * the cache threads
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 0;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
			return 0;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pblk_err(pblk, "bad buffer sync calculation\n");
			return 0;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
	bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pblk_err(pblk, "corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	*secs_left = 1;
	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return -EINTR;
}

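/*
 * Write thread main loop: keep draining the write buffer; sleep when
 * there is nothing to submit and stop issuing writes after a fatal
 * submission error.
 */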
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;
	int secs_left;
	int write_failure = 0;

	while (!kthread_should_stop()) {
		if (!write_failure) {
			write_failure = pblk_submit_write(pblk, &secs_left);

			if (secs_left)
				continue;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}