/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *
 * Based upon the circular ringbuffer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rb.c - pblk's write buffer
 */

#include <linux/circ_buf.h>

#include "pblk.h"

static DECLARE_RWSEM(pblk_rb_lock);

void pblk_rb_data_free(struct pblk_rb *rb)
{
        struct pblk_rb_pages *p, *t;

        down_write(&pblk_rb_lock);
        list_for_each_entry_safe(p, t, &rb->pages, list) {
                free_pages((unsigned long)page_address(p->pages), p->order);
                list_del(&p->list);
                kfree(p);
        }
        up_write(&pblk_rb_lock);
}

/*
 * Initialize ring buffer. The data and metadata buffers must be previously
 * allocated and their size must be a power of two
 * (Documentation/circular-buffers.txt)
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
                 unsigned int power_size, unsigned int power_seg_sz)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int init_entry = 0;
        unsigned int alloc_order = power_size;
        unsigned int max_order = MAX_ORDER - 1;
        unsigned int order, iter;

        down_write(&pblk_rb_lock);
        rb->entries = rb_entry_base;
        rb->seg_size = (1 << power_seg_sz);
        rb->nr_entries = (1 << power_size);
        rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
        rb->flush_point = EMPTY_ENTRY;

        spin_lock_init(&rb->w_lock);
        spin_lock_init(&rb->s_lock);

        INIT_LIST_HEAD(&rb->pages);

        if (alloc_order >= max_order) {
                order = max_order;
                iter = (1 << (alloc_order - max_order));
        } else {
                order = alloc_order;
                iter = 1;
        }
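
        /*
         * Worked example: on a configuration where MAX_ORDER - 1 = 10,
         * power_size = 12 (a 4096-entry buffer) gives order = 10 and
         * iter = 1 << (12 - 10) = 4, so the loop below performs four
         * allocations covering 1 << 10 entries each; power_size = 7 is
         * served by a single allocation covering all 128 entries.
         */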
        do {
                struct pblk_rb_entry *entry;
                struct pblk_rb_pages *page_set;
                void *kaddr;
                unsigned long set_size;
                int i;

                page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
                if (!page_set) {
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }

                page_set->order = order;
                page_set->pages = alloc_pages(GFP_KERNEL, order);
                if (!page_set->pages) {
                        kfree(page_set);
                        pblk_rb_data_free(rb);
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }
                kaddr = page_address(page_set->pages);

                entry = &rb->entries[init_entry];
                entry->data = kaddr;
                entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;

                set_size = (1 << order);
                for (i = 1; i < set_size; i++) {
                        entry = &rb->entries[init_entry];
                        entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                        entry->data = kaddr + (i * rb->seg_size);
                        entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
                        bio_list_init(&entry->w_ctx.bios);
                }

                list_add_tail(&page_set->list, &rb->pages);
                iter--;
        } while (iter > 0);
        up_write(&pblk_rb_lock);

#ifdef CONFIG_NVM_DEBUG
        atomic_set(&rb->inflight_flush_point, 0);
#endif

        /*
         * Initialize rate-limiter, which controls access to the write buffer
         * by user and GC I/O
         */
        pblk_rl_init(&pblk->rl, rb->nr_entries);

        return 0;
}

/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
{
        /* Alloc a write buffer that can at least fit 128 entries */
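        /*
         * For example, nr_entries = 64 gives get_count_order(64) = 6, which
         * the max() below bumps up to order 7 (128 entries), while
         * nr_entries = 300 gives get_count_order(300) = 9 (512 entries).
         * The result is always a power of two, as pblk_rb_init() requires.
         */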
        return (1 << max(get_count_order(nr_entries), 7));
}

void *pblk_rb_entries_ref(struct pblk_rb *rb)
{
        return rb->entries;
}

static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
        int flags;

try:
        flags = READ_ONCE(w_ctx->flags);
        if (!(flags & PBLK_SUBMITTED_ENTRY))
                goto try;

        /* Release flags on context. Protect from writes and reads */
        smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
        pblk_ppa_set_empty(&w_ctx->ppa);
        w_ctx->lba = ADDR_EMPTY;
}

#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
#define pblk_rb_ring_space(rb, head, tail, size) \
                                        (CIRC_SPACE(head, tail, size))
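
/*
 * Example: on a ring of size 8 with head (mem) = 5 and tail (sync) = 2,
 * pblk_rb_ring_count() yields (5 - 2) & 7 = 3 occupied entries and
 * pblk_rb_ring_space() yields 4 free entries; CIRC_SPACE() keeps one slot
 * unused so that a full ring can be distinguished from an empty one.
 *
 * The ring pointers chase each other in order: mem (last written entry),
 * subm (submitted to the device), sync (persisted on the media) and
 * l2p_update (L2P entries re-pointed from cache to device).
 */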

/*
 * Buffer space is calculated with respect to the back pointer signaling
 * synchronized entries to the media.
 */
static unsigned int pblk_rb_space(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}

/*
 * Buffer count is calculated with respect to the submission entry signaling
 * the entries that are available to send to the media.
 */
unsigned int pblk_rb_read_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int subm = READ_ONCE(rb->subm);

        return pblk_rb_ring_count(mem, subm, rb->nr_entries);
}

unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_count(mem, sync, rb->nr_entries);
}

unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int subm;

        subm = READ_ONCE(rb->subm);
        /* Commit read means updating submission pointer */
        smp_store_release(&rb->subm,
                          (subm + nr_entries) & (rb->nr_entries - 1));

        return subm;
}

static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_line *line;
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        unsigned int user_io = 0, gc_io = 0;
        unsigned int i;
        int flags;

        for (i = 0; i < to_update; i++) {
                entry = &rb->entries[rb->l2p_update];
                w_ctx = &entry->w_ctx;

                flags = READ_ONCE(entry->w_ctx.flags);
                if (flags & PBLK_IOTYPE_USER)
                        user_io++;
                else if (flags & PBLK_IOTYPE_GC)
                        gc_io++;
                else
                        WARN(1, "pblk: unknown IO type\n");

                pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
                                    entry->cacheline);

                line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
                kref_put(&line->ref, pblk_line_put);
                clean_wctx(w_ctx);
                rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
        }

        pblk_rl_out(&pblk->rl, user_io, gc_io);

        return 0;
}

/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write buffer
 * from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
                              unsigned int mem, unsigned int sync)
{
        unsigned int space, count;
        int ret = 0;

        lockdep_assert_held(&rb->w_lock);

        /* Update l2p only as buffer entries are being overwritten */
        space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
        if (space > nr_entries)
                goto out;

        count = nr_entries - space;
        /* l2p_update used exclusively under rb->w_lock */
        ret = __pblk_rb_update_l2p(rb, count);

out:
        return ret;
}

/*
 * Update the l2p entry for all sectors stored on the write buffer. This means
 * that all future lookups to the l2p table will point to a device address, not
 * to the cacheline in the write buffer.
 */
void pblk_rb_sync_l2p(struct pblk_rb *rb)
{
        unsigned int sync;
        unsigned int to_update;

        spin_lock(&rb->w_lock);

        /* Protect from reads and writes */
        sync = smp_load_acquire(&rb->sync);

        to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
        __pblk_rb_update_l2p(rb, to_update);

        spin_unlock(&rb->w_lock);
}

/*
 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
 * Typically, 4KB data chunks coming from a bio will be copied to the ring
 * buffer, thus the write will fail if not all incoming data can be copied.
 */
static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
                                  struct pblk_w_ctx w_ctx,
                                  struct pblk_rb_entry *entry)
{
        memcpy(entry->data, data, rb->seg_size);

        entry->w_ctx.lba = w_ctx.lba;
        entry->w_ctx.ppa = w_ctx.ppa;
}

void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
                              struct pblk_w_ctx w_ctx, unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}

void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
                            struct pblk_w_ctx w_ctx, struct pblk_line *line,
                            u64 paddr, unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
                entry->w_ctx.lba = ADDR_EMPTY;

        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}

static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
                                   unsigned int pos)
{
        struct pblk_rb_entry *entry;
        unsigned int sync, flush_point;

        pblk_rb_sync_init(rb, NULL);
        sync = READ_ONCE(rb->sync);

        if (pos == sync) {
                pblk_rb_sync_end(rb, NULL);
                return 0;
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_inc(&rb->inflight_flush_point);
#endif
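
        /*
         * The flush point is the last entry that has to be persisted for this
         * request: the entry just before @pos, wrapping around to the top of
         * the ring when @pos is 0 (e.g. pos == 0 on a 128-entry ring maps to
         * flush_point 127).
         */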
        flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
        entry = &rb->entries[flush_point];

        /* Protect flush points */
        smp_store_release(&rb->flush_point, flush_point);

        if (bio)
                bio_list_add(&entry->w_ctx.bios, bio);

        pblk_rb_sync_end(rb, NULL);

        return bio ? 1 : 0;
}

static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                               unsigned int *pos)
{
        unsigned int mem;
        unsigned int sync;

        sync = READ_ONCE(rb->sync);
        mem = READ_ONCE(rb->mem);

        if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries)
                return 0;

        if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
                return 0;

        *pos = mem;

        return 1;
}

static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                             unsigned int *pos)
{
        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        /* Protect from read count */
        smp_store_release(&rb->mem, (*pos + nr_entries) & (rb->nr_entries - 1));
        return 1;
}

void pblk_rb_flush(struct pblk_rb *rb)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int mem = READ_ONCE(rb->mem);

        if (pblk_rb_flush_point_set(rb, NULL, mem))
                return;

        pblk_write_should_kick(pblk);
}

static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
                                   unsigned int *pos, struct bio *bio,
                                   int *io_ret)
{
        unsigned int mem;

        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        mem = (*pos + nr_entries) & (rb->nr_entries - 1);
        *io_ret = NVM_IO_DONE;

        if (bio->bi_opf & REQ_PREFLUSH) {
                struct pblk *pblk = container_of(rb, struct pblk, rwb);

                atomic64_inc(&pblk->nr_flush);
                if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
                        *io_ret = NVM_IO_OK;
        }

        /* Protect from read count */
        smp_store_release(&rb->mem, mem);

        return 1;
}

/*
 * Atomically check that (i) there is space on the write buffer for the
 * incoming I/O, and (ii) the current I/O type has enough budget in the write
 * buffer (rate-limiter).
 */
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
                           unsigned int nr_entries, unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        int io_ret;

        spin_lock(&rb->w_lock);
        io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
        if (io_ret) {
                spin_unlock(&rb->w_lock);
                return io_ret;
        }

        if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
                spin_unlock(&rb->w_lock);
                return NVM_IO_REQUEUE;
        }

        pblk_rl_user_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return io_ret;
}

/*
 * Look at pblk_rb_may_write_user comment
 */
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
                         unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);

        spin_lock(&rb->w_lock);
        if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        if (!pblk_rb_may_write(rb, nr_entries, pos)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        pblk_rl_gc_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return 1;
}

/*
 * The caller of this function must ensure that the backpointer will not
 * overwrite the entries passed on the list.
 */
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
                                      struct list_head *list,
                                      unsigned int max)
{
        struct pblk_rb_entry *entry, *tentry;
        struct page *page;
        unsigned int read = 0;
        int ret;

        list_for_each_entry_safe(entry, tentry, list, index) {
                if (read > max) {
                        pr_err("pblk: too many entries on list\n");
                        goto out;
                }

                page = virt_to_page(entry->data);
                if (!page) {
                        pr_err("pblk: could not allocate write bio page\n");
                        goto out;
                }

                ret = bio_add_page(bio, page, rb->seg_size, 0);
                if (ret != rb->seg_size) {
                        pr_err("pblk: could not add page to write bio\n");
                        goto out;
                }

                list_del(&entry->index);
                read++;
        }

out:
        return read;
}

/*
 * Read available entries on rb and add them to the given bio. To avoid a memory
 * copy, a page reference to the write buffer is used to be added to the bio.
 *
 * This function is used by the write thread to form the write bio that will
 * persist data on the write buffer to the media.
 */
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
                                 unsigned int pos, unsigned int nr_entries,
                                 unsigned int count)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct request_queue *q = pblk->dev->q;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;
        struct pblk_rb_entry *entry;
        struct page *page;
        unsigned int pad = 0, to_read = nr_entries;
        unsigned int i;
        int flags;

        if (count < nr_entries) {
                pad = nr_entries - count;
                to_read = count;
        }

        c_ctx->sentry = pos;
        c_ctx->nr_valid = to_read;
        c_ctx->nr_padded = pad;

        for (i = 0; i < to_read; i++) {
                entry = &rb->entries[pos];

                /* A write has been allowed into the buffer, but data is still
                 * being copied to it. It is ok to busy wait.
                 */
try:
                flags = READ_ONCE(entry->w_ctx.flags);
                if (!(flags & PBLK_WRITTEN_DATA)) {
                        io_schedule();
                        goto try;
                }

                page = virt_to_page(entry->data);
                if (!page) {
                        pr_err("pblk: could not allocate write bio page\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
                                                                rb->seg_size) {
                        pr_err("pblk: could not add page to write bio\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                flags &= ~PBLK_WRITTEN_DATA;
                flags |= PBLK_SUBMITTED_ENTRY;

                /* Release flags on context. Protect from writes */
                smp_store_release(&entry->w_ctx.flags, flags);

                pos = (pos + 1) & (rb->nr_entries - 1);
        }

        if (pad) {
                if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
                        pr_err("pblk: could not pad page in write bio\n");
                        return NVM_IO_ERR;
                }

                if (pad < pblk->min_write_pgs)
                        atomic64_inc(&pblk->pad_dist[pad - 1]);
                else
                        pr_warn("pblk: padding more than min. sectors\n");

                atomic64_add(pad, &pblk->pad_wa);
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(pad, &pblk->padded_writes);
#endif

        return NVM_IO_OK;
}

/*
 * Copy to bio only if the lba matches the one on the given cache entry.
 * Otherwise, it means that the entry has been overwritten, and the bio should
 * be directed to disk.
 */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
                        struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        struct ppa_addr l2p_ppa;
        u64 pos = pblk_addr_to_cacheline(ppa);
        void *data;
        int flags;
        int ret = 1;

#ifdef CONFIG_NVM_DEBUG
        /* Caller must ensure that the access will not cause an overflow */
        BUG_ON(pos >= rb->nr_entries);
#endif
        entry = &rb->entries[pos];
        w_ctx = &entry->w_ctx;
        flags = READ_ONCE(w_ctx->flags);

        spin_lock(&rb->w_lock);
        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        /* Check if the entry has been overwritten or is scheduled to be */
        if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
                                                flags & PBLK_WRITABLE_ENTRY) {
                ret = 0;
                goto out;
        }

        /* Only advance the bio if it hasn't been advanced already. If advanced,
         * this bio is at least a partial bio (i.e., it has partially been
         * filled with data from the cache). If part of the data resides on the
         * media, we will read later on
         */
        if (unlikely(!advanced_bio))
                bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);

        data = bio_data(bio);
        memcpy(data, entry->data, rb->seg_size);

out:
        spin_unlock(&rb->w_lock);
        return ret;
}
*pblk_rb_w_ctx(struct pblk_rb
*rb
, unsigned int pos
)
694 unsigned int entry
= pos
& (rb
->nr_entries
- 1);
696 return &rb
->entries
[entry
].w_ctx
;

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
        __acquires(&rb->s_lock)
{
        if (flags)
                spin_lock_irqsave(&rb->s_lock, *flags);
        else
                spin_lock_irq(&rb->s_lock);

        return rb->sync;
}

void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
        __releases(&rb->s_lock)
{
        lockdep_assert_held(&rb->s_lock);

        if (flags)
                spin_unlock_irqrestore(&rb->s_lock, *flags);
        else
                spin_unlock_irq(&rb->s_lock);
}

unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int sync, flush_point;

        lockdep_assert_held(&rb->s_lock);

        sync = READ_ONCE(rb->sync);
        flush_point = READ_ONCE(rb->flush_point);

        if (flush_point != EMPTY_ENTRY) {
                unsigned int secs_to_flush;

                secs_to_flush = pblk_rb_ring_count(flush_point, sync,
                                                   rb->nr_entries);
                if (secs_to_flush < nr_entries) {
                        /* Protect flush points */
                        smp_store_release(&rb->flush_point, EMPTY_ENTRY);
                }
        }

        sync = (sync + nr_entries) & (rb->nr_entries - 1);

        /* Protect from counts */
        smp_store_release(&rb->sync, sync);

        return sync;
}

/*
 * Calculate how many sectors to submit up to the current flush point.
 */
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
        unsigned int subm, sync, flush_point;
        unsigned int submitted, to_flush;

        /* Protect flush points */
        flush_point = smp_load_acquire(&rb->flush_point);
        if (flush_point == EMPTY_ENTRY)
                return 0;

        /* Protect syncs */
        sync = smp_load_acquire(&rb->sync);

        subm = READ_ONCE(rb->subm);
        submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);

        /* The sync point itself counts as a sector to sync */
        to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;
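
        /*
         * Example: on a ring of 8 entries with sync = 2, subm = 4 and
         * flush_point = 6, submitted = 2 (entries 2 and 3 are already in
         * flight) and to_flush = 5 (entries 2 through 6, flush point
         * included), so 3 more sectors still have to be submitted.
         */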
        return (submitted < to_flush) ? (to_flush - submitted) : 0;
}

/*
 * Scan from the current position of the sync pointer to find the entry that
 * corresponds to the given ppa. This is necessary since write requests can be
 * completed out of order. The assumption is that the ppa is close to the sync
 * pointer thus the search will not take long.
 *
 * The caller of this function must guarantee that the sync pointer will not
 * reach the entry while it is using the metadata associated with it. With this
 * assumption in mind, there is no need to take the sync lock.
 */
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
                                              struct ppa_addr *ppa)
{
        unsigned int sync, subm, count;
        unsigned int i;

        sync = READ_ONCE(rb->sync);
        subm = READ_ONCE(rb->subm);
        count = pblk_rb_ring_count(subm, sync, rb->nr_entries);

        for (i = 0; i < count; i++)
                sync = (sync + 1) & (rb->nr_entries - 1);

        return NULL;
}

int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
        struct pblk_rb_entry *entry;
        int i;
        int ret = 0;

        spin_lock(&rb->w_lock);
        spin_lock_irq(&rb->s_lock);

        if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
                                (rb->sync == rb->l2p_update) &&
                                (rb->flush_point == EMPTY_ENTRY)) {
                goto out;
        }

        if (!rb->entries) {
                ret = 1;
                goto out;
        }

        for (i = 0; i < rb->nr_entries; i++) {
                entry = &rb->entries[i];

                if (!entry->data) {
                        ret = 1;
                        goto out;
                }
        }

out:
        spin_unlock(&rb->w_lock);
        spin_unlock_irq(&rb->s_lock);

        return ret;
}

unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
{
        return (pos & (rb->nr_entries - 1));
}

int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
{
        return (pos >= rb->nr_entries);
}

ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_c_ctx *c;
        ssize_t offset;
        int queued_entries = 0;

        spin_lock_irq(&rb->s_lock);
        list_for_each_entry(c, &pblk->compl_list, list)
                queued_entries++;
        spin_unlock_irq(&rb->s_lock);

        if (rb->flush_point != EMPTY_ENTRY)
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_flush_point),
#else
                        0,
#endif
                        rb->flush_point,
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_flush_point_count(rb),
                        queued_entries);
        else
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_flush_point),
#else
                        0,
#endif
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_flush_point_count(rb),
                        queued_entries);

        return offset;
}