/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *
 * Based upon the circular ringbuffer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rb.c - pblk's write buffer
 */
#include <linux/circ_buf.h>

#include "pblk.h"
static DECLARE_RWSEM(pblk_rb_lock);
void pblk_rb_data_free(struct pblk_rb *rb)
{
        struct pblk_rb_pages *p, *t;

        down_write(&pblk_rb_lock);
        list_for_each_entry_safe(p, t, &rb->pages, list) {
                free_pages((unsigned long)page_address(p->pages), p->order);
                list_del(&p->list);
                kfree(p);
        }
        up_write(&pblk_rb_lock);
}
/*
 * Initialize ring buffer. The data and metadata buffers must be previously
 * allocated and their size must be a power of two
 * (Documentation/circular-buffers.txt)
 */
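/*
 * Example sizing (illustrative values, not a requirement of the code):
 * power_size = 7 and power_seg_sz = 12 describe a buffer of (1 << 7) = 128
 * entries of (1 << 12) = 4096 bytes each, i.e. 512 KB of cached data.
 */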
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
                 unsigned int power_size, unsigned int power_seg_sz)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int init_entry = 0;
        unsigned int alloc_order = power_size;
        unsigned int max_order = MAX_ORDER - 1;
        unsigned int order, iter;

        down_write(&pblk_rb_lock);
        rb->entries = rb_entry_base;
        rb->seg_size = (1 << power_seg_sz);
        rb->nr_entries = (1 << power_size);
        rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
        rb->sync_point = EMPTY_ENTRY;

        spin_lock_init(&rb->w_lock);
        spin_lock_init(&rb->s_lock);

        INIT_LIST_HEAD(&rb->pages);

        if (alloc_order >= max_order) {
                order = max_order;
                iter = (1 << (alloc_order - max_order));
        } else {
                order = alloc_order;
                iter = 1;
        }

        do {
                struct pblk_rb_entry *entry;
                struct pblk_rb_pages *page_set;
                void *kaddr;
                unsigned long set_size;
                int i;

                page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
                if (!page_set) {
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }

                page_set->order = order;
                page_set->pages = alloc_pages(GFP_KERNEL, order);
                if (!page_set->pages) {
                        kfree(page_set);
                        pblk_rb_data_free(rb);
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }
                kaddr = page_address(page_set->pages);

                entry = &rb->entries[init_entry];
                entry->data = kaddr;
                entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;

                set_size = (1 << order);
                for (i = 1; i < set_size; i++) {
                        entry = &rb->entries[init_entry];
                        entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                        entry->data = kaddr + (i * rb->seg_size);
                        entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
                        bio_list_init(&entry->w_ctx.bios);
                }

                list_add_tail(&page_set->list, &rb->pages);
                iter--;
        } while (iter > 0);
        up_write(&pblk_rb_lock);

#ifdef CONFIG_NVM_DEBUG
        atomic_set(&rb->inflight_sync_point, 0);
#endif

        /*
         * Initialize rate-limiter, which controls access to the write buffer
         * by user and GC I/O
         */
        pblk_rl_init(&pblk->rl, rb->nr_entries);

        return 0;
}
/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
{
        /* Alloc a write buffer that can at least fit 128 entries */
        return (1 << max(get_count_order(nr_entries), 7));
}
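/*
 * Worked example: get_count_order(100) = 7, so a request for 100 entries is
 * rounded up to (1 << 7) = 128; get_count_order(300) = 9 yields 512 entries.
 * Anything below 128 is clamped to the 128-entry minimum by the max() above.
 */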
void *pblk_rb_entries_ref(struct pblk_rb *rb)
{
        return rb->entries;
}
static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
        int flags;

try:
        flags = READ_ONCE(w_ctx->flags);
        if (!(flags & PBLK_SUBMITTED_ENTRY))
                goto try;

        /* Release flags on context. Protect from writes and reads */
        smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
        pblk_ppa_set_empty(&w_ctx->ppa);
        w_ctx->lba = ADDR_EMPTY;
}
#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
#define pblk_rb_ring_space(rb, head, tail, size) \
                                        (CIRC_SPACE(head, tail, size))

/*
 * Buffer space is calculated with respect to the back pointer signaling
 * synchronized entries to the media.
 */
static unsigned int pblk_rb_space(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}
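/*
 * Worked example of the CIRC_* math on a power-of-two ring: with
 * nr_entries = 8, mem (head) = 5 and sync (tail) = 2,
 *   CIRC_CNT(5, 2, 8)   = (5 - 2) & 7 = 3 entries pending,
 *   CIRC_SPACE(5, 2, 8) = (2 - 5 - 1) & 7 = 4 free slots;
 * one slot always stays unused so that a full ring and an empty ring can be
 * told apart.
 */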
/*
 * Buffer count is calculated with respect to the submission entry signaling the
 * entries that are available to send to the media
 */
unsigned int pblk_rb_read_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int subm = READ_ONCE(rb->subm);

        return pblk_rb_ring_count(mem, subm, rb->nr_entries);
}
unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_count(mem, sync, rb->nr_entries);
}
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int subm;

        subm = READ_ONCE(rb->subm);
        /* Commit read means updating submission pointer */
        smp_store_release(&rb->subm,
                                (subm + nr_entries) & (rb->nr_entries - 1));

        return subm;
}
static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
                                unsigned int to_update)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_line *line;
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        unsigned int user_io = 0, gc_io = 0;
        unsigned int i;
        int flags;

        for (i = 0; i < to_update; i++) {
                entry = &rb->entries[*l2p_upd];
                w_ctx = &entry->w_ctx;

                flags = READ_ONCE(entry->w_ctx.flags);
                if (flags & PBLK_IOTYPE_USER)
                        user_io++;
                else if (flags & PBLK_IOTYPE_GC)
                        gc_io++;
                else
                        WARN(1, "pblk: unknown IO type\n");

                pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
                                                        entry->cacheline);

                line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
                kref_put(&line->ref, pblk_line_put);
                clean_wctx(w_ctx);
                *l2p_upd = (*l2p_upd + 1) & (rb->nr_entries - 1);
        }

        pblk_rl_out(&pblk->rl, user_io, gc_io);

        return 0;
}
/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write buffer
 * from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
                              unsigned int mem, unsigned int sync)
{
        unsigned int space, count;
        int ret = 0;

        lockdep_assert_held(&rb->w_lock);

        /* Update l2p only as buffer entries are being overwritten */
        space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
        if (space > nr_entries)
                goto out;

        count = nr_entries - space;
        /* l2p_update used exclusively under rb->w_lock */
        ret = __pblk_rb_update_l2p(rb, &rb->l2p_update, count);

out:
        return ret;
}
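/*
 * Worked example: if only 2 free entries remain between mem and l2p_update
 * and an incoming write needs nr_entries = 4, then count = 4 - 2 = 2 cached
 * entries must have their l2p mappings pointed at the device before they can
 * be overwritten.
 */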
/*
 * Update the l2p entry for all sectors stored on the write buffer. This means
 * that all future lookups to the l2p table will point to a device address, not
 * to the cacheline in the write buffer.
 */
void pblk_rb_sync_l2p(struct pblk_rb *rb)
{
        unsigned int sync;
        unsigned int to_update;

        spin_lock(&rb->w_lock);

        /* Protect from reads and writes */
        sync = smp_load_acquire(&rb->sync);

        to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
        __pblk_rb_update_l2p(rb, &rb->l2p_update, to_update);

        spin_unlock(&rb->w_lock);
}
/*
 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
 * Typically, 4KB data chunks coming from a bio will be copied to the ring
 * buffer, thus the write will fail if not all incoming data can be copied.
 */
static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
                                  struct pblk_w_ctx w_ctx,
                                  struct pblk_rb_entry *entry)
{
        memcpy(entry->data, data, rb->seg_size);

        entry->w_ctx.lba = w_ctx.lba;
        entry->w_ctx.ppa = w_ctx.ppa;
}
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
                              struct pblk_w_ctx w_ctx, unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
                            struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
                            unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, gc_line))
                entry->w_ctx.lba = ADDR_EMPTY;

        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}
static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
                                  unsigned int pos)
{
        struct pblk_rb_entry *entry;
        unsigned int subm, sync_point;
        int flags;

        subm = READ_ONCE(rb->subm);

#ifdef CONFIG_NVM_DEBUG
        atomic_inc(&rb->inflight_sync_point);
#endif

        if (pos == subm)
                return 0;

        sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
        entry = &rb->entries[sync_point];

        flags = READ_ONCE(entry->w_ctx.flags);
        flags |= PBLK_FLUSH_ENTRY;

        /* Release flags on context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);

        /* Protect syncs */
        smp_store_release(&rb->sync_point, sync_point);

        if (!bio)
                return 0;

        spin_lock_irq(&rb->s_lock);
        bio_list_add(&entry->w_ctx.bios, bio);
        spin_unlock_irq(&rb->s_lock);

        return 1;
}
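/*
 * Worked example: the sync point is the last entry written before the flush,
 * i.e. the one preceding @pos. With nr_entries = 8 and pos = 0 the sync point
 * wraps to entry 7; with pos = 5 it is entry 4.
 */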
static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                               unsigned int *pos)
{
        unsigned int mem;
        unsigned int sync;

        sync = READ_ONCE(rb->sync);
        mem = READ_ONCE(rb->mem);

        if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries)
                return 0;

        if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
                return 0;

        *pos = mem;

        return 1;
}

static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                             unsigned int *pos)
{
        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        /* Protect from read count */
        smp_store_release(&rb->mem, (*pos + nr_entries) & (rb->nr_entries - 1));
        return 1;
}
void pblk_rb_flush(struct pblk_rb *rb)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int mem = READ_ONCE(rb->mem);

        if (pblk_rb_sync_point_set(rb, NULL, mem))
                return;

        pblk_write_should_kick(pblk);
}
static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
                                   unsigned int *pos, struct bio *bio,
                                   int *io_ret)
{
        unsigned int mem;

        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        mem = (*pos + nr_entries) & (rb->nr_entries - 1);
        *io_ret = NVM_IO_DONE;

        if (bio->bi_opf & REQ_PREFLUSH) {
                struct pblk *pblk = container_of(rb, struct pblk, rwb);

#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->nr_flush);
#endif
                if (pblk_rb_sync_point_set(&pblk->rwb, bio, mem))
                        *io_ret = NVM_IO_OK;
        }

        /* Protect from read count */
        smp_store_release(&rb->mem, mem);

        return 1;
}
/*
 * Atomically check that (i) there is space on the write buffer for the
 * incoming I/O, and (ii) the current I/O type has enough budget in the write
 * buffer (rate-limiter).
 */
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
                           unsigned int nr_entries, unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        int io_ret;

        spin_lock(&rb->w_lock);
        io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
        if (io_ret) {
                spin_unlock(&rb->w_lock);
                return io_ret;
        }

        if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
                spin_unlock(&rb->w_lock);
                return NVM_IO_REQUEUE;
        }

        pblk_rl_user_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return io_ret;
}
/*
 * Look at pblk_rb_may_write_user comment
 */
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
                         unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);

        spin_lock(&rb->w_lock);
        if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        if (!pblk_rb_may_write(rb, nr_entries, pos)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        pblk_rl_gc_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return 1;
}
/*
 * The caller of this function must ensure that the backpointer will not
 * overwrite the entries passed on the list.
 */
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
                                      struct list_head *list,
                                      unsigned int max)
{
        struct pblk_rb_entry *entry, *tentry;
        struct page *page;
        unsigned int read = 0;
        int ret;

        list_for_each_entry_safe(entry, tentry, list, index) {
                if (read > max) {
                        pr_err("pblk: too many entries on list\n");
                        goto out;
                }

                page = virt_to_page(entry->data);
                if (!page) {
                        pr_err("pblk: could not allocate write bio page\n");
                        goto out;
                }

                ret = bio_add_page(bio, page, rb->seg_size, 0);
                if (ret != rb->seg_size) {
                        pr_err("pblk: could not add page to write bio\n");
                        goto out;
                }

                list_del(&entry->index);
                read++;
        }

out:
        return read;
}
/*
 * Read available entries on rb and add them to the given bio. To avoid a memory
 * copy, a page reference to the write buffer is used to be added to the bio.
 *
 * This function is used by the write thread to form the write bio that will
 * persist data on the write buffer to the media.
 */
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
                                 struct bio *bio, unsigned int pos,
                                 unsigned int nr_entries, unsigned int count)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct request_queue *q = pblk->dev->q;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_rb_entry *entry;
        struct page *page;
        unsigned int pad = 0, to_read = nr_entries;
        unsigned int i;
        int flags;

        if (count < nr_entries) {
                pad = nr_entries - count;
                to_read = count;
        }

        c_ctx->sentry = pos;
        c_ctx->nr_valid = to_read;
        c_ctx->nr_padded = pad;

        for (i = 0; i < to_read; i++) {
                entry = &rb->entries[pos];

                /* A write has been allowed into the buffer, but data is still
                 * being copied to it. It is ok to busy wait.
                 */
try:
                flags = READ_ONCE(entry->w_ctx.flags);
                if (!(flags & PBLK_WRITTEN_DATA)) {
                        io_schedule();
                        goto try;
                }

                page = virt_to_page(entry->data);
                if (!page) {
                        pr_err("pblk: could not allocate write bio page\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
                                                                rb->seg_size) {
                        pr_err("pblk: could not add page to write bio\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                if (flags & PBLK_FLUSH_ENTRY) {
                        unsigned int sync_point;

                        sync_point = READ_ONCE(rb->sync_point);
                        if (sync_point == pos) {
                                /* Protect syncs */
                                smp_store_release(&rb->sync_point, EMPTY_ENTRY);
                        }

                        flags &= ~PBLK_FLUSH_ENTRY;
#ifdef CONFIG_NVM_DEBUG
                        atomic_dec(&rb->inflight_sync_point);
#endif
                }

                flags &= ~PBLK_WRITTEN_DATA;
                flags |= PBLK_SUBMITTED_ENTRY;

                /* Release flags on context. Protect from writes */
                smp_store_release(&entry->w_ctx.flags, flags);

                pos = (pos + 1) & (rb->nr_entries - 1);
        }

        if (pad) {
                if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
                        pr_err("pblk: could not pad page in write bio\n");
                        return NVM_IO_ERR;
                }
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(pad, &((struct pblk *)
                        (container_of(rb, struct pblk, rwb)))->padded_writes);
#endif

        return NVM_IO_OK;
}
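/*
 * Worked example of the padding above: if the write thread must send
 * nr_entries = 8 sectors to fill a write unit but only count = 5 are
 * available in the buffer, then to_read = 5 and pad = 3 pad pages are
 * appended to the bio.
 */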
/*
 * Copy to bio only if the lba matches the one on the given cache entry.
 * Otherwise, it means that the entry has been overwritten, and the bio should
 * be directed to disk.
 */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
                        struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        struct ppa_addr l2p_ppa;
        u64 pos = pblk_addr_to_cacheline(ppa);
        void *data;
        int flags;
        int ret = 1;

#ifdef CONFIG_NVM_DEBUG
        /* Caller must ensure that the access will not cause an overflow */
        BUG_ON(pos >= rb->nr_entries);
#endif
        entry = &rb->entries[pos];
        w_ctx = &entry->w_ctx;
        flags = READ_ONCE(w_ctx->flags);

        spin_lock(&rb->w_lock);
        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        /* Check if the entry has been overwritten or is scheduled to be */
        if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
                                                flags & PBLK_WRITABLE_ENTRY) {
                ret = 0;
                goto out;
        }

        /* Only advance the bio if it hasn't been advanced already. If advanced,
         * this bio is at least a partial bio (i.e., it has partially been
         * filled with data from the cache). If part of the data resides on the
         * media, we will read later on
         */
        if (unlikely(!advanced_bio))
                bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);

        data = bio_data(bio);
        memcpy(data, entry->data, rb->seg_size);

out:
        spin_unlock(&rb->w_lock);
        return ret;
}
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
        unsigned int entry = pos & (rb->nr_entries - 1);

        return &rb->entries[entry].w_ctx;
}
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
        __acquires(&rb->s_lock)
{
        if (flags)
                spin_lock_irqsave(&rb->s_lock, *flags);
        else
                spin_lock_irq(&rb->s_lock);

        return rb->sync;
}
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
        __releases(&rb->s_lock)
{
        lockdep_assert_held(&rb->s_lock);

        if (flags)
                spin_unlock_irqrestore(&rb->s_lock, *flags);
        else
                spin_unlock_irq(&rb->s_lock);
}
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int sync;
        unsigned int i;

        lockdep_assert_held(&rb->s_lock);

        sync = READ_ONCE(rb->sync);

        for (i = 0; i < nr_entries; i++)
                sync = (sync + 1) & (rb->nr_entries - 1);

        /* Protect from counts */
        smp_store_release(&rb->sync, sync);

        return sync;
}
unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb)
{
        unsigned int subm, sync_point;
        unsigned int count;

        /* Protect syncs */
        sync_point = smp_load_acquire(&rb->sync_point);
        if (sync_point == EMPTY_ENTRY)
                return 0;

        subm = READ_ONCE(rb->subm);

        /* The sync point itself counts as a sector to sync */
        count = pblk_rb_ring_count(sync_point, subm, rb->nr_entries) + 1;

        return count;
}
/*
 * Scan from the current position of the sync pointer to find the entry that
 * corresponds to the given ppa. This is necessary since write requests can be
 * completed out of order. The assumption is that the ppa is close to the sync
 * pointer thus the search will not take long.
 *
 * The caller of this function must guarantee that the sync pointer will not
 * reach the entry while it is using the metadata associated with it. With this
 * assumption in mind, there is no need to take the sync lock.
 */
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
                                              struct ppa_addr *ppa)
{
        unsigned int sync, subm, count;
        unsigned int i;

        sync = READ_ONCE(rb->sync);
        subm = READ_ONCE(rb->subm);
        count = pblk_rb_ring_count(subm, sync, rb->nr_entries);

        for (i = 0; i < count; i++)
                sync = (sync + 1) & (rb->nr_entries - 1);

        return NULL;
}
int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
        struct pblk_rb_entry *entry;
        int i;
        int ret = 0;

        spin_lock(&rb->w_lock);
        spin_lock_irq(&rb->s_lock);

        if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
                                (rb->sync == rb->l2p_update) &&
                                (rb->sync_point == EMPTY_ENTRY)) {
                goto out;
        }

        if (!rb->entries) {
                ret = 1;
                goto out;
        }

        for (i = 0; i < rb->nr_entries; i++) {
                entry = &rb->entries[i];

                if (!entry->data) {
                        ret = 1;
                        goto out;
                }
        }

out:
        spin_unlock(&rb->w_lock);
        spin_unlock_irq(&rb->s_lock);

        return ret;
}
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
{
        return (pos & (rb->nr_entries - 1));
}

int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
{
        return (pos >= rb->nr_entries);
}
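/*
 * Worked example: with nr_entries = 128, pblk_rb_wrap_pos(rb, 130) returns
 * 130 & 127 = 2, while pblk_rb_pos_oob(rb, 130) returns true since 130 is not
 * a valid index into the 0..127 range.
 */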
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_c_ctx *c;
        ssize_t offset;
        int queued_entries = 0;

        spin_lock_irq(&rb->s_lock);
        list_for_each_entry(c, &pblk->compl_list, list)
                queued_entries++;
        spin_unlock_irq(&rb->s_lock);

        if (rb->sync_point != EMPTY_ENTRY)
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_sync_point),
#else
                        0,
#endif
                        rb->sync_point,
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_sync_point_count(rb),
                        queued_entries);
        else
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_sync_point),
#else
                        0,
#endif
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_sync_point_count(rb),
                        queued_entries);

        return offset;
}