/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *
 * Based upon the circular ringbuffer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rb.c - pblk's write buffer
 */

#include <linux/circ_buf.h>

#include "pblk.h"

static DECLARE_RWSEM(pblk_rb_lock);

void pblk_rb_data_free(struct pblk_rb *rb)
{
        struct pblk_rb_pages *p, *t;

        down_write(&pblk_rb_lock);
        list_for_each_entry_safe(p, t, &rb->pages, list) {
                free_pages((unsigned long)page_address(p->pages), p->order);
                list_del(&p->list);
                kfree(p);
        }
        up_write(&pblk_rb_lock);
}

/*
 * Initialize ring buffer. The data and metadata buffers must be previously
 * allocated and their size must be a power of two
 * (Documentation/circular-buffers.txt)
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
                 unsigned int power_size, unsigned int power_seg_sz)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int init_entry = 0;
        unsigned int alloc_order = power_size;
        unsigned int max_order = MAX_ORDER - 1;
        unsigned int order, iter;

        down_write(&pblk_rb_lock);
        rb->entries = rb_entry_base;
        rb->seg_size = (1 << power_seg_sz);
        rb->nr_entries = (1 << power_size);
        rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
        rb->sync_point = EMPTY_ENTRY;

        spin_lock_init(&rb->w_lock);
        spin_lock_init(&rb->s_lock);

        INIT_LIST_HEAD(&rb->pages);

        if (alloc_order >= max_order) {
                order = max_order;
                iter = (1 << (alloc_order - max_order));
        } else {
                order = alloc_order;
                iter = 1;
        }

        do {
                struct pblk_rb_entry *entry;
                struct pblk_rb_pages *page_set;
                void *kaddr;
                unsigned long set_size;
                int i;

                page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
                if (!page_set) {
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }

                page_set->order = order;
                page_set->pages = alloc_pages(GFP_KERNEL, order);
                if (!page_set->pages) {
                        kfree(page_set);
                        pblk_rb_data_free(rb);
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }
                kaddr = page_address(page_set->pages);

                entry = &rb->entries[init_entry];
                entry->data = kaddr;
                entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;

                set_size = (1 << order);
                for (i = 1; i < set_size; i++) {
                        entry = &rb->entries[init_entry];
                        entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                        entry->data = kaddr + (i * rb->seg_size);
                        entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
                        bio_list_init(&entry->w_ctx.bios);
                }

                list_add_tail(&page_set->list, &rb->pages);
                iter--;
        } while (iter > 0);
        up_write(&pblk_rb_lock);

#ifdef CONFIG_NVM_DEBUG
        atomic_set(&rb->inflight_sync_point, 0);
#endif

        /*
         * Initialize rate-limiter, which controls access to the write buffer
         * by user and GC I/O
         */
        pblk_rl_init(&pblk->rl, rb->nr_entries);

        return 0;
}
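
/*
 * Summary of the ring indices (editorial note, inferred from the code in this
 * file rather than taken from the original comments): all pointers only move
 * forward and wrap with a power-of-two mask, e.g.
 * next = (pos + 1) & (rb->nr_entries - 1). mem marks the next entry a
 * producer will fill, subm the next entry the write thread will pick up,
 * sync the next entry still waiting for device completion, and l2p_update
 * trails sync so L2P lookups are redirected to device addresses before a
 * buffer slot is reused.
 */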

/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
{
        /* Alloc a write buffer that can at least fit 128 entries */
        return (1 << max(get_count_order(nr_entries), 7));
}
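
/*
 * Illustrative example (not part of the original sources): for
 * nr_entries = 100, get_count_order(100) = 7, so the buffer is sized to
 * 1 << 7 = 128 entries; for nr_entries = 200, get_count_order(200) = 8,
 * giving 256 entries. The max() against 7 enforces the 128-entry floor
 * mentioned above.
 */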

void *pblk_rb_entries_ref(struct pblk_rb *rb)
{
        return rb->entries;
}

static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
        int flags;

try:
        flags = READ_ONCE(w_ctx->flags);
        if (!(flags & PBLK_SUBMITTED_ENTRY))
                goto try;

        /* Release flags on context. Protect from writes and reads */
        smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
        pblk_ppa_set_empty(&w_ctx->ppa);
}

#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
#define pblk_rb_ring_space(rb, head, tail, size) \
                                        (CIRC_SPACE(head, tail, size))
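
/*
 * For reference (assumption based on linux/circ_buf.h at the time of
 * writing): CIRC_CNT(head, tail, size) evaluates to
 * (head - tail) & (size - 1), i.e. the number of occupied entries, and
 * CIRC_SPACE(head, tail, size) is CIRC_CNT(tail, head + 1, size), the
 * number of free entries while always keeping one slot open.
 */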

/*
 * Buffer space is calculated with respect to the back pointer signaling
 * synchronized entries to the media.
 */
static unsigned int pblk_rb_space(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}

/*
 * Buffer count is calculated with respect to the submission entry signaling the
 * entries that are available to send to the media
 */
unsigned int pblk_rb_read_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int subm = READ_ONCE(rb->subm);

        return pblk_rb_ring_count(mem, subm, rb->nr_entries);
}

unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_count(mem, sync, rb->nr_entries);
}

unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int subm;

        subm = READ_ONCE(rb->subm);
        /* Commit read means updating submission pointer */
        smp_store_release(&rb->subm,
                                (subm + nr_entries) & (rb->nr_entries - 1));

        return subm;
}
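
/*
 * Usage sketch (assumption, mirroring how the write thread is expected to
 * drive the buffer; not taken from the original sources):
 *
 *        unsigned int secs = pblk_rb_read_count(&pblk->rwb);
 *        unsigned int pos = pblk_rb_read_commit(&pblk->rwb, secs);
 *        // pos is the first committed entry; the write bio is formed
 *        // from pos onwards while subm has already moved past it.
 */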

static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int *l2p_upd,
                                unsigned int to_update)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_line *line;
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        unsigned int user_io = 0, gc_io = 0;
        unsigned int i;
        int flags;

        for (i = 0; i < to_update; i++) {
                entry = &rb->entries[*l2p_upd];
                w_ctx = &entry->w_ctx;

                flags = READ_ONCE(entry->w_ctx.flags);
                if (flags & PBLK_IOTYPE_USER)
                        user_io++;
                else if (flags & PBLK_IOTYPE_GC)
                        gc_io++;
                else
                        WARN(1, "pblk: unknown IO type\n");

                pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
                                                        entry->cacheline);

                line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
                kref_put(&line->ref, pblk_line_put);
                clean_wctx(w_ctx);
                *l2p_upd = (*l2p_upd + 1) & (rb->nr_entries - 1);
        }

        pblk_rl_out(&pblk->rl, user_io, gc_io);

        return 0;
}

/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write buffer
 * from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
                              unsigned int mem, unsigned int sync)
{
        unsigned int space, count;
        int ret = 0;

        lockdep_assert_held(&rb->w_lock);

        /* Update l2p only as buffer entries are being overwritten */
        space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
        if (space > nr_entries)
                goto out;

        count = nr_entries - space;
        /* l2p_update used exclusively under rb->w_lock */
        ret = __pblk_rb_update_l2p(rb, &rb->l2p_update, count);

out:
        return ret;
}
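
/*
 * Worked example (not in the original sources): if mem is about to advance
 * by nr_entries = 5 and only space = 2 slots remain in front of l2p_update,
 * then count = 5 - 2 = 3 entries have their L2P mapping switched from the
 * cacheline to the device address before their slots are overwritten.
 */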

/*
 * Update the l2p entry for all sectors stored on the write buffer. This means
 * that all future lookups to the l2p table will point to a device address, not
 * to the cacheline in the write buffer.
 */
void pblk_rb_sync_l2p(struct pblk_rb *rb)
{
        unsigned int sync;
        unsigned int to_update;

        spin_lock(&rb->w_lock);

        /* Protect from reads and writes */
        sync = smp_load_acquire(&rb->sync);

        to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
        __pblk_rb_update_l2p(rb, &rb->l2p_update, to_update);

        spin_unlock(&rb->w_lock);
}

/*
 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
 * Typically, 4KB data chunks coming from a bio will be copied to the ring
 * buffer, thus the write will fail if not all incoming data can be copied.
 */
static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
                                  struct pblk_w_ctx w_ctx,
                                  struct pblk_rb_entry *entry)
{
        memcpy(entry->data, data, rb->seg_size);

        entry->w_ctx.lba = w_ctx.lba;
        entry->w_ctx.ppa = w_ctx.ppa;
}

void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
                              struct pblk_w_ctx w_ctx, unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}
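
/*
 * Note on the flag protocol (assumption, inferred from the accessors in this
 * file): entries cycle through PBLK_WRITABLE_ENTRY -> PBLK_WRITTEN_DATA ->
 * PBLK_SUBMITTED_ENTRY and back to PBLK_WRITABLE_ENTRY. Producers publish the
 * copied data by updating w_ctx.flags with smp_store_release(), and the write
 * thread polls the flags with READ_ONCE() before touching entry->data.
 */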

void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
                            struct pblk_w_ctx w_ctx, struct pblk_line *gc_line,
                            unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, gc_line))
                entry->w_ctx.lba = ADDR_EMPTY;

        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}

static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
                                  unsigned int pos)
{
        struct pblk_rb_entry *entry;
        unsigned int subm, sync_point;
        int flags;

        subm = READ_ONCE(rb->subm);

#ifdef CONFIG_NVM_DEBUG
        atomic_inc(&rb->inflight_sync_point);
#endif

        if (pos == subm)
                return 0;

        sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
        entry = &rb->entries[sync_point];

        flags = READ_ONCE(entry->w_ctx.flags);
        flags |= PBLK_FLUSH_ENTRY;

        /* Release flags on context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);

        /* Protect syncs */
        smp_store_release(&rb->sync_point, sync_point);

        if (!bio)
                return 0;

        spin_lock_irq(&rb->s_lock);
        bio_list_add(&entry->w_ctx.bios, bio);
        spin_unlock_irq(&rb->s_lock);

        return 1;
}
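
/*
 * Illustrative example (not in the original sources): in a 64-entry buffer,
 * a flush arriving with pos (the new mem) == 10 sets sync_point = 10 - 1 = 9,
 * i.e. the last entry that must reach the media before the flush can
 * complete; pos == 0 wraps to nr_entries - 1 = 63. When pos == subm there is
 * nothing in front of the submission pointer and no sync point is set.
 */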

static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                               unsigned int *pos)
{
        unsigned int mem;
        unsigned int sync;

        sync = READ_ONCE(rb->sync);
        mem = READ_ONCE(rb->mem);

        if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries)
                return 0;

        if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
                return 0;

        *pos = mem;

        return 1;
}

static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                             unsigned int *pos)
{
        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        /* Protect from read count */
        smp_store_release(&rb->mem, (*pos + nr_entries) & (rb->nr_entries - 1));
        return 1;
}

void pblk_rb_flush(struct pblk_rb *rb)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int mem = READ_ONCE(rb->mem);

        if (pblk_rb_sync_point_set(rb, NULL, mem))
                return;

        pblk_write_should_kick(pblk);
}

static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
                                   unsigned int *pos, struct bio *bio,
                                   int *io_ret)
{
        unsigned int mem;

        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        mem = (*pos + nr_entries) & (rb->nr_entries - 1);
        *io_ret = NVM_IO_DONE;

        if (bio->bi_opf & REQ_PREFLUSH) {
                struct pblk *pblk = container_of(rb, struct pblk, rwb);

#ifdef CONFIG_NVM_DEBUG
                atomic_long_inc(&pblk->nr_flush);
#endif
                if (pblk_rb_sync_point_set(&pblk->rwb, bio, mem))
                        *io_ret = NVM_IO_OK;
        }

        /* Protect from read count */
        smp_store_release(&rb->mem, mem);
        return 1;
}
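
/*
 * Sketch of the flush outcome (assumption, based on the code above): for a
 * bio with REQ_PREFLUSH, *io_ret == NVM_IO_OK means the bio was queued on the
 * sync point entry and is expected to be completed by the write path once
 * that entry is persisted, while NVM_IO_DONE means no sync point was taken
 * and the caller may complete the bio right away.
 */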

/*
 * Atomically check that (i) there is space on the write buffer for the
 * incoming I/O, and (ii) the current I/O type has enough budget in the write
 * buffer (rate-limiter).
 */
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
                           unsigned int nr_entries, unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        int io_ret;

        spin_lock(&rb->w_lock);
        io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
        if (io_ret) {
                spin_unlock(&rb->w_lock);
                return io_ret;
        }

        if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
                spin_unlock(&rb->w_lock);
                return NVM_IO_REQUEUE;
        }

        pblk_rl_user_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return io_ret;
}

/*
 * See the pblk_rb_may_write_user comment.
 */
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
                         unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);

        spin_lock(&rb->w_lock);
        if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        if (!pblk_rb_may_write(rb, nr_entries, pos)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        pblk_rl_gc_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return 1;
}

/*
 * The caller of this function must ensure that the backpointer will not
 * overwrite the entries passed on the list.
 */
unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
                                      struct list_head *list,
                                      unsigned int max)
{
        struct pblk_rb_entry *entry, *tentry;
        struct page *page;
        unsigned int read = 0;
        int ret;

        list_for_each_entry_safe(entry, tentry, list, index) {
                if (read > max) {
                        pr_err("pblk: too many entries on list\n");
                        goto out;
                }

                page = virt_to_page(entry->data);
                if (!page) {
                        pr_err("pblk: could not allocate write bio page\n");
                        goto out;
                }

                ret = bio_add_page(bio, page, rb->seg_size, 0);
                if (ret != rb->seg_size) {
                        pr_err("pblk: could not add page to write bio\n");
                        goto out;
                }

                list_del(&entry->index);
                read++;
        }

out:
        return read;
}

/*
 * Read available entries on rb and add them to the given bio. To avoid a
 * memory copy, a page reference to the write buffer is added to the bio
 * instead.
 *
 * This function is used by the write thread to form the write bio that will
 * persist data on the write buffer to the media.
 */
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
                                 struct bio *bio, unsigned int pos,
                                 unsigned int nr_entries, unsigned int count)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct request_queue *q = pblk->dev->q;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct pblk_rb_entry *entry;
        struct page *page;
        unsigned int pad = 0, to_read = nr_entries;
        unsigned int i;
        int flags;

        if (count < nr_entries) {
                pad = nr_entries - count;
                to_read = count;
        }

        c_ctx->sentry = pos;
        c_ctx->nr_valid = to_read;
        c_ctx->nr_padded = pad;

        for (i = 0; i < to_read; i++) {
                entry = &rb->entries[pos];

                /* A write has been allowed into the buffer, but data is still
                 * being copied to it. It is ok to busy wait.
                 */
try:
                flags = READ_ONCE(entry->w_ctx.flags);
                if (!(flags & PBLK_WRITTEN_DATA)) {
                        io_schedule();
                        goto try;
                }

                page = virt_to_page(entry->data);
                if (!page) {
                        pr_err("pblk: could not allocate write bio page\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
                                                                rb->seg_size) {
                        pr_err("pblk: could not add page to write bio\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                if (flags & PBLK_FLUSH_ENTRY) {
                        unsigned int sync_point;

                        sync_point = READ_ONCE(rb->sync_point);
                        if (sync_point == pos) {
                                /* Protect syncs */
                                smp_store_release(&rb->sync_point, EMPTY_ENTRY);
                        }

                        flags &= ~PBLK_FLUSH_ENTRY;
#ifdef CONFIG_NVM_DEBUG
                        atomic_dec(&rb->inflight_sync_point);
#endif
                }

                flags &= ~PBLK_WRITTEN_DATA;
                flags |= PBLK_SUBMITTED_ENTRY;

                /* Release flags on context. Protect from writes */
                smp_store_release(&entry->w_ctx.flags, flags);

                pos = (pos + 1) & (rb->nr_entries - 1);
        }

        if (pad) {
                if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
                        pr_err("pblk: could not pad page in write bio\n");
                        return NVM_IO_ERR;
                }
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(pad, &((struct pblk *)
                        (container_of(rb, struct pblk, rwb)))->padded_writes);
#endif

        return NVM_IO_OK;
}
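
/*
 * Note on padding (derived from the code above): when fewer valid entries are
 * available than the request expects (count < nr_entries), the remaining
 * nr_entries - count sectors are filled with freshly allocated pages via
 * pblk_bio_add_pages() and recorded in c_ctx->nr_padded, so the request can
 * still be issued at the required write granularity.
 */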

/*
 * Copy to bio only if the lba matches the one on the given cache entry.
 * Otherwise, it means that the entry has been overwritten, and the bio should
 * be directed to disk.
 */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
                        u64 pos, int bio_iter)
{
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        void *data;
        int flags;
        int ret = 1;

        spin_lock(&rb->w_lock);

#ifdef CONFIG_NVM_DEBUG
        /* Caller must ensure that the access will not cause an overflow */
        BUG_ON(pos >= rb->nr_entries);
#endif
        entry = &rb->entries[pos];
        w_ctx = &entry->w_ctx;
        flags = READ_ONCE(w_ctx->flags);

        /* Check if the entry has been overwritten or is scheduled to be */
        if (w_ctx->lba != lba || flags & PBLK_WRITABLE_ENTRY) {
                ret = 0;
                goto out;
        }

        /* Only advance the bio if it hasn't been advanced already. If advanced,
         * this bio is at least a partial bio (i.e., it has partially been
         * filled with data from the cache). Any data residing on the media
         * will be read later on.
         */
        if (unlikely(!bio->bi_iter.bi_idx))
                bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);

        data = bio_data(bio);
        memcpy(data, entry->data, rb->seg_size);

out:
        spin_unlock(&rb->w_lock);
        return ret;
}

struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
        unsigned int entry = pos & (rb->nr_entries - 1);

        return &rb->entries[entry].w_ctx;
}

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
        __acquires(&rb->s_lock)
{
        if (flags)
                spin_lock_irqsave(&rb->s_lock, *flags);
        else
                spin_lock_irq(&rb->s_lock);

        return rb->sync;
}

void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
        __releases(&rb->s_lock)
{
        lockdep_assert_held(&rb->s_lock);

        if (flags)
                spin_unlock_irqrestore(&rb->s_lock, *flags);
        else
                spin_unlock_irq(&rb->s_lock);
}

unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int sync;
        unsigned int i;

        lockdep_assert_held(&rb->s_lock);

        sync = READ_ONCE(rb->sync);

        for (i = 0; i < nr_entries; i++)
                sync = (sync + 1) & (rb->nr_entries - 1);

        /* Protect from counts */
        smp_store_release(&rb->sync, sync);

        return sync;
}

unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb)
{
        unsigned int subm, sync_point;
        unsigned int count;

        /* Protect syncs */
        sync_point = smp_load_acquire(&rb->sync_point);
        if (sync_point == EMPTY_ENTRY)
                return 0;

        subm = READ_ONCE(rb->subm);

        /* The sync point itself counts as a sector to sync */
        count = pblk_rb_ring_count(sync_point, subm, rb->nr_entries) + 1;

        return count;
}

/*
 * Scan from the current position of the sync pointer to find the entry that
 * corresponds to the given ppa. This is necessary since write requests can be
 * completed out of order. The assumption is that the ppa is close to the sync
 * pointer thus the search will not take long.
 *
 * The caller of this function must guarantee that the sync pointer will not
 * reach the entry while it is using the metadata associated with it. With this
 * assumption in mind, there is no need to take the sync lock.
 */
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
                                              struct ppa_addr *ppa)
{
        unsigned int sync, subm, count;
        unsigned int i;

        sync = READ_ONCE(rb->sync);
        subm = READ_ONCE(rb->subm);
        count = pblk_rb_ring_count(subm, sync, rb->nr_entries);

        for (i = 0; i < count; i++)
                sync = (sync + 1) & (rb->nr_entries - 1);

        return NULL;
}

int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
        struct pblk_rb_entry *entry;
        int i;
        int ret = 0;

        spin_lock(&rb->w_lock);
        spin_lock_irq(&rb->s_lock);

        if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
                                (rb->sync == rb->l2p_update) &&
                                (rb->sync_point == EMPTY_ENTRY)) {
                goto out;
        }

        if (!rb->entries) {
                ret = 1;
                goto out;
        }

        for (i = 0; i < rb->nr_entries; i++) {
                entry = &rb->entries[i];

                if (!entry->data) {
                        ret = 1;
                        goto out;
                }
        }

out:
        spin_unlock(&rb->w_lock);
        spin_unlock_irq(&rb->s_lock);

        return ret;
}

unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
{
        return (pos & (rb->nr_entries - 1));
}

int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
{
        return (pos >= rb->nr_entries);
}

ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_c_ctx *c;
        ssize_t offset;
        int queued_entries = 0;

        spin_lock_irq(&rb->s_lock);
        list_for_each_entry(c, &pblk->compl_list, list)
                queued_entries++;
        spin_unlock_irq(&rb->s_lock);

        if (rb->sync_point != EMPTY_ENTRY)
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_sync_point),
#else
                        0,
#endif
                        rb->sync_point,
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_sync_point_count(rb),
                        queued_entries);
        else
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_sync_point),
#else
                        0,
#endif
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_sync_point_count(rb),
                        queued_entries);

        return offset;
}