Commit | Line | Data |
---|---|---|
02a1520d | 1 | // SPDX-License-Identifier: GPL-2.0 |
a4bd217b JG |
2 | /* |
3 | * Copyright (C) 2016 CNEX Labs | |
4 | * Initial release: Javier Gonzalez <javier@cnexlabs.com> | |
5 | * Matias Bjorling <matias@cnexlabs.com> | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or | |
8 | * modify it under the terms of the GNU General Public License version | |
9 | * 2 as published by the Free Software Foundation. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, but | |
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * General Public License for more details. | |
15 | * | |
16 | * pblk-core.c - pblk's core functionality | |
17 | * | |
18 | */ | |
19 | ||
4c44abf4 HH |
20 | #define CREATE_TRACE_POINTS |
21 | ||
a4bd217b | 22 | #include "pblk.h" |
4c44abf4 | 23 | #include "pblk-trace.h" |
a4bd217b | 24 | |
8bd40020 JG |
25 | static void pblk_line_mark_bb(struct work_struct *work) |
26 | { | |
27 | struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws, | |
28 | ws); | |
29 | struct pblk *pblk = line_ws->pblk; | |
30 | struct nvm_tgt_dev *dev = pblk->dev; | |
31 | struct ppa_addr *ppa = line_ws->priv; | |
32 | int ret; | |
33 | ||
aff3fb18 | 34 | ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD); |
8bd40020 JG |
35 | if (ret) { |
36 | struct pblk_line *line; | |
37 | int pos; | |
38 | ||
cb21665c | 39 | line = pblk_ppa_to_line(pblk, *ppa); |
b1bcfda1 | 40 | pos = pblk_ppa_to_pos(&dev->geo, *ppa); |
8bd40020 | 41 | |
4e495a46 | 42 | pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n", |
8bd40020 JG |
43 | line->id, pos); |
44 | } | |
45 | ||
46 | kfree(ppa); | |
b906bbb6 | 47 | mempool_free(line_ws, &pblk->gen_ws_pool); |
8bd40020 JG |
48 | } |
49 | ||
a4bd217b | 50 | static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line, |
32ef9412 | 51 | struct ppa_addr ppa_addr) |
a4bd217b JG |
52 | { |
53 | struct nvm_tgt_dev *dev = pblk->dev; | |
54 | struct nvm_geo *geo = &dev->geo; | |
32ef9412 JG |
55 | struct ppa_addr *ppa; |
56 | int pos = pblk_ppa_to_pos(geo, ppa_addr); | |
a4bd217b | 57 | |
4e495a46 | 58 | pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos); |
a4bd217b JG |
59 | atomic_long_inc(&pblk->erase_failed); |
60 | ||
a44f53fa | 61 | atomic_dec(&line->blk_in_line); |
a4bd217b | 62 | if (test_and_set_bit(pos, line->blk_bitmap)) |
4e495a46 | 63 | pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n", |
a4bd217b JG |
64 | line->id, pos); |
65 | ||
32ef9412 JG |
66 | /* Not necessary to mark bad blocks on 2.0 spec. */ |
67 | if (geo->version == NVM_OCSSD_SPEC_20) | |
68 | return; | |
69 | ||
70 | ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC); | |
71 | if (!ppa) | |
72 | return; | |
73 | ||
74 | *ppa = ppa_addr; | |
b84ae4a8 JG |
75 | pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb, |
76 | GFP_ATOMIC, pblk->bb_wq); | |
a4bd217b JG |
77 | } |
78 | ||
79 | static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd) | |
80 | { | |
32ef9412 JG |
81 | struct nvm_tgt_dev *dev = pblk->dev; |
82 | struct nvm_geo *geo = &dev->geo; | |
83 | struct nvm_chk_meta *chunk; | |
a4bd217b | 84 | struct pblk_line *line; |
32ef9412 | 85 | int pos; |
a4bd217b | 86 | |
cb21665c | 87 | line = pblk_ppa_to_line(pblk, rqd->ppa_addr); |
32ef9412 JG |
88 | pos = pblk_ppa_to_pos(geo, rqd->ppa_addr); |
89 | chunk = &line->chks[pos]; | |
90 | ||
a4bd217b JG |
91 | atomic_dec(&line->left_seblks); |
92 | ||
93 | if (rqd->error) { | |
4209c31c HH |
94 | trace_pblk_chunk_reset(pblk_disk_name(pblk), |
95 | &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED); | |
96 | ||
32ef9412 JG |
97 | chunk->state = NVM_CHK_ST_OFFLINE; |
98 | pblk_mark_bb(pblk, line, rqd->ppa_addr); | |
99 | } else { | |
4209c31c HH |
100 | trace_pblk_chunk_reset(pblk_disk_name(pblk), |
101 | &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE); | |
102 | ||
32ef9412 | 103 | chunk->state = NVM_CHK_ST_FREE; |
a4bd217b | 104 | } |
588726d3 | 105 | |
4c44abf4 HH |
106 | trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr, |
107 | chunk->state); | |
108 | ||
588726d3 | 109 | atomic_dec(&pblk->inflight_io); |
a4bd217b JG |
110 | } |
111 | ||
112 | /* Erase completion assumes that only one block is erased at a time */ |
113 | static void pblk_end_io_erase(struct nvm_rq *rqd) | |
114 | { | |
115 | struct pblk *pblk = rqd->private; | |
116 | ||
a4bd217b | 117 | __pblk_end_io_erase(pblk, rqd); |
b906bbb6 | 118 | mempool_free(rqd, &pblk->e_rq_pool); |
a4bd217b JG |
119 | } |
120 | ||
32ef9412 JG |
121 | /* |
122 | * Get information for all chunks from the device. | |
123 | * | |
090ee26f | 124 | * The caller is responsible for freeing (with vfree) the returned structure |
32ef9412 | 125 | */ |
aff3fb18 | 126 | struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk) |
32ef9412 JG |
127 | { |
128 | struct nvm_tgt_dev *dev = pblk->dev; | |
129 | struct nvm_geo *geo = &dev->geo; | |
130 | struct nvm_chk_meta *meta; | |
131 | struct ppa_addr ppa; | |
132 | unsigned long len; | |
133 | int ret; | |
134 | ||
135 | ppa.ppa = 0; | |
136 | ||
137 | len = geo->all_chunks * sizeof(*meta); | |
090ee26f | 138 | meta = vzalloc(len); |
32ef9412 JG |
139 | if (!meta) |
140 | return ERR_PTR(-ENOMEM); | |
141 | ||
aff3fb18 | 142 | ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta); |
32ef9412 | 143 | if (ret) { |
6916cf54 | 144 | vfree(meta); |
32ef9412 JG |
145 | return ERR_PTR(-EIO); |
146 | } | |
147 | ||
148 | return meta; | |
149 | } | |
150 | ||
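/* Index into the flat chunk metadata array returned by pblk_get_chunk_meta():
 * entries are laid out by channel (grp), then LUN (pu), then chunk, i.e.
 * index = grp * num_lun * num_chk + pu * num_chk + chk.
 */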
151 | struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk, | |
152 | struct nvm_chk_meta *meta, | |
153 | struct ppa_addr ppa) | |
154 | { | |
155 | struct nvm_tgt_dev *dev = pblk->dev; | |
156 | struct nvm_geo *geo = &dev->geo; | |
157 | int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun; | |
158 | int lun_off = ppa.m.pu * geo->num_chk; | |
159 | int chk_off = ppa.m.chk; | |
160 | ||
161 | return meta + ch_off + lun_off + chk_off; | |
162 | } | |
163 | ||
0880a9aa JG |
164 | void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line, |
165 | u64 paddr) | |
a4bd217b JG |
166 | { |
167 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
168 | struct list_head *move_list = NULL; | |
169 | ||
170 | /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P | |
171 | * table is modified with reclaimed sectors, a check is done to ensure |
172 | * that newer updates are not overwritten. | |
173 | */ | |
174 | spin_lock(&line->lock); | |
d340121e | 175 | WARN_ON(line->state == PBLK_LINESTATE_FREE); |
a4bd217b JG |
176 | |
177 | if (test_and_set_bit(paddr, line->invalid_bitmap)) { | |
178 | WARN_ONCE(1, "pblk: double invalidate\n"); | |
179 | spin_unlock(&line->lock); | |
180 | return; | |
181 | } | |
dd2a4343 | 182 | le32_add_cpu(line->vsc, -1); |
a4bd217b JG |
183 | |
184 | if (line->state == PBLK_LINESTATE_CLOSED) | |
185 | move_list = pblk_line_gc_list(pblk, line); | |
186 | spin_unlock(&line->lock); | |
187 | ||
188 | if (move_list) { | |
189 | spin_lock(&l_mg->gc_lock); | |
190 | spin_lock(&line->lock); | |
191 | /* Prevent moving a line that has just been chosen for GC */ | |
d340121e | 192 | if (line->state == PBLK_LINESTATE_GC) { |
a4bd217b JG |
193 | spin_unlock(&line->lock); |
194 | spin_unlock(&l_mg->gc_lock); | |
195 | return; | |
196 | } | |
197 | spin_unlock(&line->lock); | |
198 | ||
199 | list_move_tail(&line->list, move_list); | |
200 | spin_unlock(&l_mg->gc_lock); | |
201 | } | |
202 | } | |
203 | ||
204 | void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa) | |
205 | { | |
206 | struct pblk_line *line; | |
207 | u64 paddr; | |
a4bd217b | 208 | |
880eda54 | 209 | #ifdef CONFIG_NVM_PBLK_DEBUG |
a4bd217b JG |
210 | /* Callers must ensure that the ppa points to a device address */ |
211 | BUG_ON(pblk_addr_in_cache(ppa)); | |
212 | BUG_ON(pblk_ppa_empty(ppa)); | |
213 | #endif | |
214 | ||
cb21665c | 215 | line = pblk_ppa_to_line(pblk, ppa); |
a4bd217b JG |
216 | paddr = pblk_dev_ppa_to_line_addr(pblk, ppa); |
217 | ||
218 | __pblk_map_invalidate(pblk, line, paddr); | |
219 | } | |
220 | ||
a4bd217b JG |
221 | static void pblk_invalidate_range(struct pblk *pblk, sector_t slba, |
222 | unsigned int nr_secs) | |
223 | { | |
224 | sector_t lba; | |
225 | ||
226 | spin_lock(&pblk->trans_lock); | |
227 | for (lba = slba; lba < slba + nr_secs; lba++) { | |
228 | struct ppa_addr ppa; | |
229 | ||
230 | ppa = pblk_trans_map_get(pblk, lba); | |
231 | ||
232 | if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa)) | |
233 | pblk_map_invalidate(pblk, ppa); | |
234 | ||
235 | pblk_ppa_set_empty(&ppa); | |
236 | pblk_trans_map_set(pblk, lba, ppa); | |
237 | } | |
238 | spin_unlock(&pblk->trans_lock); | |
239 | } | |
240 | ||
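/* Allocate the DMA-able out-of-band metadata buffer for a request. For
 * multi-sector requests the ppa list shares the allocation, placed right
 * after the metadata area.
 */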
45dcf29b JG |
241 | int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd) |
242 | { | |
243 | struct nvm_tgt_dev *dev = pblk->dev; | |
244 | ||
245 | rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, | |
246 | &rqd->dma_meta_list); | |
247 | if (!rqd->meta_list) | |
248 | return -ENOMEM; | |
249 | ||
250 | if (rqd->nr_ppas == 1) | |
251 | return 0; | |
252 | ||
24828d05 IK |
253 | rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk); |
254 | rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk); | |
45dcf29b JG |
255 | |
256 | return 0; | |
257 | } | |
258 | ||
259 | void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd) | |
260 | { | |
261 | struct nvm_tgt_dev *dev = pblk->dev; | |
262 | ||
263 | if (rqd->meta_list) | |
264 | nvm_dev_dma_free(dev->parent, rqd->meta_list, | |
265 | rqd->dma_meta_list); | |
266 | } | |
267 | ||
67bf26a3 JG |
268 | /* Caller must guarantee that the request is a valid type */ |
269 | struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type) | |
a4bd217b JG |
270 | { |
271 | mempool_t *pool; | |
272 | struct nvm_rq *rqd; | |
273 | int rq_size; | |
274 | ||
67bf26a3 JG |
275 | switch (type) { |
276 | case PBLK_WRITE: | |
277 | case PBLK_WRITE_INT: | |
b906bbb6 | 278 | pool = &pblk->w_rq_pool; |
a4bd217b | 279 | rq_size = pblk_w_rq_size; |
67bf26a3 JG |
280 | break; |
281 | case PBLK_READ: | |
b906bbb6 | 282 | pool = &pblk->r_rq_pool; |
084ec9ba | 283 | rq_size = pblk_g_rq_size; |
67bf26a3 JG |
284 | break; |
285 | default: | |
b906bbb6 | 286 | pool = &pblk->e_rq_pool; |
67bf26a3 | 287 | rq_size = pblk_g_rq_size; |
a4bd217b JG |
288 | } |
289 | ||
290 | rqd = mempool_alloc(pool, GFP_KERNEL); | |
291 | memset(rqd, 0, rq_size); | |
292 | ||
293 | return rqd; | |
294 | } | |
295 | ||
67bf26a3 JG |
296 | /* Typically used on completion path. Cannot guarantee request consistency */ |
297 | void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type) | |
a4bd217b JG |
298 | { |
299 | mempool_t *pool; | |
300 | ||
67bf26a3 JG |
301 | switch (type) { |
302 | case PBLK_WRITE: | |
303 | kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap); | |
df561f66 | 304 | fallthrough; |
67bf26a3 | 305 | case PBLK_WRITE_INT: |
b906bbb6 | 306 | pool = &pblk->w_rq_pool; |
67bf26a3 JG |
307 | break; |
308 | case PBLK_READ: | |
b906bbb6 | 309 | pool = &pblk->r_rq_pool; |
67bf26a3 JG |
310 | break; |
311 | case PBLK_ERASE: | |
b906bbb6 | 312 | pool = &pblk->e_rq_pool; |
67bf26a3 JG |
313 | break; |
314 | default: | |
4e495a46 | 315 | pblk_err(pblk, "trying to free unknown rqd type\n"); |
67bf26a3 JG |
316 | return; |
317 | } | |
a4bd217b | 318 | |
45dcf29b | 319 | pblk_free_rqd_meta(pblk, rqd); |
a4bd217b JG |
320 | mempool_free(rqd, pool); |
321 | } | |
322 | ||
323 | void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off, | |
324 | int nr_pages) | |
325 | { | |
510fd8ea HL |
326 | struct bio_vec *bv; |
327 | struct page *page; | |
328 | int i, e, nbv = 0; | |
329 | ||
330 | for (i = 0; i < bio->bi_vcnt; i++) { | |
331 | bv = &bio->bi_io_vec[i]; | |
332 | page = bv->bv_page; | |
333 | for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++) | |
334 | if (nbv >= off) | |
335 | mempool_free(page++, &pblk->page_bio_pool); | |
a4bd217b JG |
336 | } |
337 | } | |
338 | ||
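/* Back a bio with nr_pages pages taken from the page mempool. On failure,
 * the pages added so far are released back to the pool.
 */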
339 | int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags, | |
340 | int nr_pages) | |
341 | { | |
342 | struct request_queue *q = pblk->dev->q; | |
343 | struct page *page; | |
344 | int i, ret; | |
345 | ||
346 | for (i = 0; i < nr_pages; i++) { | |
b906bbb6 | 347 | page = mempool_alloc(&pblk->page_bio_pool, flags); |
a4bd217b JG |
348 | |
349 | ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0); | |
350 | if (ret != PBLK_EXPOSED_PAGE_SIZE) { | |
4e495a46 | 351 | pblk_err(pblk, "could not add page to bio\n"); |
b906bbb6 | 352 | mempool_free(page, &pblk->page_bio_pool); |
a4bd217b JG |
353 | goto err; |
354 | } | |
355 | } | |
356 | ||
357 | return 0; | |
358 | err: | |
f142ac0b | 359 | pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i); |
a4bd217b JG |
360 | return -1; |
361 | } | |
362 | ||
cc9c9a00 | 363 | void pblk_write_kick(struct pblk *pblk) |
a4bd217b JG |
364 | { |
365 | wake_up_process(pblk->writer_ts); | |
366 | mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000)); | |
367 | } | |
368 | ||
87c1d2d3 | 369 | void pblk_write_timer_fn(struct timer_list *t) |
a4bd217b | 370 | { |
87c1d2d3 | 371 | struct pblk *pblk = from_timer(pblk, t, wtimer); |
a4bd217b JG |
372 | |
373 | /* kick the write thread every tick to flush outstanding data */ | |
374 | pblk_write_kick(pblk); | |
375 | } | |
376 | ||
377 | void pblk_write_should_kick(struct pblk *pblk) | |
378 | { | |
379 | unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb); | |
380 | ||
55d8ec35 | 381 | if (secs_avail >= pblk->min_write_pgs_data) |
a4bd217b JG |
382 | pblk_write_kick(pblk); |
383 | } | |
384 | ||
8bd40020 | 385 | static void pblk_wait_for_meta(struct pblk *pblk) |
a4bd217b | 386 | { |
588726d3 JG |
387 | do { |
388 | if (!atomic_read(&pblk->inflight_io)) | |
389 | break; | |
a4bd217b | 390 | |
588726d3 JG |
391 | schedule(); |
392 | } while (1); | |
393 | } | |
a4bd217b | 394 | |
588726d3 JG |
395 | static void pblk_flush_writer(struct pblk *pblk) |
396 | { | |
397 | pblk_rb_flush(&pblk->rwb); | |
398 | do { | |
ee8d5c1a | 399 | if (!pblk_rb_sync_count(&pblk->rwb)) |
588726d3 | 400 | break; |
a4bd217b | 401 | |
ee8d5c1a | 402 | pblk_write_kick(pblk); |
588726d3 JG |
403 | schedule(); |
404 | } while (1); | |
a4bd217b JG |
405 | } |
406 | ||
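/* Pick the GC list a line belongs on from its valid sector count (vsc):
 * full (no valid data), high, mid, low or empty. Write-error lines go to a
 * dedicated list, and an inconsistent count marks the line as corrupt.
 * Returns the target list when the line's gc_group changes, NULL otherwise.
 * The caller must hold line->lock.
 */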
407 | struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line) | |
408 | { | |
409 | struct pblk_line_meta *lm = &pblk->lm; | |
410 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
411 | struct list_head *move_list = NULL; | |
55d8ec35 IK |
412 | int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data) |
413 | * (pblk->min_write_pgs - pblk->min_write_pgs_data); | |
414 | int vsc = le32_to_cpu(*line->vsc) + packed_meta; | |
a4bd217b | 415 | |
476118c9 JG |
416 | lockdep_assert_held(&line->lock); |
417 | ||
48b8d208 HH |
418 | if (line->w_err_gc->has_write_err) { |
419 | if (line->gc_group != PBLK_LINEGC_WERR) { | |
420 | line->gc_group = PBLK_LINEGC_WERR; | |
421 | move_list = &l_mg->gc_werr_list; | |
422 | pblk_rl_werr_line_in(&pblk->rl); | |
423 | } | |
424 | } else if (!vsc) { | |
a4bd217b JG |
425 | if (line->gc_group != PBLK_LINEGC_FULL) { |
426 | line->gc_group = PBLK_LINEGC_FULL; | |
427 | move_list = &l_mg->gc_full_list; | |
428 | } | |
b20ba1bc | 429 | } else if (vsc < lm->high_thrs) { |
a4bd217b JG |
430 | if (line->gc_group != PBLK_LINEGC_HIGH) { |
431 | line->gc_group = PBLK_LINEGC_HIGH; | |
432 | move_list = &l_mg->gc_high_list; | |
433 | } | |
b20ba1bc | 434 | } else if (vsc < lm->mid_thrs) { |
a4bd217b JG |
435 | if (line->gc_group != PBLK_LINEGC_MID) { |
436 | line->gc_group = PBLK_LINEGC_MID; | |
437 | move_list = &l_mg->gc_mid_list; | |
438 | } | |
dd2a4343 | 439 | } else if (vsc < line->sec_in_line) { |
a4bd217b JG |
440 | if (line->gc_group != PBLK_LINEGC_LOW) { |
441 | line->gc_group = PBLK_LINEGC_LOW; | |
442 | move_list = &l_mg->gc_low_list; | |
443 | } | |
dd2a4343 | 444 | } else if (vsc == line->sec_in_line) { |
a4bd217b JG |
445 | if (line->gc_group != PBLK_LINEGC_EMPTY) { |
446 | line->gc_group = PBLK_LINEGC_EMPTY; | |
447 | move_list = &l_mg->gc_empty_list; | |
448 | } | |
449 | } else { | |
450 | line->state = PBLK_LINESTATE_CORRUPT; | |
f2937232 HH |
451 | trace_pblk_line_state(pblk_disk_name(pblk), line->id, |
452 | line->state); | |
453 | ||
a4bd217b JG |
454 | line->gc_group = PBLK_LINEGC_NONE; |
455 | move_list = &l_mg->corrupt_list; | |
4e495a46 | 456 | pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n", |
dd2a4343 | 457 | line->id, vsc, |
a4bd217b JG |
458 | line->sec_in_line, |
459 | lm->high_thrs, lm->mid_thrs); | |
460 | } | |
461 | ||
462 | return move_list; | |
463 | } | |
464 | ||
465 | void pblk_discard(struct pblk *pblk, struct bio *bio) | |
466 | { | |
467 | sector_t slba = pblk_get_lba(bio); | |
468 | sector_t nr_secs = pblk_get_secs(bio); | |
469 | ||
470 | pblk_invalidate_range(pblk, slba, nr_secs); | |
471 | } | |
472 | ||
a4bd217b JG |
473 | void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd) |
474 | { | |
475 | atomic_long_inc(&pblk->write_failed); | |
880eda54 | 476 | #ifdef CONFIG_NVM_PBLK_DEBUG |
a4bd217b JG |
477 | pblk_print_failed_rqd(pblk, rqd, rqd->error); |
478 | #endif | |
479 | } | |
480 | ||
481 | void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd) | |
482 | { | |
483 | /* Empty page read is not necessarily an error (e.g., L2P recovery) */ | |
484 | if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) { | |
485 | atomic_long_inc(&pblk->read_empty); | |
486 | return; | |
487 | } | |
488 | ||
489 | switch (rqd->error) { | |
490 | case NVM_RSP_WARN_HIGHECC: | |
491 | atomic_long_inc(&pblk->read_high_ecc); | |
492 | break; | |
493 | case NVM_RSP_ERR_FAILECC: | |
494 | case NVM_RSP_ERR_FAILCRC: | |
495 | atomic_long_inc(&pblk->read_failed); | |
496 | break; | |
497 | default: | |
4e495a46 | 498 | pblk_err(pblk, "unknown read error:%d\n", rqd->error); |
a4bd217b | 499 | } |
880eda54 | 500 | #ifdef CONFIG_NVM_PBLK_DEBUG |
a4bd217b JG |
501 | pblk_print_failed_rqd(pblk, rqd, rqd->error); |
502 | #endif | |
503 | } | |
504 | ||
c2e9f5d4 JG |
505 | void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write) |
506 | { | |
507 | pblk->sec_per_write = sec_per_write; | |
508 | } | |
509 | ||
48e5da72 | 510 | int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf) |
a4bd217b JG |
511 | { |
512 | struct nvm_tgt_dev *dev = pblk->dev; | |
513 | ||
b6730dd4 | 514 | atomic_inc(&pblk->inflight_io); |
a4bd217b | 515 | |
880eda54 | 516 | #ifdef CONFIG_NVM_PBLK_DEBUG |
b6730dd4 JG |
517 | if (pblk_check_io(pblk, rqd)) |
518 | return NVM_IO_ERR; | |
1a94b2d4 | 519 | #endif |
a4bd217b | 520 | |
48e5da72 | 521 | return nvm_submit_io(dev, rqd, buf); |
1a94b2d4 | 522 | } |
a4bd217b | 523 | |
4c44abf4 HH |
524 | void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd) |
525 | { | |
526 | struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd); | |
527 | ||
528 | int i; | |
529 | ||
530 | for (i = 0; i < rqd->nr_ppas; i++) { | |
531 | struct ppa_addr *ppa = &ppa_list[i]; | |
532 | struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa); | |
533 | u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa); | |
534 | ||
535 | if (caddr == 0) | |
536 | trace_pblk_chunk_state(pblk_disk_name(pblk), | |
537 | ppa, NVM_CHK_ST_OPEN); | |
96076f7d | 538 | else if (caddr == (chunk->cnlb - 1)) |
4c44abf4 HH |
539 | trace_pblk_chunk_state(pblk_disk_name(pblk), |
540 | ppa, NVM_CHK_ST_CLOSED); | |
541 | } | |
542 | } | |
543 | ||
48e5da72 | 544 | int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf) |
1a94b2d4 JG |
545 | { |
546 | struct nvm_tgt_dev *dev = pblk->dev; | |
4c44abf4 | 547 | int ret; |
1a94b2d4 | 548 | |
b6730dd4 | 549 | atomic_inc(&pblk->inflight_io); |
1a94b2d4 | 550 | |
880eda54 | 551 | #ifdef CONFIG_NVM_PBLK_DEBUG |
b6730dd4 JG |
552 | if (pblk_check_io(pblk, rqd)) |
553 | return NVM_IO_ERR; | |
a4bd217b | 554 | #endif |
588726d3 | 555 | |
48e5da72 | 556 | ret = nvm_submit_io_sync(dev, rqd, buf); |
4c44abf4 HH |
557 | |
558 | if (trace_pblk_chunk_state_enabled() && !ret && | |
559 | rqd->opcode == NVM_OP_PWRITE) | |
560 | pblk_check_chunk_state_update(pblk, rqd); | |
561 | ||
562 | return ret; | |
a4bd217b JG |
563 | } |
564 | ||
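/* Same as pblk_submit_io_sync(), but holds the chunk semaphore of the
 * request's first ppa across the submission to serialize access to it.
 */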
48e5da72 HH |
565 | static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd, |
566 | void *buf) | |
253babc3 | 567 | { |
45c5fcbb | 568 | struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd); |
253babc3 JG |
569 | int ret; |
570 | ||
253babc3 | 571 | pblk_down_chunk(pblk, ppa_list[0]); |
48e5da72 | 572 | ret = pblk_submit_io_sync(pblk, rqd, buf); |
253babc3 JG |
573 | pblk_up_chunk(pblk, ppa_list[0]); |
574 | ||
575 | return ret; | |
576 | } | |
577 | ||
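/* Decide how many sectors to submit next: at most pblk->sec_per_write,
 * rounded down to a multiple of the minimum write size (e.g. with min 8 and
 * max 64, 37 available sectors yield 32). A pending flush forces a
 * minimum-sized write; otherwise 0 is returned until enough sectors are
 * available. With skip_meta, the data-only minimum write size is used.
 */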
a4bd217b | 578 | int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail, |
55d8ec35 | 579 | unsigned long secs_to_flush, bool skip_meta) |
a4bd217b | 580 | { |
c2e9f5d4 | 581 | int max = pblk->sec_per_write; |
a4bd217b JG |
582 | int min = pblk->min_write_pgs; |
583 | int secs_to_sync = 0; | |
584 | ||
55d8ec35 IK |
585 | if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs) |
586 | min = max = pblk->min_write_pgs_data; | |
587 | ||
a4bd217b JG |
588 | if (secs_avail >= max) |
589 | secs_to_sync = max; | |
590 | else if (secs_avail >= min) | |
591 | secs_to_sync = min * (secs_avail / min); | |
592 | else if (secs_to_flush) | |
593 | secs_to_sync = min; | |
594 | ||
595 | return secs_to_sync; | |
596 | } | |
597 | ||
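/* Give back the last nr_secs sectors allocated on the line: rewind cur_sec
 * and clear their bits in the map bitmap.
 */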
dd2a4343 JG |
598 | void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs) |
599 | { | |
600 | u64 addr; | |
601 | int i; | |
602 | ||
e57903fd | 603 | spin_lock(&line->lock); |
dd2a4343 JG |
604 | addr = find_next_zero_bit(line->map_bitmap, |
605 | pblk->lm.sec_per_line, line->cur_sec); | |
606 | line->cur_sec = addr - nr_secs; | |
607 | ||
608 | for (i = 0; i < nr_secs; i++, line->cur_sec--) | |
609 | WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap)); | |
e57903fd | 610 | spin_unlock(&line->lock); |
dd2a4343 JG |
611 | } |
612 | ||
613 | u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs) | |
a4bd217b JG |
614 | { |
615 | u64 addr; | |
616 | int i; | |
617 | ||
476118c9 JG |
618 | lockdep_assert_held(&line->lock); |
619 | ||
a4bd217b JG |
620 | /* logic error: ppa out-of-bounds. Prevent generating bad address */ |
621 | if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) { | |
622 | WARN(1, "pblk: page allocation out of bounds\n"); | |
623 | nr_secs = pblk->lm.sec_per_line - line->cur_sec; | |
624 | } | |
625 | ||
626 | line->cur_sec = addr = find_next_zero_bit(line->map_bitmap, | |
627 | pblk->lm.sec_per_line, line->cur_sec); | |
628 | for (i = 0; i < nr_secs; i++, line->cur_sec++) | |
629 | WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap)); | |
630 | ||
631 | return addr; | |
632 | } | |
633 | ||
634 | u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs) | |
635 | { | |
636 | u64 addr; | |
637 | ||
638 | /* Lock needed in case a write fails and a recovery needs to remap | |
639 | * failed write buffer entries | |
640 | */ | |
641 | spin_lock(&line->lock); | |
642 | addr = __pblk_alloc_page(pblk, line, nr_secs); | |
643 | line->left_msecs -= nr_secs; | |
644 | WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n"); | |
645 | spin_unlock(&line->lock); | |
646 | ||
647 | return addr; | |
648 | } | |
649 | ||
dd2a4343 JG |
650 | u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line) |
651 | { | |
652 | u64 paddr; | |
653 | ||
654 | spin_lock(&line->lock); | |
655 | paddr = find_next_zero_bit(line->map_bitmap, | |
656 | pblk->lm.sec_per_line, line->cur_sec); | |
657 | spin_unlock(&line->lock); | |
658 | ||
659 | return paddr; | |
660 | } | |
661 | ||
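/* smeta lives at the start of the line's first good block; return the paddr
 * of its first sector, or -1 if the line has no good blocks left.
 */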
af3fac16 | 662 | u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line) |
a4bd217b JG |
663 | { |
664 | struct nvm_tgt_dev *dev = pblk->dev; | |
665 | struct nvm_geo *geo = &dev->geo; | |
666 | struct pblk_line_meta *lm = &pblk->lm; | |
af3fac16 | 667 | int bit; |
a4bd217b | 668 | |
af3fac16 JG |
669 | /* This usually only happens on bad lines */ |
670 | bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line); | |
671 | if (bit >= lm->blk_per_line) | |
672 | return -1; | |
a4bd217b | 673 | |
af3fac16 JG |
674 | return bit * geo->ws_opt; |
675 | } | |
a4bd217b | 676 | |
af3fac16 JG |
677 | int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line) |
678 | { | |
af3fac16 | 679 | struct pblk_line_meta *lm = &pblk->lm; |
45c5fcbb | 680 | struct ppa_addr *ppa_list; |
af3fac16 JG |
681 | struct nvm_rq rqd; |
682 | u64 paddr = pblk_line_smeta_start(pblk, line); | |
683 | int i, ret; | |
63e3809c | 684 | |
a4bd217b JG |
685 | memset(&rqd, 0, sizeof(struct nvm_rq)); |
686 | ||
af3fac16 JG |
687 | ret = pblk_alloc_rqd_meta(pblk, &rqd); |
688 | if (ret) | |
689 | return ret; | |
a4bd217b | 690 | |
af3fac16 JG |
691 | rqd.opcode = NVM_OP_PREAD; |
692 | rqd.nr_ppas = lm->smeta_sec; | |
693 | rqd.is_seq = 1; | |
45c5fcbb | 694 | ppa_list = nvm_rq_to_ppa_list(&rqd); |
a4bd217b | 695 | |
af3fac16 | 696 | for (i = 0; i < lm->smeta_sec; i++, paddr++) |
45c5fcbb | 697 | ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id); |
a4bd217b | 698 | |
48e5da72 | 699 | ret = pblk_submit_io_sync(pblk, &rqd, line->smeta); |
a4bd217b | 700 | if (ret) { |
af3fac16 | 701 | pblk_err(pblk, "smeta I/O submission failed: %d\n", ret); |
af3fac16 | 702 | goto clear_rqd; |
a4bd217b JG |
703 | } |
704 | ||
588726d3 | 705 | atomic_dec(&pblk->inflight_io); |
a4bd217b | 706 | |
d165a7a6 | 707 | if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) { |
af3fac16 | 708 | pblk_log_read_err(pblk, &rqd); |
d165a7a6 IK |
709 | ret = -EIO; |
710 | } | |
a4bd217b | 711 | |
af3fac16 JG |
712 | clear_rqd: |
713 | pblk_free_rqd_meta(pblk, &rqd); | |
a4bd217b JG |
714 | return ret; |
715 | } | |
716 | ||
af3fac16 JG |
717 | static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line, |
718 | u64 paddr) | |
a4bd217b | 719 | { |
a4bd217b | 720 | struct pblk_line_meta *lm = &pblk->lm; |
45c5fcbb | 721 | struct ppa_addr *ppa_list; |
a4bd217b | 722 | struct nvm_rq rqd; |
af3fac16 JG |
723 | __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf); |
724 | __le64 addr_empty = cpu_to_le64(ADDR_EMPTY); | |
a4bd217b | 725 | int i, ret; |
a4bd217b JG |
726 | |
727 | memset(&rqd, 0, sizeof(struct nvm_rq)); | |
728 | ||
45dcf29b JG |
729 | ret = pblk_alloc_rqd_meta(pblk, &rqd); |
730 | if (ret) | |
731 | return ret; | |
63e3809c | 732 | |
af3fac16 | 733 | rqd.opcode = NVM_OP_PWRITE; |
a4bd217b | 734 | rqd.nr_ppas = lm->smeta_sec; |
af3fac16 | 735 | rqd.is_seq = 1; |
45c5fcbb | 736 | ppa_list = nvm_rq_to_ppa_list(&rqd); |
a4bd217b JG |
737 | |
738 | for (i = 0; i < lm->smeta_sec; i++, paddr++) { | |
faa79f27 IK |
739 | struct pblk_sec_meta *meta = pblk_get_meta(pblk, |
740 | rqd.meta_list, i); | |
63e3809c | 741 | |
45c5fcbb | 742 | ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id); |
faa79f27 | 743 | meta->lba = lba_list[paddr] = addr_empty; |
a4bd217b JG |
744 | } |
745 | ||
48e5da72 | 746 | ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta); |
a4bd217b | 747 | if (ret) { |
4e495a46 | 748 | pblk_err(pblk, "smeta I/O submission failed: %d\n", ret); |
45dcf29b | 749 | goto clear_rqd; |
a4bd217b JG |
750 | } |
751 | ||
588726d3 | 752 | atomic_dec(&pblk->inflight_io); |
a4bd217b JG |
753 | |
754 | if (rqd.error) { | |
af3fac16 JG |
755 | pblk_log_write_err(pblk, &rqd); |
756 | ret = -EIO; | |
a4bd217b JG |
757 | } |
758 | ||
45dcf29b JG |
759 | clear_rqd: |
760 | pblk_free_rqd_meta(pblk, &rqd); | |
a4bd217b JG |
761 | return ret; |
762 | } | |
763 | ||
af3fac16 JG |
764 | int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line, |
765 | void *emeta_buf) | |
a4bd217b | 766 | { |
af3fac16 JG |
767 | struct nvm_tgt_dev *dev = pblk->dev; |
768 | struct nvm_geo *geo = &dev->geo; | |
af3fac16 | 769 | struct pblk_line_meta *lm = &pblk->lm; |
45c5fcbb | 770 | void *ppa_list_buf, *meta_list; |
45c5fcbb | 771 | struct ppa_addr *ppa_list; |
af3fac16 JG |
772 | struct nvm_rq rqd; |
773 | u64 paddr = line->emeta_ssec; | |
774 | dma_addr_t dma_ppa_list, dma_meta_list; | |
775 | int min = pblk->min_write_pgs; | |
776 | int left_ppas = lm->emeta_sec[0]; | |
777 | int line_id = line->id; | |
778 | int rq_ppas, rq_len; | |
779 | int i, j; | |
780 | int ret; | |
a4bd217b | 781 | |
af3fac16 JG |
782 | meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, |
783 | &dma_meta_list); | |
784 | if (!meta_list) | |
785 | return -ENOMEM; | |
a4bd217b | 786 | |
45c5fcbb | 787 | ppa_list_buf = meta_list + pblk_dma_meta_size(pblk); |
24828d05 | 788 | dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk); |
af3fac16 JG |
789 | |
790 | next_rq: | |
791 | memset(&rqd, 0, sizeof(struct nvm_rq)); | |
792 | ||
55d8ec35 | 793 | rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false); |
af3fac16 JG |
794 | rq_len = rq_ppas * geo->csecs; |
795 | ||
af3fac16 | 796 | rqd.meta_list = meta_list; |
45c5fcbb | 797 | rqd.ppa_list = ppa_list_buf; |
af3fac16 JG |
798 | rqd.dma_meta_list = dma_meta_list; |
799 | rqd.dma_ppa_list = dma_ppa_list; | |
800 | rqd.opcode = NVM_OP_PREAD; | |
801 | rqd.nr_ppas = rq_ppas; | |
45c5fcbb | 802 | ppa_list = nvm_rq_to_ppa_list(&rqd); |
af3fac16 JG |
803 | |
804 | for (i = 0; i < rqd.nr_ppas; ) { | |
805 | struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id); | |
806 | int pos = pblk_ppa_to_pos(geo, ppa); | |
807 | ||
808 | if (pblk_io_aligned(pblk, rq_ppas)) | |
809 | rqd.is_seq = 1; | |
810 | ||
811 | while (test_bit(pos, line->blk_bitmap)) { | |
812 | paddr += min; | |
813 | if (pblk_boundary_paddr_checks(pblk, paddr)) { | |
af3fac16 JG |
814 | ret = -EINTR; |
815 | goto free_rqd_dma; | |
816 | } | |
817 | ||
818 | ppa = addr_to_gen_ppa(pblk, paddr, line_id); | |
819 | pos = pblk_ppa_to_pos(geo, ppa); | |
820 | } | |
821 | ||
822 | if (pblk_boundary_paddr_checks(pblk, paddr + min)) { | |
af3fac16 JG |
823 | ret = -EINTR; |
824 | goto free_rqd_dma; | |
825 | } | |
826 | ||
827 | for (j = 0; j < min; j++, i++, paddr++) | |
45c5fcbb | 828 | ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id); |
af3fac16 JG |
829 | } |
830 | ||
48e5da72 | 831 | ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf); |
af3fac16 JG |
832 | if (ret) { |
833 | pblk_err(pblk, "emeta I/O submission failed: %d\n", ret); | |
af3fac16 JG |
834 | goto free_rqd_dma; |
835 | } | |
836 | ||
837 | atomic_dec(&pblk->inflight_io); | |
838 | ||
d165a7a6 | 839 | if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) { |
af3fac16 | 840 | pblk_log_read_err(pblk, &rqd); |
d165a7a6 IK |
841 | ret = -EIO; |
842 | goto free_rqd_dma; | |
843 | } | |
af3fac16 JG |
844 | |
845 | emeta_buf += rq_len; | |
846 | left_ppas -= rq_ppas; | |
847 | if (left_ppas) | |
848 | goto next_rq; | |
849 | ||
850 | free_rqd_dma: | |
851 | nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list); | |
852 | return ret; | |
a4bd217b JG |
853 | } |
854 | ||
855 | static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd, | |
856 | struct ppa_addr ppa) | |
857 | { | |
858 | rqd->opcode = NVM_OP_ERASE; | |
859 | rqd->ppa_addr = ppa; | |
860 | rqd->nr_ppas = 1; | |
d7b68016 | 861 | rqd->is_seq = 1; |
a4bd217b JG |
862 | rqd->bio = NULL; |
863 | } | |
864 | ||
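/* Reset (erase) a single chunk synchronously and run the shared erase
 * completion path to update the chunk state and line counters.
 */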
865 | static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa) | |
866 | { | |
4b5d56ed MB |
867 | struct nvm_rq rqd = {NULL}; |
868 | int ret; | |
a4bd217b | 869 | |
4209c31c HH |
870 | trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa, |
871 | PBLK_CHUNK_RESET_START); | |
872 | ||
a4bd217b JG |
873 | pblk_setup_e_rq(pblk, &rqd, ppa); |
874 | ||
a4bd217b JG |
875 | /* The write thread schedules erases so that it minimizes disturbances |
876 | * with writes. Thus, there is no need to take the LUN semaphore. | |
877 | */ | |
48e5da72 | 878 | ret = pblk_submit_io_sync(pblk, &rqd, NULL); |
a4bd217b JG |
879 | rqd.private = pblk; |
880 | __pblk_end_io_erase(pblk, &rqd); | |
881 | ||
588726d3 | 882 | return ret; |
a4bd217b JG |
883 | } |
884 | ||
885 | int pblk_line_erase(struct pblk *pblk, struct pblk_line *line) | |
886 | { | |
887 | struct pblk_line_meta *lm = &pblk->lm; | |
888 | struct ppa_addr ppa; | |
588726d3 | 889 | int ret, bit = -1; |
a4bd217b | 890 | |
a44f53fa JG |
891 | /* Erase only good blocks, one at a time */ |
892 | do { | |
893 | spin_lock(&line->lock); | |
894 | bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line, | |
895 | bit + 1); | |
896 | if (bit >= lm->blk_per_line) { | |
897 | spin_unlock(&line->lock); | |
898 | break; | |
899 | } | |
900 | ||
a4bd217b | 901 | ppa = pblk->luns[bit].bppa; /* set ch and lun */ |
69471513 | 902 | ppa.a.blk = line->id; |
a4bd217b | 903 | |
a44f53fa | 904 | atomic_dec(&line->left_eblks); |
a4bd217b | 905 | WARN_ON(test_and_set_bit(bit, line->erase_bitmap)); |
a44f53fa | 906 | spin_unlock(&line->lock); |
a4bd217b | 907 | |
588726d3 JG |
908 | ret = pblk_blk_erase_sync(pblk, ppa); |
909 | if (ret) { | |
4e495a46 | 910 | pblk_err(pblk, "failed to erase line %d\n", line->id); |
588726d3 | 911 | return ret; |
a4bd217b | 912 | } |
a44f53fa | 913 | } while (1); |
a4bd217b JG |
914 | |
915 | return 0; | |
916 | } | |
917 | ||
dd2a4343 JG |
918 | static void pblk_line_setup_metadata(struct pblk_line *line, |
919 | struct pblk_line_mgmt *l_mg, | |
920 | struct pblk_line_meta *lm) | |
921 | { | |
922 | int meta_line; | |
923 | ||
588726d3 JG |
924 | lockdep_assert_held(&l_mg->free_lock); |
925 | ||
dd2a4343 JG |
926 | retry_meta: |
927 | meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES); | |
928 | if (meta_line == PBLK_DATA_LINES) { | |
929 | spin_unlock(&l_mg->free_lock); | |
930 | io_schedule(); | |
931 | spin_lock(&l_mg->free_lock); | |
932 | goto retry_meta; | |
933 | } | |
934 | ||
935 | set_bit(meta_line, &l_mg->meta_bitmap); | |
936 | line->meta_line = meta_line; | |
937 | ||
938 | line->smeta = l_mg->sline_meta[meta_line]; | |
939 | line->emeta = l_mg->eline_meta[meta_line]; | |
940 | ||
941 | memset(line->smeta, 0, lm->smeta_len); | |
942 | memset(line->emeta->buf, 0, lm->emeta_len[0]); | |
943 | ||
944 | line->emeta->mem = 0; | |
945 | atomic_set(&line->emeta->sync, 0); | |
946 | } | |
947 | ||
a4bd217b JG |
948 | /* For now, lines are always assumed to be full lines. Thus, the smeta former and current |
949 | * lun bitmaps are omitted. | |
950 | */ | |
dd2a4343 | 951 | static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line, |
a4bd217b JG |
952 | struct pblk_line *cur) |
953 | { | |
954 | struct nvm_tgt_dev *dev = pblk->dev; | |
955 | struct nvm_geo *geo = &dev->geo; | |
956 | struct pblk_line_meta *lm = &pblk->lm; | |
957 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
dd2a4343 JG |
958 | struct pblk_emeta *emeta = line->emeta; |
959 | struct line_emeta *emeta_buf = emeta->buf; | |
960 | struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta; | |
a4bd217b JG |
961 | int nr_blk_line; |
962 | ||
963 | /* After erasing the line, new bad blocks might appear and we risk | |
964 | * having an invalid line | |
965 | */ | |
966 | nr_blk_line = lm->blk_per_line - | |
967 | bitmap_weight(line->blk_bitmap, lm->blk_per_line); | |
968 | if (nr_blk_line < lm->min_blk_line) { | |
969 | spin_lock(&l_mg->free_lock); | |
970 | spin_lock(&line->lock); | |
971 | line->state = PBLK_LINESTATE_BAD; | |
f2937232 HH |
972 | trace_pblk_line_state(pblk_disk_name(pblk), line->id, |
973 | line->state); | |
a4bd217b JG |
974 | spin_unlock(&line->lock); |
975 | ||
976 | list_add_tail(&line->list, &l_mg->bad_list); | |
977 | spin_unlock(&l_mg->free_lock); | |
978 | ||
4e495a46 | 979 | pblk_debug(pblk, "line %d is bad\n", line->id); |
a4bd217b JG |
980 | |
981 | return 0; | |
982 | } | |
983 | ||
984 | /* Run-time metadata */ | |
dd2a4343 | 985 | line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta); |
a4bd217b JG |
986 | |
987 | /* Mark LUNs allocated in this line (all for now) */ | |
988 | bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len); | |
989 | ||
dd2a4343 | 990 | smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC); |
7e0a0847 | 991 | guid_copy((guid_t *)&smeta_buf->header.uuid, &pblk->instance_uuid); |
dd2a4343 JG |
992 | smeta_buf->header.id = cpu_to_le32(line->id); |
993 | smeta_buf->header.type = cpu_to_le16(line->type); | |
d0ab0b1a HH |
994 | smeta_buf->header.version_major = SMETA_VERSION_MAJOR; |
995 | smeta_buf->header.version_minor = SMETA_VERSION_MINOR; | |
a4bd217b JG |
996 | |
997 | /* Start metadata */ | |
dd2a4343 | 998 | smeta_buf->seq_nr = cpu_to_le64(line->seq_nr); |
fae7fae4 | 999 | smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns); |
a4bd217b JG |
1000 | |
1001 | /* Fill metadata among lines */ | |
1002 | if (cur) { | |
1003 | memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len); | |
dd2a4343 JG |
1004 | smeta_buf->prev_id = cpu_to_le32(cur->id); |
1005 | cur->emeta->buf->next_id = cpu_to_le32(line->id); | |
a4bd217b | 1006 | } else { |
dd2a4343 | 1007 | smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY); |
a4bd217b JG |
1008 | } |
1009 | ||
1010 | /* All smeta must be set at this point */ | |
dd2a4343 JG |
1011 | smeta_buf->header.crc = cpu_to_le32( |
1012 | pblk_calc_meta_header_crc(pblk, &smeta_buf->header)); | |
1013 | smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf)); | |
a4bd217b JG |
1014 | |
1015 | /* End metadata */ | |
dd2a4343 JG |
1016 | memcpy(&emeta_buf->header, &smeta_buf->header, |
1017 | sizeof(struct line_header)); | |
d0ab0b1a HH |
1018 | |
1019 | emeta_buf->header.version_major = EMETA_VERSION_MAJOR; | |
1020 | emeta_buf->header.version_minor = EMETA_VERSION_MINOR; | |
1021 | emeta_buf->header.crc = cpu_to_le32( | |
1022 | pblk_calc_meta_header_crc(pblk, &emeta_buf->header)); | |
1023 | ||
dd2a4343 JG |
1024 | emeta_buf->seq_nr = cpu_to_le64(line->seq_nr); |
1025 | emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line); | |
1026 | emeta_buf->nr_valid_lbas = cpu_to_le64(0); | |
1027 | emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY); | |
1028 | emeta_buf->crc = cpu_to_le32(0); | |
1029 | emeta_buf->prev_id = smeta_buf->prev_id; | |
a4bd217b JG |
1030 | |
1031 | return 1; | |
1032 | } | |
1033 | ||
9cfd5a95 JG |
1034 | static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line) |
1035 | { | |
1036 | struct pblk_line_meta *lm = &pblk->lm; | |
53d82db6 | 1037 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; |
9cfd5a95 | 1038 | |
53d82db6 | 1039 | line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL); |
9cfd5a95 JG |
1040 | if (!line->map_bitmap) |
1041 | return -ENOMEM; | |
1042 | ||
53d82db6 HH |
1043 | memset(line->map_bitmap, 0, lm->sec_bitmap_len); |
1044 | ||
9cfd5a95 | 1045 | /* will be initialized using bb info from map_bitmap */ |
53d82db6 | 1046 | line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL); |
9cfd5a95 | 1047 | if (!line->invalid_bitmap) { |
53d82db6 | 1048 | mempool_free(line->map_bitmap, l_mg->bitmap_pool); |
9cfd5a95 JG |
1049 | line->map_bitmap = NULL; |
1050 | return -ENOMEM; | |
1051 | } | |
1052 | ||
1053 | return 0; | |
1054 | } | |
1055 | ||
a4bd217b JG |
1056 | /* For now, lines are always assumed to be full lines. Thus, the smeta former and current |
1057 | * lun bitmaps are omitted. | |
1058 | */ | |
1059 | static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line, | |
1060 | int init) | |
1061 | { | |
1062 | struct nvm_tgt_dev *dev = pblk->dev; | |
1063 | struct nvm_geo *geo = &dev->geo; | |
1064 | struct pblk_line_meta *lm = &pblk->lm; | |
1065 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
a4bd217b JG |
1066 | u64 off; |
1067 | int bit = -1; | |
cfe1c9e2 | 1068 | int emeta_secs; |
a4bd217b JG |
1069 | |
1070 | line->sec_in_line = lm->sec_per_line; | |
1071 | ||
1072 | /* Capture bad block information on line mapping bitmaps */ | |
1073 | while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line, | |
1074 | bit + 1)) < lm->blk_per_line) { | |
e46f4e48 | 1075 | off = bit * geo->ws_opt; |
a4bd217b JG |
1076 | bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off, |
1077 | lm->sec_per_line); | |
1078 | bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux, | |
1079 | lm->sec_per_line); | |
e46f4e48 | 1080 | line->sec_in_line -= geo->clba; |
a4bd217b JG |
1081 | } |
1082 | ||
1083 | /* Mark smeta metadata sectors as bad sectors */ | |
1084 | bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line); | |
e46f4e48 | 1085 | off = bit * geo->ws_opt; |
a4bd217b JG |
1086 | bitmap_set(line->map_bitmap, off, lm->smeta_sec); |
1087 | line->sec_in_line -= lm->smeta_sec; | |
a4bd217b JG |
1088 | line->cur_sec = off + lm->smeta_sec; |
1089 | ||
af3fac16 | 1090 | if (init && pblk_line_smeta_write(pblk, line, off)) { |
4e495a46 | 1091 | pblk_debug(pblk, "line smeta I/O failed. Retry\n"); |
6cf17a2f | 1092 | return 0; |
a4bd217b JG |
1093 | } |
1094 | ||
1095 | bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line); | |
1096 | ||
1097 | /* Mark emeta metadata sectors as bad sectors. We need to consider bad | |
1098 | * blocks to make sure that there are enough sectors to store emeta | |
1099 | */ | |
cfe1c9e2 HH |
1100 | emeta_secs = lm->emeta_sec[0]; |
1101 | off = lm->sec_per_line; | |
1102 | while (emeta_secs) { | |
e46f4e48 | 1103 | off -= geo->ws_opt; |
a4bd217b | 1104 | if (!test_bit(off, line->invalid_bitmap)) { |
e46f4e48 JG |
1105 | bitmap_set(line->invalid_bitmap, off, geo->ws_opt); |
1106 | emeta_secs -= geo->ws_opt; | |
a4bd217b JG |
1107 | } |
1108 | } | |
1109 | ||
a4bd217b | 1110 | line->emeta_ssec = off; |
cfe1c9e2 | 1111 | line->sec_in_line -= lm->emeta_sec[0]; |
dd2a4343 | 1112 | line->nr_valid_lbas = 0; |
0880a9aa | 1113 | line->left_msecs = line->sec_in_line; |
dd2a4343 | 1114 | *line->vsc = cpu_to_le32(line->sec_in_line); |
a4bd217b JG |
1115 | |
1116 | if (lm->sec_per_line - line->sec_in_line != | |
1117 | bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) { | |
1118 | spin_lock(&line->lock); | |
1119 | line->state = PBLK_LINESTATE_BAD; | |
f2937232 HH |
1120 | trace_pblk_line_state(pblk_disk_name(pblk), line->id, |
1121 | line->state); | |
a4bd217b JG |
1122 | spin_unlock(&line->lock); |
1123 | ||
1124 | list_add_tail(&line->list, &l_mg->bad_list); | |
4e495a46 | 1125 | pblk_err(pblk, "unexpected line %d is bad\n", line->id); |
a4bd217b JG |
1126 | |
1127 | return 0; | |
1128 | } | |
1129 | ||
1130 | return 1; | |
1131 | } | |
1132 | ||
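/* On a line that has never been written, chunks already in the FREE state do
 * not need an erase: flag them in the erase bitmap and return the number of
 * blocks that still require one.
 */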
32ef9412 JG |
1133 | static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line) |
1134 | { | |
1135 | struct pblk_line_meta *lm = &pblk->lm; | |
1136 | struct nvm_tgt_dev *dev = pblk->dev; | |
1137 | struct nvm_geo *geo = &dev->geo; | |
1138 | int blk_to_erase = atomic_read(&line->blk_in_line); | |
1139 | int i; | |
1140 | ||
1141 | for (i = 0; i < lm->blk_per_line; i++) { | |
1142 | struct pblk_lun *rlun = &pblk->luns[i]; | |
1143 | int pos = pblk_ppa_to_pos(geo, rlun->bppa); | |
1144 | int state = line->chks[pos].state; | |
1145 | ||
1146 | /* Free chunks should not be erased */ | |
1147 | if (state & NVM_CHK_ST_FREE) { | |
1148 | set_bit(pblk_ppa_to_pos(geo, rlun->bppa), | |
1149 | line->erase_bitmap); | |
1150 | blk_to_erase--; | |
1151 | } | |
1152 | } | |
1153 | ||
1154 | return blk_to_erase; | |
1155 | } | |
1156 | ||
a4bd217b JG |
1157 | static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line) |
1158 | { | |
1159 | struct pblk_line_meta *lm = &pblk->lm; | |
1d8b33e0 | 1160 | int blk_in_line = atomic_read(&line->blk_in_line); |
9cfd5a95 | 1161 | int blk_to_erase; |
a4bd217b | 1162 | |
32ef9412 JG |
1163 | /* Bad blocks do not need to be erased */ |
1164 | bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line); | |
1165 | ||
a4bd217b | 1166 | spin_lock(&line->lock); |
32ef9412 JG |
1167 | |
1168 | /* If we have not written to this line, we need to mark free chunks |
1169 | * as already erased | |
1170 | */ | |
1171 | if (line->state == PBLK_LINESTATE_NEW) { | |
1172 | blk_to_erase = pblk_prepare_new_line(pblk, line); | |
1173 | line->state = PBLK_LINESTATE_FREE; | |
f2937232 HH |
1174 | trace_pblk_line_state(pblk_disk_name(pblk), line->id, |
1175 | line->state); | |
32ef9412 | 1176 | } else { |
1d8b33e0 JG |
1177 | blk_to_erase = blk_in_line; |
1178 | } | |
1179 | ||
1180 | if (blk_in_line < lm->min_blk_line) { | |
9cfd5a95 JG |
1181 | spin_unlock(&line->lock); |
1182 | return -EAGAIN; | |
32ef9412 JG |
1183 | } |
1184 | ||
a4bd217b | 1185 | if (line->state != PBLK_LINESTATE_FREE) { |
588726d3 JG |
1186 | WARN(1, "pblk: corrupted line %d, state %d\n", |
1187 | line->id, line->state); | |
9cfd5a95 JG |
1188 | spin_unlock(&line->lock); |
1189 | return -EINTR; | |
a4bd217b | 1190 | } |
588726d3 | 1191 | |
a4bd217b | 1192 | line->state = PBLK_LINESTATE_OPEN; |
f2937232 HH |
1193 | trace_pblk_line_state(pblk_disk_name(pblk), line->id, |
1194 | line->state); | |
a44f53fa | 1195 | |
32ef9412 JG |
1196 | atomic_set(&line->left_eblks, blk_to_erase); |
1197 | atomic_set(&line->left_seblks, blk_to_erase); | |
dd2a4343 JG |
1198 | |
1199 | line->meta_distance = lm->meta_distance; | |
a4bd217b JG |
1200 | spin_unlock(&line->lock); |
1201 | ||
a4bd217b | 1202 | kref_init(&line->ref); |
0586942f | 1203 | atomic_set(&line->sec_to_update, 0); |
a4bd217b JG |
1204 | |
1205 | return 0; | |
1206 | } | |
1207 | ||
361d889f | 1208 | /* Line allocations in the recovery path are always single threaded */ |
a4bd217b JG |
1209 | int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line) |
1210 | { | |
1211 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
1212 | int ret; | |
1213 | ||
1214 | spin_lock(&l_mg->free_lock); | |
1215 | l_mg->data_line = line; | |
1216 | list_del(&line->list); | |
a4bd217b JG |
1217 | |
1218 | ret = pblk_line_prepare(pblk, line); | |
1219 | if (ret) { | |
1220 | list_add(&line->list, &l_mg->free_list); | |
3dc001f3 | 1221 | spin_unlock(&l_mg->free_lock); |
a4bd217b JG |
1222 | return ret; |
1223 | } | |
3dc001f3 | 1224 | spin_unlock(&l_mg->free_lock); |
a4bd217b | 1225 | |
9cfd5a95 JG |
1226 | ret = pblk_line_alloc_bitmaps(pblk, line); |
1227 | if (ret) | |
fde201a4 | 1228 | goto fail; |
a4bd217b JG |
1229 | |
1230 | if (!pblk_line_init_bb(pblk, line, 0)) { | |
fde201a4 HS |
1231 | ret = -EINTR; |
1232 | goto fail; | |
a4bd217b JG |
1233 | } |
1234 | ||
9cfd5a95 | 1235 | pblk_rl_free_lines_dec(&pblk->rl, line, true); |
a4bd217b | 1236 | return 0; |
fde201a4 HS |
1237 | |
1238 | fail: | |
1239 | spin_lock(&l_mg->free_lock); | |
1240 | list_add(&line->list, &l_mg->free_list); | |
1241 | spin_unlock(&l_mg->free_lock); | |
1242 | ||
1243 | return ret; | |
a4bd217b JG |
1244 | } |
1245 | ||
1246 | void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line) | |
1247 | { | |
53d82db6 HH |
1248 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; |
1249 | ||
1250 | mempool_free(line->map_bitmap, l_mg->bitmap_pool); | |
a4bd217b JG |
1251 | line->map_bitmap = NULL; |
1252 | line->smeta = NULL; | |
1253 | line->emeta = NULL; | |
1254 | } | |
1255 | ||
9cfd5a95 JG |
1256 | static void pblk_line_reinit(struct pblk_line *line) |
1257 | { | |
1258 | *line->vsc = cpu_to_le32(EMPTY_ENTRY); | |
1259 | ||
1260 | line->map_bitmap = NULL; | |
1261 | line->invalid_bitmap = NULL; | |
1262 | line->smeta = NULL; | |
1263 | line->emeta = NULL; | |
1264 | } | |
1265 | ||
1266 | void pblk_line_free(struct pblk_line *line) | |
1267 | { | |
53d82db6 HH |
1268 | struct pblk *pblk = line->pblk; |
1269 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
1270 | ||
1271 | mempool_free(line->map_bitmap, l_mg->bitmap_pool); | |
1272 | mempool_free(line->invalid_bitmap, l_mg->bitmap_pool); | |
9cfd5a95 JG |
1273 | |
1274 | pblk_line_reinit(line); | |
1275 | } | |
1276 | ||
a4bd217b JG |
1277 | struct pblk_line *pblk_line_get(struct pblk *pblk) |
1278 | { | |
1279 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
1280 | struct pblk_line_meta *lm = &pblk->lm; | |
588726d3 JG |
1281 | struct pblk_line *line; |
1282 | int ret, bit; | |
a4bd217b JG |
1283 | |
1284 | lockdep_assert_held(&l_mg->free_lock); | |
1285 | ||
588726d3 | 1286 | retry: |
a4bd217b | 1287 | if (list_empty(&l_mg->free_list)) { |
4e495a46 | 1288 | pblk_err(pblk, "no free lines\n"); |
588726d3 | 1289 | return NULL; |
a4bd217b JG |
1290 | } |
1291 | ||
1292 | line = list_first_entry(&l_mg->free_list, struct pblk_line, list); | |
1293 | list_del(&line->list); | |
1294 | l_mg->nr_free_lines--; | |
1295 | ||
1296 | bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line); | |
1297 | if (unlikely(bit >= lm->blk_per_line)) { | |
1298 | spin_lock(&line->lock); | |
1299 | line->state = PBLK_LINESTATE_BAD; | |
f2937232 HH |
1300 | trace_pblk_line_state(pblk_disk_name(pblk), line->id, |
1301 | line->state); | |
a4bd217b JG |
1302 | spin_unlock(&line->lock); |
1303 | ||
1304 | list_add_tail(&line->list, &l_mg->bad_list); | |
1305 | ||
4e495a46 | 1306 | pblk_debug(pblk, "line %d is bad\n", line->id); |
588726d3 | 1307 | goto retry; |
a4bd217b JG |
1308 | } |
1309 | ||
588726d3 JG |
1310 | ret = pblk_line_prepare(pblk, line); |
1311 | if (ret) { | |
1d8b33e0 JG |
1312 | switch (ret) { |
1313 | case -EAGAIN: | |
1314 | list_add(&line->list, &l_mg->bad_list); | |
1315 | goto retry; | |
1316 | case -EINTR: | |
588726d3 JG |
1317 | list_add(&line->list, &l_mg->corrupt_list); |
1318 | goto retry; | |
1d8b33e0 | 1319 | default: |
4e495a46 | 1320 | pblk_err(pblk, "failed to prepare line %d\n", line->id); |
588726d3 JG |
1321 | list_add(&line->list, &l_mg->free_list); |
1322 | l_mg->nr_free_lines++; | |
1323 | return NULL; | |
1324 | } | |
a4bd217b JG |
1325 | } |
1326 | ||
a4bd217b JG |
1327 | return line; |
1328 | } | |
1329 | ||
1330 | static struct pblk_line *pblk_line_retry(struct pblk *pblk, | |
1331 | struct pblk_line *line) | |
1332 | { | |
1333 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
1334 | struct pblk_line *retry_line; | |
1335 | ||
588726d3 | 1336 | retry: |
a4bd217b JG |
1337 | spin_lock(&l_mg->free_lock); |
1338 | retry_line = pblk_line_get(pblk); | |
1339 | if (!retry_line) { | |
be388d9f | 1340 | l_mg->data_line = NULL; |
a4bd217b JG |
1341 | spin_unlock(&l_mg->free_lock); |
1342 | return NULL; | |
1343 | } | |
1344 | ||
9cfd5a95 JG |
1345 | retry_line->map_bitmap = line->map_bitmap; |
1346 | retry_line->invalid_bitmap = line->invalid_bitmap; | |
a4bd217b JG |
1347 | retry_line->smeta = line->smeta; |
1348 | retry_line->emeta = line->emeta; | |
1349 | retry_line->meta_line = line->meta_line; | |
a4bd217b | 1350 | |
9cfd5a95 JG |
1351 | pblk_line_reinit(line); |
1352 | ||
3dc001f3 | 1353 | l_mg->data_line = retry_line; |
a4bd217b JG |
1354 | spin_unlock(&l_mg->free_lock); |
1355 | ||
a7689938 | 1356 | pblk_rl_free_lines_dec(&pblk->rl, line, false); |
a4bd217b | 1357 | |
588726d3 JG |
1358 | if (pblk_line_erase(pblk, retry_line)) |
1359 | goto retry; | |
1360 | ||
a4bd217b JG |
1361 | return retry_line; |
1362 | } | |
1363 | ||
588726d3 JG |
1364 | static void pblk_set_space_limit(struct pblk *pblk) |
1365 | { | |
1366 | struct pblk_rl *rl = &pblk->rl; | |
1367 | ||
1368 | atomic_set(&rl->rb_space, 0); | |
1369 | } | |
1370 | ||
a4bd217b JG |
1371 | struct pblk_line *pblk_line_get_first_data(struct pblk *pblk) |
1372 | { | |
1373 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
1374 | struct pblk_line *line; | |
a4bd217b JG |
1375 | |
1376 | spin_lock(&l_mg->free_lock); | |
1377 | line = pblk_line_get(pblk); | |
1378 | if (!line) { | |
1379 | spin_unlock(&l_mg->free_lock); | |
1380 | return NULL; | |
1381 | } | |
1382 | ||
1383 | line->seq_nr = l_mg->d_seq_nr++; | |
1384 | line->type = PBLK_LINETYPE_DATA; | |
1385 | l_mg->data_line = line; | |
1386 | ||
dd2a4343 | 1387 | pblk_line_setup_metadata(line, l_mg, &pblk->lm); |
a4bd217b JG |
1388 | |
1389 | /* Allocate next line for preparation */ | |
1390 | l_mg->data_next = pblk_line_get(pblk); | |
588726d3 JG |
1391 | if (!l_mg->data_next) { |
1392 | /* If we cannot get a new line, we need to stop the pipeline. | |
1393 | * Only allow as many writes in as we can store safely and then | |
1394 | * fail gracefully | |
1395 | */ | |
1396 | pblk_set_space_limit(pblk); | |
1397 | ||
1398 | l_mg->data_next = NULL; | |
1399 | } else { | |
a4bd217b JG |
1400 | l_mg->data_next->seq_nr = l_mg->d_seq_nr++; |
1401 | l_mg->data_next->type = PBLK_LINETYPE_DATA; | |
a4bd217b JG |
1402 | } |
1403 | spin_unlock(&l_mg->free_lock); | |
1404 | ||
9cfd5a95 JG |
1405 | if (pblk_line_alloc_bitmaps(pblk, line)) |
1406 | return NULL; | |
1407 | ||
588726d3 JG |
1408 | if (pblk_line_erase(pblk, line)) { |
1409 | line = pblk_line_retry(pblk, line); | |
1410 | if (!line) | |
1411 | return NULL; | |
1412 | } | |
1413 | ||
a4bd217b | 1414 | retry_setup: |
dd2a4343 | 1415 | if (!pblk_line_init_metadata(pblk, line, NULL)) { |
a4bd217b JG |
1416 | line = pblk_line_retry(pblk, line); |
1417 | if (!line) | |
1418 | return NULL; | |
1419 | ||
1420 | goto retry_setup; | |
1421 | } | |
1422 | ||
1423 | if (!pblk_line_init_bb(pblk, line, 1)) { | |
1424 | line = pblk_line_retry(pblk, line); | |
1425 | if (!line) | |
1426 | return NULL; | |
1427 | ||
1428 | goto retry_setup; | |
1429 | } | |
1430 | ||
a7689938 JG |
1431 | pblk_rl_free_lines_dec(&pblk->rl, line, true); |
1432 | ||
a4bd217b JG |
1433 | return line; |
1434 | } | |
1435 | ||
ae14cc04 MB |
1436 | void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa) |
1437 | { | |
1438 | struct pblk_line *line; | |
1439 | ||
cb21665c | 1440 | line = pblk_ppa_to_line(pblk, ppa); |
ae14cc04 MB |
1441 | kref_put(&line->ref, pblk_line_put_wq); |
1442 | } | |
1443 | ||
1444 | void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd) | |
1445 | { | |
45c5fcbb | 1446 | struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd); |
ae14cc04 MB |
1447 | int i; |
1448 | ||
ae14cc04 MB |
1449 | for (i = 0; i < rqd->nr_ppas; i++) |
1450 | pblk_ppa_to_line_put(pblk, ppa_list[i]); | |
1451 | } | |
1452 | ||
588726d3 JG |
1453 | static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line) |
1454 | { | |
1455 | lockdep_assert_held(&pblk->l_mg.free_lock); | |
1456 | ||
1457 | pblk_set_space_limit(pblk); | |
1458 | pblk->state = PBLK_STATE_STOPPING; | |
1b0dd0bf | 1459 | trace_pblk_state(pblk_disk_name(pblk), pblk->state); |
588726d3 JG |
1460 | } |
1461 | ||
8bd40020 JG |
1462 | static void pblk_line_close_meta_sync(struct pblk *pblk) |
1463 | { | |
1464 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
1465 | struct pblk_line_meta *lm = &pblk->lm; | |
1466 | struct pblk_line *line, *tline; | |
1467 | LIST_HEAD(list); | |
1468 | ||
1469 | spin_lock(&l_mg->close_lock); | |
1470 | if (list_empty(&l_mg->emeta_list)) { | |
1471 | spin_unlock(&l_mg->close_lock); | |
1472 | return; | |
1473 | } | |
1474 | ||
1475 | list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev); | |
1476 | spin_unlock(&l_mg->close_lock); | |
1477 | ||
1478 | list_for_each_entry_safe(line, tline, &list, list) { | |
1479 | struct pblk_emeta *emeta = line->emeta; | |
1480 | ||
1481 | while (emeta->mem < lm->emeta_len[0]) { | |
1482 | int ret; | |
1483 | ||
1484 | ret = pblk_submit_meta_io(pblk, line); | |
1485 | if (ret) { | |
4e495a46 | 1486 | pblk_err(pblk, "sync meta line %d failed (%d)\n", |
8bd40020 JG |
1487 | line->id, ret); |
1488 | return; | |
1489 | } | |
1490 | } | |
1491 | } | |
1492 | ||
1493 | pblk_wait_for_meta(pblk); | |
1494 | flush_workqueue(pblk->close_wq); | |
1495 | } | |
1496 | ||
a7c9e910 | 1497 | void __pblk_pipeline_flush(struct pblk *pblk) |
588726d3 JG |
1498 | { |
1499 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
1500 | int ret; | |
1501 | ||
1502 | spin_lock(&l_mg->free_lock); | |
1503 | if (pblk->state == PBLK_STATE_RECOVERING || | |
1504 | pblk->state == PBLK_STATE_STOPPED) { | |
1505 | spin_unlock(&l_mg->free_lock); | |
1506 | return; | |
1507 | } | |
1508 | pblk->state = PBLK_STATE_RECOVERING; | |
1b0dd0bf | 1509 | trace_pblk_state(pblk_disk_name(pblk), pblk->state); |
588726d3 JG |
1510 | spin_unlock(&l_mg->free_lock); |
1511 | ||
1512 | pblk_flush_writer(pblk); | |
1513 | pblk_wait_for_meta(pblk); | |
1514 | ||
1515 | ret = pblk_recov_pad(pblk); | |
1516 | if (ret) { | |
4e495a46 | 1517 | pblk_err(pblk, "could not close data on teardown(%d)\n", ret); |
588726d3 JG |
1518 | return; |
1519 | } | |
1520 | ||
ee8d5c1a | 1521 | flush_workqueue(pblk->bb_wq); |
588726d3 | 1522 | pblk_line_close_meta_sync(pblk); |
a7c9e910 JG |
1523 | } |
1524 | ||
1525 | void __pblk_pipeline_stop(struct pblk *pblk) | |
1526 | { | |
1527 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
588726d3 JG |
1528 | |
1529 | spin_lock(&l_mg->free_lock); | |
1530 | pblk->state = PBLK_STATE_STOPPED; | |
1b0dd0bf | 1531 | trace_pblk_state(pblk_disk_name(pblk), pblk->state); |
588726d3 JG |
1532 | l_mg->data_line = NULL; |
1533 | l_mg->data_next = NULL; | |
1534 | spin_unlock(&l_mg->free_lock); | |
1535 | } | |
1536 | ||
a7c9e910 JG |
1537 | void pblk_pipeline_stop(struct pblk *pblk) |
1538 | { | |
1539 | __pblk_pipeline_flush(pblk); | |
1540 | __pblk_pipeline_stop(pblk); | |
1541 | } | |
1542 | ||
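/* Swap the current data line for the prepared next line: finish erasing it
 * if needed, set up its metadata and bad-block bitmaps, and allocate a new
 * data_next. If no free line is available, the write pipeline is stopped.
 */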
21d22871 | 1543 | struct pblk_line *pblk_line_replace_data(struct pblk *pblk) |
a4bd217b | 1544 | { |
a4bd217b | 1545 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; |
21d22871 | 1546 | struct pblk_line *cur, *new = NULL; |
a4bd217b | 1547 | unsigned int left_seblks; |
a4bd217b | 1548 | |
a4bd217b JG |
1549 | new = l_mg->data_next; |
1550 | if (!new) | |
21d22871 | 1551 | goto out; |
a4bd217b | 1552 | |
588726d3 | 1553 | spin_lock(&l_mg->free_lock); |
44cdbdc6 JG |
1554 | cur = l_mg->data_line; |
1555 | l_mg->data_line = new; | |
1556 | ||
588726d3 JG |
1557 | pblk_line_setup_metadata(new, l_mg, &pblk->lm); |
1558 | spin_unlock(&l_mg->free_lock); | |
1559 | ||
1560 | retry_erase: | |
a4bd217b JG |
1561 | left_seblks = atomic_read(&new->left_seblks); |
1562 | if (left_seblks) { | |
1563 | /* If the line is not fully erased, erase it */ | |
a44f53fa | 1564 | if (atomic_read(&new->left_eblks)) { |
a4bd217b | 1565 | if (pblk_line_erase(pblk, new)) |
21d22871 | 1566 | goto out; |
a4bd217b JG |
1567 | } else { |
1568 | io_schedule(); | |
1569 | } | |
588726d3 | 1570 | goto retry_erase; |
a4bd217b JG |
1571 | } |
1572 | ||
9cfd5a95 JG |
1573 | if (pblk_line_alloc_bitmaps(pblk, new)) |
1574 | return NULL; | |
1575 | ||
a4bd217b | 1576 | retry_setup: |
dd2a4343 | 1577 | if (!pblk_line_init_metadata(pblk, new, cur)) { |
a4bd217b | 1578 | new = pblk_line_retry(pblk, new); |
f3236cef | 1579 | if (!new) |
21d22871 | 1580 | goto out; |
a4bd217b JG |
1581 | |
1582 | goto retry_setup; | |
1583 | } | |
1584 | ||
1585 | if (!pblk_line_init_bb(pblk, new, 1)) { | |
1586 | new = pblk_line_retry(pblk, new); | |
1587 | if (!new) | |
21d22871 | 1588 | goto out; |
a4bd217b JG |
1589 | |
1590 | goto retry_setup; | |
1591 | } | |
1592 | ||
a7689938 JG |
1593 | pblk_rl_free_lines_dec(&pblk->rl, new, true); |
1594 | ||
588726d3 JG |
1595 | /* Allocate next line for preparation */ |
1596 | spin_lock(&l_mg->free_lock); | |
1597 | l_mg->data_next = pblk_line_get(pblk); | |
1598 | if (!l_mg->data_next) { | |
1599 | /* If we cannot get a new line, we need to stop the pipeline. | |
1600 | * Only allow as many writes in as we can store safely and then | |
1601 | * fail gracefully | |
1602 | */ | |
1603 | pblk_stop_writes(pblk, new); | |
1604 | l_mg->data_next = NULL; | |
1605 | } else { | |
1606 | l_mg->data_next->seq_nr = l_mg->d_seq_nr++; | |
1607 | l_mg->data_next->type = PBLK_LINETYPE_DATA; | |
588726d3 JG |
1608 | } |
1609 | spin_unlock(&l_mg->free_lock); | |
1610 | ||
21d22871 JG |
1611 | out: |
1612 | return new; | |
a4bd217b JG |
1613 | } |
1614 | ||
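/* Last reference on a garbage-collected line dropped: reset its state,
 * return it to the free list and credit the rate limiter. Lines that saw
 * GC errors are put back on their GC list instead of being freed.
 */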
7bd4d370 | 1615 | static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line) |
a4bd217b | 1616 | { |
a4bd217b | 1617 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; |
d6b992f7 | 1618 | struct pblk_gc *gc = &pblk->gc; |
a4bd217b JG |
1619 | |
1620 | spin_lock(&line->lock); | |
1621 | WARN_ON(line->state != PBLK_LINESTATE_GC); | |
f2e02457 IK |
1622 | if (line->w_err_gc->has_gc_err) { |
1623 | spin_unlock(&line->lock); | |
1624 | pblk_err(pblk, "line %d had errors during GC\n", line->id); | |
1625 | pblk_put_line_back(pblk, line); | |
1626 | line->w_err_gc->has_gc_err = 0; | |
1627 | return; | |
1628 | } | |
1629 | ||
a4bd217b | 1630 | line->state = PBLK_LINESTATE_FREE; |
f2937232 HH |
1631 | trace_pblk_line_state(pblk_disk_name(pblk), line->id, |
1632 | line->state); | |
a4bd217b | 1633 | line->gc_group = PBLK_LINEGC_NONE; |
8e55c07b | 1634 | pblk_line_free(line); |
a4bd217b | 1635 | |
48b8d208 HH |
1636 | if (line->w_err_gc->has_write_err) { |
1637 | pblk_rl_werr_line_out(&pblk->rl); | |
1638 | line->w_err_gc->has_write_err = 0; | |
1639 | } | |
1640 | ||
1641 | spin_unlock(&line->lock); | |
d6b992f7 HH |
1642 | atomic_dec(&gc->pipeline_gc); |
1643 | ||
a4bd217b JG |
1644 | spin_lock(&l_mg->free_lock); |
1645 | list_add_tail(&line->list, &l_mg->free_list); | |
1646 | l_mg->nr_free_lines++; | |
1647 | spin_unlock(&l_mg->free_lock); | |
1648 | ||
1649 | pblk_rl_free_lines_inc(&pblk->rl, line); | |
1650 | } | |
1651 | ||
7bd4d370 JG |
1652 | static void pblk_line_put_ws(struct work_struct *work) |
1653 | { | |
1654 | struct pblk_line_ws *line_put_ws = container_of(work, | |
1655 | struct pblk_line_ws, ws); | |
1656 | struct pblk *pblk = line_put_ws->pblk; | |
1657 | struct pblk_line *line = line_put_ws->line; | |
1658 | ||
1659 | __pblk_line_put(pblk, line); | |
b906bbb6 | 1660 | mempool_free(line_put_ws, &pblk->gen_ws_pool); |
7bd4d370 JG |
1661 | } |
1662 | ||
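/* kref release callbacks for line references. pblk_line_put() frees the
 * line in the caller's context; pblk_line_put_wq() defers the free to the
 * r_end_wq workqueue (hence the GFP_ATOMIC allocation of the work item).
 */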
1663 | void pblk_line_put(struct kref *ref) | |
1664 | { | |
1665 | struct pblk_line *line = container_of(ref, struct pblk_line, ref); | |
1666 | struct pblk *pblk = line->pblk; | |
1667 | ||
1668 | __pblk_line_put(pblk, line); | |
1669 | } | |
1670 | ||
1671 | void pblk_line_put_wq(struct kref *ref) | |
1672 | { | |
1673 | struct pblk_line *line = container_of(ref, struct pblk_line, ref); | |
1674 | struct pblk *pblk = line->pblk; | |
1675 | struct pblk_line_ws *line_put_ws; | |
1676 | ||
b906bbb6 | 1677 | line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC); |
7bd4d370 JG |
1678 | if (!line_put_ws) |
1679 | return; | |
1680 | ||
1681 | line_put_ws->pblk = pblk; | |
1682 | line_put_ws->line = line; | |
1683 | line_put_ws->priv = NULL; | |
1684 | ||
1685 | INIT_WORK(&line_put_ws->ws, pblk_line_put_ws); | |
1686 | queue_work(pblk->r_end_wq, &line_put_ws->ws); | |
1687 | } | |
1688 | ||
a4bd217b JG |
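/* Issue a chunk reset (erase) without waiting for completion; the result is
 * handled in pblk_end_io_erase().
 */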
1689 | int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa) |
1690 | { | |
1691 | struct nvm_rq *rqd; | |
1692 | int err; | |
1693 | ||
67bf26a3 | 1694 | rqd = pblk_alloc_rqd(pblk, PBLK_ERASE); |
a4bd217b JG |
1695 | |
1696 | pblk_setup_e_rq(pblk, rqd, ppa); | |
1697 | ||
1698 | rqd->end_io = pblk_end_io_erase; | |
1699 | rqd->private = pblk; | |
1700 | ||
4209c31c HH |
1701 | trace_pblk_chunk_reset(pblk_disk_name(pblk), |
1702 | &ppa, PBLK_CHUNK_RESET_START); | |
1703 | ||
a4bd217b JG |
1704 | /* The write thread schedules erases so that it minimizes disturbances |
1705 | * with writes. Thus, there is no need to take the LUN semaphore. | |
1706 | */ | |
48e5da72 | 1707 | err = pblk_submit_io(pblk, rqd, NULL); |
a4bd217b JG |
1708 | if (err) { |
1709 | struct nvm_tgt_dev *dev = pblk->dev; | |
1710 | struct nvm_geo *geo = &dev->geo; | |
1711 | ||
4e495a46 | 1712 | pblk_err(pblk, "could not async erase line:%d,blk:%d\n", |
cb21665c | 1713 | pblk_ppa_to_line_id(ppa), |
b1bcfda1 | 1714 | pblk_ppa_to_pos(geo, ppa)); |
a4bd217b JG |
1715 | } |
1716 | ||
1717 | return err; | |
1718 | } | |
1719 | ||
1720 | struct pblk_line *pblk_line_get_data(struct pblk *pblk) | |
1721 | { | |
1722 | return pblk->l_mg.data_line; | |
1723 | } | |
1724 | ||
d624f371 JG |
1725 | /* For now, always erase next line */ |
1726 | struct pblk_line *pblk_line_get_erase(struct pblk *pblk) | |
a4bd217b JG |
1727 | { |
1728 | return pblk->l_mg.data_next; | |
1729 | } | |
1730 | ||
1731 | int pblk_line_is_full(struct pblk_line *line) | |
1732 | { | |
1733 | return (line->left_msecs == 0); | |
1734 | } | |
1735 | ||
588726d3 JG |
1736 | static void pblk_line_should_sync_meta(struct pblk *pblk) |
1737 | { | |
1738 | if (pblk_rl_is_limit(&pblk->rl)) | |
1739 | pblk_line_close_meta_sync(pblk); | |
1740 | } | |
1741 | ||
a4bd217b JG |
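/* Move a fully written line to the closed state: place it on the matching
 * GC list, release its map bitmap and meta line slot, and mark its
 * non-offline chunks as closed in the in-memory chunk metadata.
 */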
1742 | void pblk_line_close(struct pblk *pblk, struct pblk_line *line) |
1743 | { | |
32ef9412 JG |
1744 | struct nvm_tgt_dev *dev = pblk->dev; |
1745 | struct nvm_geo *geo = &dev->geo; | |
1746 | struct pblk_line_meta *lm = &pblk->lm; | |
a4bd217b JG |
1747 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; |
1748 | struct list_head *move_list; | |
32ef9412 | 1749 | int i; |
a4bd217b | 1750 | |
880eda54 | 1751 | #ifdef CONFIG_NVM_PBLK_DEBUG |
dd2a4343 | 1752 | WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line), |
a4bd217b | 1753 | "pblk: corrupt closed line %d\n", line->id); |
a84ebb83 | 1754 | #endif |
a4bd217b JG |
1755 | |
1756 | spin_lock(&l_mg->free_lock); | |
1757 | WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap)); | |
1758 | spin_unlock(&l_mg->free_lock); | |
1759 | ||
1760 | spin_lock(&l_mg->gc_lock); | |
1761 | spin_lock(&line->lock); | |
1762 | WARN_ON(line->state != PBLK_LINESTATE_OPEN); | |
1763 | line->state = PBLK_LINESTATE_CLOSED; | |
1764 | move_list = pblk_line_gc_list(pblk, line); | |
a4bd217b JG |
1765 | list_add_tail(&line->list, move_list); |
1766 | ||
53d82db6 | 1767 | mempool_free(line->map_bitmap, l_mg->bitmap_pool); |
a4bd217b JG |
1768 | line->map_bitmap = NULL; |
1769 | line->smeta = NULL; | |
1770 | line->emeta = NULL; | |
1771 | ||
32ef9412 JG |
1772 | for (i = 0; i < lm->blk_per_line; i++) { |
1773 | struct pblk_lun *rlun = &pblk->luns[i]; | |
1774 | int pos = pblk_ppa_to_pos(geo, rlun->bppa); | |
1775 | int state = line->chks[pos].state; | |
1776 | ||
1777 | if (!(state & NVM_CHK_ST_OFFLINE)) | |
1778 | line->chks[pos].state = NVM_CHK_ST_CLOSED; | |
1779 | } | |
1780 | ||
a4bd217b JG |
1781 | spin_unlock(&line->lock); |
1782 | spin_unlock(&l_mg->gc_lock); | |
f2937232 HH |
1783 | |
1784 | trace_pblk_line_state(pblk_disk_name(pblk), line->id, | |
1785 | line->state); | |
a4bd217b JG |
1786 | } |
1787 | ||
dd2a4343 JG |
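/* Finalize the end-of-line metadata (emeta): copy the valid-sector counts,
 * bad-block bitmap and write-amplification counters, fill in the header on
 * first use, compute the CRCs and queue the line for emeta submission.
 */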
1788 | void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line) |
1789 | { | |
1790 | struct pblk_line_mgmt *l_mg = &pblk->l_mg; | |
1791 | struct pblk_line_meta *lm = &pblk->lm; | |
1792 | struct pblk_emeta *emeta = line->emeta; | |
1793 | struct line_emeta *emeta_buf = emeta->buf; | |
76758390 | 1794 | struct wa_counters *wa = emeta_to_wa(lm, emeta_buf); |
dd2a4343 | 1795 | |
588726d3 | 1796 | /* No need for the exact vsc value; avoid a big line lock and use an approximation. */ |
dd2a4343 JG |
1797 | memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len); |
1798 | memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len); | |
1799 | ||
76758390 HH |
1800 | wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa)); |
1801 | wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa)); | |
1802 | wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa)); | |
1803 | ||
9cc85bc7 JG |
1804 | if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) { |
1805 | emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC); | |
7e0a0847 AS |
1806 | guid_copy((guid_t *)&emeta_buf->header.uuid, |
1807 | &pblk->instance_uuid); | |
9cc85bc7 JG |
1808 | emeta_buf->header.id = cpu_to_le32(line->id); |
1809 | emeta_buf->header.type = cpu_to_le16(line->type); | |
1810 | emeta_buf->header.version_major = EMETA_VERSION_MAJOR; | |
1811 | emeta_buf->header.version_minor = EMETA_VERSION_MINOR; | |
1812 | emeta_buf->header.crc = cpu_to_le32( | |
1813 | pblk_calc_meta_header_crc(pblk, &emeta_buf->header)); | |
1814 | } | |
1815 | ||
dd2a4343 JG |
1816 | emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas); |
1817 | emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf)); | |
1818 | ||
1819 | spin_lock(&l_mg->close_lock); | |
1820 | spin_lock(&line->lock); | |
48b8d208 HH |
1821 | |
1822 | /* Update the in-memory start address for emeta, in case it has | |
1823 | * shifted due to write errors | |
1824 | */ | |
1825 | if (line->emeta_ssec != line->cur_sec) | |
1826 | line->emeta_ssec = line->cur_sec; | |
1827 | ||
dd2a4343 JG |
1828 | list_add_tail(&line->list, &l_mg->emeta_list); |
1829 | spin_unlock(&line->lock); | |
1830 | spin_unlock(&l_mg->close_lock); | |
588726d3 JG |
1831 | |
1832 | pblk_line_should_sync_meta(pblk); | |
48b8d208 HH |
1833 | } |
1834 | ||
1835 | static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line) | |
1836 | { | |
1837 | struct pblk_line_meta *lm = &pblk->lm; | |
48b8d208 HH |
1838 | unsigned int lba_list_size = lm->emeta_len[2]; |
1839 | struct pblk_w_err_gc *w_err_gc = line->w_err_gc; | |
1840 | struct pblk_emeta *emeta = line->emeta; | |
1841 | ||
ff8f3520 | 1842 | w_err_gc->lba_list = kvmalloc(lba_list_size, GFP_KERNEL); |
48b8d208 HH |
1843 | memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf), |
1844 | lba_list_size); | |
dd2a4343 JG |
1845 | } |
1846 | ||
a4bd217b JG |
1847 | void pblk_line_close_ws(struct work_struct *work) |
1848 | { | |
1849 | struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws, | |
1850 | ws); | |
1851 | struct pblk *pblk = line_ws->pblk; | |
1852 | struct pblk_line *line = line_ws->line; | |
48b8d208 HH |
1853 | struct pblk_w_err_gc *w_err_gc = line->w_err_gc; |
1854 | ||
1855 | /* Write errors make the emeta start address stored in smeta invalid, | |
1856 | * so keep a copy of the lba list until we've gc'd the line | |
1857 | */ | |
1858 | if (w_err_gc->has_write_err) | |
1859 | pblk_save_lba_list(pblk, line); | |
a4bd217b JG |
1860 | |
1861 | pblk_line_close(pblk, line); | |
b906bbb6 | 1862 | mempool_free(line_ws, &pblk->gen_ws_pool); |
a4bd217b JG |
1863 | } |
1864 | ||
b84ae4a8 JG |
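/* Generic helper to run @work on @wq with a pblk_line_ws context allocated
 * from the gen_ws mempool. Purely as an illustration, a caller deferring a
 * line close could use:
 *
 *	pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
 *			GFP_ATOMIC, pblk->close_wq);
 */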
1865 | void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv, |
1866 | void (*work)(struct work_struct *), gfp_t gfp_mask, | |
ef576494 | 1867 | struct workqueue_struct *wq) |
a4bd217b JG |
1868 | { |
1869 | struct pblk_line_ws *line_ws; | |
1870 | ||
b906bbb6 | 1871 | line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask); |
a4bd217b JG |
1872 | |
1873 | line_ws->pblk = pblk; | |
1874 | line_ws->line = line; | |
1875 | line_ws->priv = priv; | |
1876 | ||
1877 | INIT_WORK(&line_ws->ws, work); | |
ef576494 | 1878 | queue_work(wq, &line_ws->ws); |
a4bd217b JG |
1879 | } |
1880 | ||
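/* Per-LUN write semaphore helpers: pblk_down_chunk()/pblk_down_rq() take
 * the semaphore of the LUN that @ppa maps to, pblk_up_chunk()/pblk_up_rq()
 * release it again.
 */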
43241cfe | 1881 | static void __pblk_down_chunk(struct pblk *pblk, int pos) |
a4bd217b | 1882 | { |
3eaa11e2 | 1883 | struct pblk_lun *rlun = &pblk->luns[pos]; |
a4bd217b JG |
1884 | int ret; |
1885 | ||
1886 | /* | |
1887 | * Only send one inflight I/O per LUN. Since we map at a page | |
1888 | * granularity, all ppas in the I/O will map to the same LUN | |
1889 | */ | |
a4bd217b | 1890 | |
3eaa11e2 | 1891 | ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000)); |
c5493845 | 1892 | if (ret == -ETIME || ret == -EINTR) |
4e495a46 MB |
1893 | pblk_err(pblk, "taking lun semaphore timed out: err %d\n", |
1894 | -ret); | |
a4bd217b JG |
1895 | } |
1896 | ||
43241cfe | 1897 | void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa) |
3eaa11e2 JG |
1898 | { |
1899 | struct nvm_tgt_dev *dev = pblk->dev; | |
1900 | struct nvm_geo *geo = &dev->geo; | |
43241cfe | 1901 | int pos = pblk_ppa_to_pos(geo, ppa); |
3eaa11e2 | 1902 | |
43241cfe | 1903 | __pblk_down_chunk(pblk, pos); |
3eaa11e2 JG |
1904 | } |
1905 | ||
43241cfe | 1906 | void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa, |
3eaa11e2 JG |
1907 | unsigned long *lun_bitmap) |
1908 | { | |
1909 | struct nvm_tgt_dev *dev = pblk->dev; | |
1910 | struct nvm_geo *geo = &dev->geo; | |
43241cfe | 1911 | int pos = pblk_ppa_to_pos(geo, ppa); |
3eaa11e2 JG |
1912 | |
1913 | /* If the LUN has been locked for this same request, do not attempt to | |
1914 | * lock it again | |
1915 | */ | |
1916 | if (test_and_set_bit(pos, lun_bitmap)) | |
1917 | return; | |
1918 | ||
43241cfe | 1919 | __pblk_down_chunk(pblk, pos); |
3eaa11e2 JG |
1920 | } |
1921 | ||
43241cfe | 1922 | void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa) |
3eaa11e2 JG |
1923 | { |
1924 | struct nvm_tgt_dev *dev = pblk->dev; | |
1925 | struct nvm_geo *geo = &dev->geo; | |
1926 | struct pblk_lun *rlun; | |
43241cfe | 1927 | int pos = pblk_ppa_to_pos(geo, ppa); |
3eaa11e2 JG |
1928 | |
1929 | rlun = &pblk->luns[pos]; | |
1930 | up(&rlun->wr_sem); | |
1931 | } | |
1932 | ||
e99e802f | 1933 | void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap) |
a4bd217b JG |
1934 | { |
1935 | struct nvm_tgt_dev *dev = pblk->dev; | |
1936 | struct nvm_geo *geo = &dev->geo; | |
1937 | struct pblk_lun *rlun; | |
a40afad9 | 1938 | int num_lun = geo->all_luns; |
a4bd217b JG |
1939 | int bit = -1; |
1940 | ||
a40afad9 | 1941 | while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) { |
a4bd217b JG |
1942 | rlun = &pblk->luns[bit]; |
1943 | up(&rlun->wr_sem); | |
1944 | } | |
a4bd217b JG |
1945 | } |
1946 | ||
1947 | void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa) | |
1948 | { | |
9f6cb13b | 1949 | struct ppa_addr ppa_l2p; |
a4bd217b JG |
1950 | |
1951 | /* logic error: lba out-of-bounds. Ignore update */ | |
847a3a27 | 1952 | if (!(lba < pblk->capacity)) { |
a4bd217b JG |
1953 | WARN(1, "pblk: corrupted L2P map request\n"); |
1954 | return; | |
1955 | } | |
1956 | ||
1957 | spin_lock(&pblk->trans_lock); | |
9f6cb13b | 1958 | ppa_l2p = pblk_trans_map_get(pblk, lba); |
a4bd217b | 1959 | |
9f6cb13b JG |
1960 | if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p)) |
1961 | pblk_map_invalidate(pblk, ppa_l2p); | |
a4bd217b JG |
1962 | |
1963 | pblk_trans_map_set(pblk, lba, ppa); | |
1964 | spin_unlock(&pblk->trans_lock); | |
1965 | } | |
1966 | ||
1967 | void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa) | |
1968 | { | |
d340121e | 1969 | |
880eda54 | 1970 | #ifdef CONFIG_NVM_PBLK_DEBUG |
a4bd217b JG |
1971 | /* Callers must ensure that the ppa points to a cache address */ |
1972 | BUG_ON(!pblk_addr_in_cache(ppa)); | |
1973 | BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa))); | |
1974 | #endif | |
1975 | ||
1976 | pblk_update_map(pblk, lba, ppa); | |
1977 | } | |
1978 | ||
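/* L2P update for a sector moved by garbage collection. The map is only
 * updated if the entry still points to the sector GC read from (ppa_gc);
 * otherwise the lba has been rewritten in the meantime and the GC copy is
 * dropped. Returns 1 if the map was updated, 0 otherwise.
 */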
9f6cb13b | 1979 | int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new, |
d340121e | 1980 | struct pblk_line *gc_line, u64 paddr_gc) |
a4bd217b | 1981 | { |
d340121e | 1982 | struct ppa_addr ppa_l2p, ppa_gc; |
a4bd217b JG |
1983 | int ret = 1; |
1984 | ||
880eda54 | 1985 | #ifdef CONFIG_NVM_PBLK_DEBUG |
a4bd217b | 1986 | /* Callers must ensure that the ppa points to a cache address */ |
9f6cb13b JG |
1987 | BUG_ON(!pblk_addr_in_cache(ppa_new)); |
1988 | BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new))); | |
a4bd217b JG |
1989 | #endif |
1990 | ||
1991 | /* logic error: lba out-of-bounds. Ignore update */ | |
847a3a27 | 1992 | if (!(lba < pblk->capacity)) { |
a4bd217b JG |
1993 | WARN(1, "pblk: corrupted L2P map request\n"); |
1994 | return 0; | |
1995 | } | |
1996 | ||
1997 | spin_lock(&pblk->trans_lock); | |
9f6cb13b | 1998 | ppa_l2p = pblk_trans_map_get(pblk, lba); |
d340121e | 1999 | ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id); |
a4bd217b | 2000 | |
d340121e JG |
2001 | if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) { |
2002 | spin_lock(&gc_line->lock); | |
2003 | WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap), | |
2004 | "pblk: corrupted GC update"); | |
2005 | spin_unlock(&gc_line->lock); | |
9f6cb13b | 2006 | |
a4bd217b JG |
2007 | ret = 0; |
2008 | goto out; | |
2009 | } | |
2010 | ||
9f6cb13b | 2011 | pblk_trans_map_set(pblk, lba, ppa_new); |
a4bd217b JG |
2012 | out: |
2013 | spin_unlock(&pblk->trans_lock); | |
2014 | return ret; | |
2015 | } | |
2016 | ||
9f6cb13b JG |
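/* L2P update when a cached sector reaches the device: map @lba to
 * @ppa_mapped unless the cache entry has been rewritten in the meantime, in
 * which case the freshly mapped ppa is invalidated instead. Padded entries
 * (lba == ADDR_EMPTY) just account for padding and invalidate the mapped ppa.
 */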
2017 | void pblk_update_map_dev(struct pblk *pblk, sector_t lba, |
2018 | struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache) | |
a4bd217b | 2019 | { |
9f6cb13b | 2020 | struct ppa_addr ppa_l2p; |
a4bd217b | 2021 | |
880eda54 | 2022 | #ifdef CONFIG_NVM_PBLK_DEBUG |
a4bd217b | 2023 | /* Callers must ensure that the ppa points to a device address */ |
9f6cb13b | 2024 | BUG_ON(pblk_addr_in_cache(ppa_mapped)); |
a4bd217b JG |
2025 | #endif |
2026 | /* Invalidate and discard padded entries */ | |
2027 | if (lba == ADDR_EMPTY) { | |
76758390 | 2028 | atomic64_inc(&pblk->pad_wa); |
880eda54 | 2029 | #ifdef CONFIG_NVM_PBLK_DEBUG |
a4bd217b JG |
2030 | atomic_long_inc(&pblk->padded_wb); |
2031 | #endif | |
9f6cb13b JG |
2032 | if (!pblk_ppa_empty(ppa_mapped)) |
2033 | pblk_map_invalidate(pblk, ppa_mapped); | |
a4bd217b JG |
2034 | return; |
2035 | } | |
2036 | ||
2037 | /* logic error: lba out-of-bounds. Ignore update */ | |
847a3a27 | 2038 | if (!(lba < pblk->capacity)) { |
a4bd217b JG |
2039 | WARN(1, "pblk: corrupted L2P map request\n"); |
2040 | return; | |
2041 | } | |
2042 | ||
2043 | spin_lock(&pblk->trans_lock); | |
9f6cb13b | 2044 | ppa_l2p = pblk_trans_map_get(pblk, lba); |
a4bd217b JG |
2045 | |
2046 | /* Do not update L2P if the cacheline has been updated. In this case, | |
2047 | * the mapped ppa must be invalidated | |
2048 | */ | |
9f6cb13b JG |
2049 | if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) { |
2050 | if (!pblk_ppa_empty(ppa_mapped)) | |
2051 | pblk_map_invalidate(pblk, ppa_mapped); | |
a4bd217b JG |
2052 | goto out; |
2053 | } | |
2054 | ||
880eda54 | 2055 | #ifdef CONFIG_NVM_PBLK_DEBUG |
9f6cb13b | 2056 | WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p)); |
a4bd217b JG |
2057 | #endif |
2058 | ||
9f6cb13b | 2059 | pblk_trans_map_set(pblk, lba, ppa_mapped); |
a4bd217b JG |
2060 | out: |
2061 | spin_unlock(&pblk->trans_lock); | |
2062 | } | |
2063 | ||
a96de64a IK |
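/* Look up a sequential lba range. The lookup stops early when the mapping
 * switches between cache and media so that the returned run is entirely one
 * or the other; *from_cache reports which. A line reference is taken for
 * every media-mapped sector returned.
 */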
2064 | int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas, |
2065 | sector_t blba, int nr_secs, bool *from_cache) | |
a4bd217b JG |
2066 | { |
2067 | int i; | |
2068 | ||
2069 | spin_lock(&pblk->trans_lock); | |
7bd4d370 JG |
2070 | for (i = 0; i < nr_secs; i++) { |
2071 | struct ppa_addr ppa; | |
2072 | ||
2073 | ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i); | |
2074 | ||
2075 | /* If the L2P entry maps to a line, the reference is valid */ | |
2076 | if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) { | |
cb21665c | 2077 | struct pblk_line *line = pblk_ppa_to_line(pblk, ppa); |
7bd4d370 | 2078 | |
a96de64a IK |
2079 | if (i > 0 && *from_cache) |
2080 | break; | |
2081 | *from_cache = false; | |
2082 | ||
7bd4d370 | 2083 | kref_get(&line->ref); |
a96de64a IK |
2084 | } else { |
2085 | if (i > 0 && !*from_cache) | |
2086 | break; | |
2087 | *from_cache = true; | |
7bd4d370 JG |
2088 | } |
2089 | } | |
a4bd217b | 2090 | spin_unlock(&pblk->trans_lock); |
a96de64a | 2091 | return i; |
a4bd217b JG |
2092 | } |
2093 | ||
2094 | void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas, | |
2095 | u64 *lba_list, int nr_secs) | |
2096 | { | |
d340121e | 2097 | u64 lba; |
a4bd217b JG |
2098 | int i; |
2099 | ||
2100 | spin_lock(&pblk->trans_lock); | |
2101 | for (i = 0; i < nr_secs; i++) { | |
2102 | lba = lba_list[i]; | |
d340121e | 2103 | if (lba != ADDR_EMPTY) { |
a4bd217b | 2104 | /* logic error: lba out-of-bounds. Ignore update */ |
847a3a27 | 2105 | if (!(lba < pblk->capacity)) { |
a4bd217b JG |
2106 | WARN(1, "pblk: corrupted L2P map request\n"); |
2107 | continue; | |
2108 | } | |
2109 | ppas[i] = pblk_trans_map_get(pblk, lba); | |
2110 | } | |
2111 | } | |
2112 | spin_unlock(&pblk->trans_lock); | |
2113 | } | |
55d8ec35 IK |
2114 | |
2115 | void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd) | |
2116 | { | |
2117 | void *buffer; | |
2118 | ||
2119 | if (pblk_is_oob_meta_supported(pblk)) { | |
2120 | /* Just use OOB metadata buffer as always */ | |
2121 | buffer = rqd->meta_list; | |
2122 | } else { | |
2123 | /* We need to reuse the last page of the request (packed metadata) | |
2124 | * in a similar way to traditional OOB metadata | |
2125 | */ | |
2126 | buffer = page_to_virt( | |
2127 | rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page); | |
2128 | } | |
2129 | ||
2130 | return buffer; | |
2131 | } | |
2132 | ||
2133 | void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd) | |
2134 | { | |
2135 | void *meta_list = rqd->meta_list; | |
2136 | void *page; | |
2137 | int i = 0; | |
2138 | ||
2139 | if (pblk_is_oob_meta_supported(pblk)) | |
2140 | return; | |
2141 | ||
2142 | page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page); | |
2143 | /* We need to fill the OOB meta buffer with data from packed metadata */ | |
2144 | for (; i < rqd->nr_ppas; i++) | |
2145 | memcpy(pblk_get_meta(pblk, meta_list, i), | |
2146 | page + (i * sizeof(struct pblk_sec_meta)), | |
2147 | sizeof(struct pblk_sec_meta)); | |
2148 | } |