/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	WARN_ON_ONCE(!IS_ALIGNED(arena->infooff, 512));
	WARN_ON_ONCE(!IS_ALIGNED(arena->info2off, 512));

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	WARN_ON(!super);
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

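/*
 * A raw map entry carries the post-map LBA in its low bits and the 'Z'
 * (zero/trim) and 'E' (error) flags in its top two bits:
 *   Z=0, E=0: initial state, the LBA maps to itself (identity)
 *   Z=0, E=1: the mapped block has a known media error
 *   Z=1, E=0: the block was discarded and reads back as zeroes
 *   Z=1, E=1: a normal, valid mapping
 */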
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		WARN_ONCE(1, "Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_read_pair(struct arena_info *arena, u32 lane,
			struct log_entry *ent)
{
	WARN_ON(!ent);
	return arena_read_bytes(arena,
			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
			2 * LOG_ENT_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * It also updates the sequence number in this old entry to
 * make it the 'new' one if the mark_flag is set.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO The logic feels a bit kludge-y. make it better..
 */
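/*
 * Sequence numbers cycle through 1 -> 2 -> 3 -> 1 (0 means 'never written').
 * For a valid pair, the entry whose seq is one step ahead of the other is
 * the newer one; a {1, 3} pair means a wraparound occurred, so 1 is newer.
 */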
static int btt_log_get_old(struct log_entry *ent)
{
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (ent[0].seq == 0) {
		ent[0].seq = cpu_to_le32(1);
		return 0;
	}

	if (ent[0].seq == ent[1].seq)
		return -EINVAL;
	if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
		return -EINVAL;

	if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
		if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_entry log[2];

	ret = btt_log_read_pair(arena, lane, log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
				"log corruption (%d): lane %d seq [%d, %d]\n",
				old_ent, lane, log[0].seq, log[1].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	/*
	 * Ignore the padding in log_entry for calculating log_half.
	 * The entry is 'committed' when we write the sequence number,
	 * and we want to ensure that that is the last thing written.
	 * We don't bother writing the padding as that would be extra
	 * media wear and write amplification
	 */
	unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
	u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
	void *src = ent;

	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(ent->old_map))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = le32_to_cpu(ent_lba(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	WARN_ON_ONCE(!IS_ALIGNED(arena->mapoff, 512));

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		WARN_ON_ONCE(size < 512);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry log;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	WARN_ON_ONCE(!IS_ALIGNED(arena->logoff, 512));

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		WARN_ON_ONCE(size < 512);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		log.lba = cpu_to_le32(i);
		log.old_map = cpu_to_le32(arena->external_nlba + i);
		log.new_map = cpu_to_le32(arena->external_nlba + i);
		log.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &log, 0);
		if (ret)
			goto free;
	}

free:
	kfree(zerobuf);
	return ret;
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

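/*
 * If the block at the head of this lane's free list was seen to contain
 * media errors, zero it through the regular write path (which also clears
 * the poison) before the block is handed out for a new write.
 */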
static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

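/*
 * Rebuild the in-memory free list from the on-media log: for each lane the
 * newer log entry's old_map block is the free block, and a transaction that
 * committed its flog entry but never finished the map update is replayed
 * here.
 */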
static int btt_freelist_init(struct arena_info *arena)
{
	int old, new, ret;
	u32 i, map_entry;
	struct log_entry log_new, log_old;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
		if (old < 0)
			return old;

		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = le32_to_cpu(log_new.old_map);

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(log_new.old_map)) {
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				WARN_ONCE(1, "Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_new.old_map == log_new.new_map)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;
		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
				(le32_to_cpu(log_new.old_map) == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
				BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

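/*
 * Walk the raw namespace one arena at a time, validating each info block as
 * it is found; an info block with nextoff == 0 terminates the chain.
 */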
static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		mutex_init(&arena->err_lock);
		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

out:
	kfree(arena);
	free_arenas(btt);
out_super:
	kfree(super);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
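/*
 * Map entries that share a cacheline hash to the same one of the nfree
 * locks, so updates to neighboring entries serialize on one lock while
 * updates to entries in different cachelines can usually run in parallel.
 */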
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

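/*
 * Readers advertise the post-map block they are about to read in the
 * per-lane RTT (read tracking table); btt_write_pg() scans the RTT and will
 * not hand that block out from the free list while a read has it published.
 */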
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			int rc;

			/* Media error - set the e_flag */
			rc = btt_map_write(arena, premap, postmap, 0, 1,
					NVDIMM_IO_ATOMIC);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

out_rtt:
	arena->rtt[lane] = RTT_INVALID;
out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

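/*
 * Write path: pick the lane's free post-map block, write the new data (and
 * any integrity metadata) to it, commit a log entry recording the old->new
 * mapping, and only then update the map entry. A power failure at any point
 * leaves the external LBA pointing at either the old or the new data in its
 * entirety.
 */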
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();


		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
			NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

out_map:
	unlock_map(arena, premap);
out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			bool is_write, sector_t sector)
{
	int ret;

	if (!is_write) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		BUG_ON(len > PAGE_SIZE);
		/* Make sure len is in multiples of sector size. */
		/* XXX is this right? */
		BUG_ON(len < btt->sector_size);
		BUG_ON(len % btt->sector_size);

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				op_is_write(bio_op(bio)), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
					"io error in %s sector %lld, len %d,\n",
					(op_is_write(bio_op(bio))) ? "WRITE" :
					"READ",
					(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, bool is_write)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;

	rc = btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}


static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	set_capacity(btt->btt_disk, 0);
	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	revalidate_disk(btt->btt_disk);

	return 0;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @maxlane:	maximum number of parallel requests the device can handle
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
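	/* 512B sectors, or 4K when lbasize allows; the rest is per-sector metadata */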
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);