drivers/nvdimm/btt.c
1 /*
2 * Block Translation Table
3 * Copyright (c) 2014-2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14 #include <linux/highmem.h>
15 #include <linux/debugfs.h>
16 #include <linux/blkdev.h>
17 #include <linux/module.h>
18 #include <linux/device.h>
19 #include <linux/mutex.h>
20 #include <linux/hdreg.h>
21 #include <linux/genhd.h>
22 #include <linux/sizes.h>
23 #include <linux/ndctl.h>
24 #include <linux/fs.h>
25 #include <linux/nd.h>
26 #include "btt.h"
27 #include "nd.h"
28
29 enum log_ent_request {
30 LOG_NEW_ENT = 0,
31 LOG_OLD_ENT
32 };
33
34 static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
35 void *buf, size_t n, unsigned long flags)
36 {
37 struct nd_btt *nd_btt = arena->nd_btt;
38 struct nd_namespace_common *ndns = nd_btt->ndns;
39
40 /* arena offsets are 4K from the base of the device */
41 offset += SZ_4K;
42 return nvdimm_read_bytes(ndns, offset, buf, n, flags);
43 }
44
45 static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
46 void *buf, size_t n, unsigned long flags)
47 {
48 struct nd_btt *nd_btt = arena->nd_btt;
49 struct nd_namespace_common *ndns = nd_btt->ndns;
50
51 /* arena offsets are 4K from the base of the device */
52 offset += SZ_4K;
53 return nvdimm_write_bytes(ndns, offset, buf, n, flags);
54 }
55
56 static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
57 {
58 int ret;
59
60 /*
61 * infooff and info2off should always be at least 512B aligned.
62 * We rely on that to make sure rw_bytes does error clearing
63 * correctly, so make sure that is the case.
64 */
65 WARN_ON_ONCE(!IS_ALIGNED(arena->infooff, 512));
66 WARN_ON_ONCE(!IS_ALIGNED(arena->info2off, 512));
67
68 ret = arena_write_bytes(arena, arena->info2off, super,
69 sizeof(struct btt_sb), 0);
70 if (ret)
71 return ret;
72
73 return arena_write_bytes(arena, arena->infooff, super,
74 sizeof(struct btt_sb), 0);
75 }
76
77 static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
78 {
79 WARN_ON(!super);
80 return arena_read_bytes(arena, arena->infooff, super,
81 sizeof(struct btt_sb), 0);
82 }
83
84 /*
85 * 'raw' version of btt_map write
86 * Assumptions:
87 * mapping is in little-endian
88 * mapping contains 'E' and 'Z' flags as desired
89 */
90 static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
91 unsigned long flags)
92 {
93 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
94
95 WARN_ON(lba >= arena->external_nlba);
96 return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
97 }
98
99 static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
100 u32 z_flag, u32 e_flag, unsigned long rwb_flags)
101 {
102 u32 ze;
103 __le32 mapping_le;
104
105 /*
106 * This 'mapping' is supposed to be just the LBA mapping, without
107 * any flags set, so strip the flag bits.
108 */
109 mapping &= MAP_LBA_MASK;
110
111 ze = (z_flag << 1) + e_flag;
112 switch (ze) {
113 case 0:
114 /*
115 * We want to set neither of the Z or E flags, and
116 * in the actual layout, this means setting the bit
117 * positions of both to '1' to indicate a 'normal'
118 * map entry
119 */
120 mapping |= MAP_ENT_NORMAL;
121 break;
122 case 1:
123 mapping |= (1 << MAP_ERR_SHIFT);
124 break;
125 case 2:
126 mapping |= (1 << MAP_TRIM_SHIFT);
127 break;
128 default:
129 /*
130 * The case where Z and E are both sent in as '1' could be
131 * construed as a valid 'normal' case, but we decide not to,
132 * to avoid confusion
133 */
134 WARN_ONCE(1, "Invalid use of Z and E flags\n");
135 return -EIO;
136 }
137
138 mapping_le = cpu_to_le32(mapping);
139 return __btt_map_write(arena, lba, mapping_le, rwb_flags);
140 }
141
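/*
 * Illustrative decode examples for btt_map_read below, assuming the
 * usual btt.h layout (zero/trim flag at bit 31, error flag at bit 30
 * of each 32-bit map entry):
 *   0x00000000 -> ze == 0: never written, identity mapping (postmap = premap)
 *   0xC0000005 -> ze == 3: normal entry, postmap block 5
 *   0x40000005 -> ze == 1: postmap block 5, error flag set
 *   0x80000005 -> ze == 2: postmap block 5, zero/trim flag set
 */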
142 static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
143 int *trim, int *error, unsigned long rwb_flags)
144 {
145 int ret;
146 __le32 in;
147 u32 raw_mapping, postmap, ze, z_flag, e_flag;
148 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
149
150 WARN_ON(lba >= arena->external_nlba);
151
152 ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
153 if (ret)
154 return ret;
155
156 raw_mapping = le32_to_cpu(in);
157
158 z_flag = (raw_mapping & MAP_TRIM_MASK) >> MAP_TRIM_SHIFT;
159 e_flag = (raw_mapping & MAP_ERR_MASK) >> MAP_ERR_SHIFT;
160 ze = (z_flag << 1) + e_flag;
161 postmap = raw_mapping & MAP_LBA_MASK;
162
163 /* Reuse the {z,e}_flag variables for *trim and *error */
164 z_flag = 0;
165 e_flag = 0;
166
167 switch (ze) {
168 case 0:
169 /* Initial state. Return postmap = premap */
170 *mapping = lba;
171 break;
172 case 1:
173 *mapping = postmap;
174 e_flag = 1;
175 break;
176 case 2:
177 *mapping = postmap;
178 z_flag = 1;
179 break;
180 case 3:
181 *mapping = postmap;
182 break;
183 default:
184 return -EIO;
185 }
186
187 if (trim)
188 *trim = z_flag;
189 if (error)
190 *error = e_flag;
191
192 return ret;
193 }
194
195 static int btt_log_read_pair(struct arena_info *arena, u32 lane,
196 struct log_entry *ent)
197 {
198 WARN_ON(!ent);
199 return arena_read_bytes(arena,
200 arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
201 2 * LOG_ENT_SIZE, 0);
202 }
203
204 static struct dentry *debugfs_root;
205
206 static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
207 int idx)
208 {
209 char dirname[32];
210 struct dentry *d;
211
212 	/* If, for some reason, the parent bttN dir was not created, exit */
213 if (!parent)
214 return;
215
216 snprintf(dirname, 32, "arena%d", idx);
217 d = debugfs_create_dir(dirname, parent);
218 if (IS_ERR_OR_NULL(d))
219 return;
220 a->debugfs_dir = d;
221
222 debugfs_create_x64("size", S_IRUGO, d, &a->size);
223 debugfs_create_x64("external_lba_start", S_IRUGO, d,
224 &a->external_lba_start);
225 debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
226 debugfs_create_u32("internal_lbasize", S_IRUGO, d,
227 &a->internal_lbasize);
228 debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
229 debugfs_create_u32("external_lbasize", S_IRUGO, d,
230 &a->external_lbasize);
231 debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
232 debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
233 debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
234 debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
235 debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
236 debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
237 debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
238 debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
239 debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
240 debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
241 }
242
243 static void btt_debugfs_init(struct btt *btt)
244 {
245 int i = 0;
246 struct arena_info *arena;
247
248 btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
249 debugfs_root);
250 if (IS_ERR_OR_NULL(btt->debugfs_dir))
251 return;
252
253 list_for_each_entry(arena, &btt->arena_list, list) {
254 arena_debugfs_init(arena, btt->debugfs_dir, i);
255 i++;
256 }
257 }
258
259 /*
260 * This function accepts two log entries, and uses the
261 * sequence number to find the 'older' entry.
262  * If neither entry has ever been written, it initializes the pair
263  * by stamping sub-slot [0] with the first valid sequence number.
264 * Finally, it returns which of the entries was the older one.
265 *
266 * TODO The logic feels a bit kludge-y. make it better..
267 */
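/*
 * Illustrative example: sequence numbers cycle 1 -> 2 -> 3 -> 1, and a
 * seq of 0 means the sub-slot has never been written. For a pair with
 * seq values [3, 1] the '1' was written after the '3', so slot 0 is the
 * older entry; [1, 2] also makes slot 0 older, while [2, 1] makes slot 1
 * the older one. Equal sequence numbers, or a pair whose sum exceeds 5,
 * indicate corruption.
 */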
268 static int btt_log_get_old(struct log_entry *ent)
269 {
270 int old;
271
272 /*
273  * The first time a lane is ever written, the entry goes into [0];
274  * the next time, the logic below works out to put that (next)
275  * entry into [1]
276 */
277 if (ent[0].seq == 0) {
278 ent[0].seq = cpu_to_le32(1);
279 return 0;
280 }
281
282 if (ent[0].seq == ent[1].seq)
283 return -EINVAL;
284 if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
285 return -EINVAL;
286
287 if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
288 if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
289 old = 0;
290 else
291 old = 1;
292 } else {
293 if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
294 old = 1;
295 else
296 old = 0;
297 }
298
299 return old;
300 }
301
302 static struct device *to_dev(struct arena_info *arena)
303 {
304 return &arena->nd_btt->dev;
305 }
306
307 /*
308 * This function copies the desired (old/new) log entry into ent if
309 * it is not NULL. It returns the sub-slot number (0 or 1)
310 * where the desired log entry was found. Negative return values
311 * indicate errors.
312 */
313 static int btt_log_read(struct arena_info *arena, u32 lane,
314 struct log_entry *ent, int old_flag)
315 {
316 int ret;
317 int old_ent, ret_ent;
318 struct log_entry log[2];
319
320 ret = btt_log_read_pair(arena, lane, log);
321 if (ret)
322 return -EIO;
323
324 old_ent = btt_log_get_old(log);
325 if (old_ent < 0 || old_ent > 1) {
326 dev_info(to_dev(arena),
327 "log corruption (%d): lane %d seq [%d, %d]\n",
328 old_ent, lane, log[0].seq, log[1].seq);
329 /* TODO set error state? */
330 return -EIO;
331 }
332
333 ret_ent = (old_flag ? old_ent : (1 - old_ent));
334
335 if (ent != NULL)
336 memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);
337
338 return ret_ent;
339 }
340
341 /*
342 * This function commits a log entry to media
343 * It does _not_ prepare the freelist entry for the next write
344 * btt_flog_write is the wrapper for updating the freelist elements
345 */
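/*
 * A sketch of the split below, assuming struct log_entry is four __le32
 * fields (lba, old_map, new_map, seq) followed by two u64 of padding:
 * log_half works out to 8 bytes, so the first write covers lba+old_map
 * and the second covers new_map+seq. Because seq lands last, a torn
 * update can never be mistaken for a committed entry.
 */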
346 static int __btt_log_write(struct arena_info *arena, u32 lane,
347 u32 sub, struct log_entry *ent, unsigned long flags)
348 {
349 int ret;
350 /*
351 * Ignore the padding in log_entry for calculating log_half.
352 * The entry is 'committed' when we write the sequence number,
353 * and we want to ensure that that is the last thing written.
354 * We don't bother writing the padding as that would be extra
355 * media wear and write amplification
356 */
357 unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
358 u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
359 void *src = ent;
360
361 /* split the 16B write into atomic, durable halves */
362 ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
363 if (ret)
364 return ret;
365
366 ns_off += log_half;
367 src += log_half;
368 return arena_write_bytes(arena, ns_off, src, log_half, flags);
369 }
370
371 static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
372 struct log_entry *ent)
373 {
374 int ret;
375
376 ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
377 if (ret)
378 return ret;
379
380 /* prepare the next free entry */
381 arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
382 if (++(arena->freelist[lane].seq) == 4)
383 arena->freelist[lane].seq = 1;
384 arena->freelist[lane].block = le32_to_cpu(ent->old_map);
385
386 return ret;
387 }
388
389 /*
390 * This function initializes the BTT map to the initial state, which is
391 * all-zeroes, and indicates an identity mapping
392 */
393 static int btt_map_init(struct arena_info *arena)
394 {
395 int ret = -EINVAL;
396 void *zerobuf;
397 size_t offset = 0;
398 size_t chunk_size = SZ_2M;
399 size_t mapsize = arena->logoff - arena->mapoff;
400
401 zerobuf = kzalloc(chunk_size, GFP_KERNEL);
402 if (!zerobuf)
403 return -ENOMEM;
404
405 /*
406 * mapoff should always be at least 512B aligned. We rely on that to
407 * make sure rw_bytes does error clearing correctly, so make sure that
408 * is the case.
409 */
410 WARN_ON_ONCE(!IS_ALIGNED(arena->mapoff, 512));
411
412 while (mapsize) {
413 size_t size = min(mapsize, chunk_size);
414
415 WARN_ON_ONCE(size < 512);
416 ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
417 size, 0);
418 if (ret)
419 goto free;
420
421 offset += size;
422 mapsize -= size;
423 cond_resched();
424 }
425
426 free:
427 kfree(zerobuf);
428 return ret;
429 }
430
431 /*
432 * This function initializes the BTT log with 'fake' entries pointing
433 * to the initial reserved set of blocks as being free
434 */
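/*
 * For example, lane i's initial 'free' block is internal block
 * external_nlba + i: the reserved blocks beyond the externally visible
 * LBA space are handed out as the first free blocks.
 */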
435 static int btt_log_init(struct arena_info *arena)
436 {
437 size_t logsize = arena->info2off - arena->logoff;
438 size_t chunk_size = SZ_4K, offset = 0;
439 struct log_entry log;
440 void *zerobuf;
441 int ret;
442 u32 i;
443
444 zerobuf = kzalloc(chunk_size, GFP_KERNEL);
445 if (!zerobuf)
446 return -ENOMEM;
447 /*
448 * logoff should always be at least 512B aligned. We rely on that to
449 * make sure rw_bytes does error clearing correctly, so make sure that
450 * is the case.
451 */
452 WARN_ON_ONCE(!IS_ALIGNED(arena->logoff, 512));
453
454 while (logsize) {
455 size_t size = min(logsize, chunk_size);
456
457 WARN_ON_ONCE(size < 512);
458 ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
459 size, 0);
460 if (ret)
461 goto free;
462
463 offset += size;
464 logsize -= size;
465 cond_resched();
466 }
467
468 for (i = 0; i < arena->nfree; i++) {
469 log.lba = cpu_to_le32(i);
470 log.old_map = cpu_to_le32(arena->external_nlba + i);
471 log.new_map = cpu_to_le32(arena->external_nlba + i);
472 log.seq = cpu_to_le32(LOG_SEQ_INIT);
473 ret = __btt_log_write(arena, i, 0, &log, 0);
474 if (ret)
475 goto free;
476 }
477
478 free:
479 kfree(zerobuf);
480 return ret;
481 }
482
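/*
 * Rebuild the per-lane freelist from the flog at startup. The newest
 * flog entry for a lane names the block freed by the last write
 * ('old_map'); if that write's map update never reached media (the map
 * still points at old_map rather than new_map), the map entry is
 * repaired here before the arena is put into service.
 */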
483 static int btt_freelist_init(struct arena_info *arena)
484 {
485 int old, new, ret;
486 u32 i, map_entry;
487 struct log_entry log_new, log_old;
488
489 arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
490 GFP_KERNEL);
491 if (!arena->freelist)
492 return -ENOMEM;
493
494 for (i = 0; i < arena->nfree; i++) {
495 old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
496 if (old < 0)
497 return old;
498
499 new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
500 if (new < 0)
501 return new;
502
503 /* sub points to the next one to be overwritten */
504 arena->freelist[i].sub = 1 - new;
505 arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
506 arena->freelist[i].block = le32_to_cpu(log_new.old_map);
507
508 /* This implies a newly created or untouched flog entry */
509 if (log_new.old_map == log_new.new_map)
510 continue;
511
512 /* Check if map recovery is needed */
513 ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
514 NULL, NULL, 0);
515 if (ret)
516 return ret;
517 if ((le32_to_cpu(log_new.new_map) != map_entry) &&
518 (le32_to_cpu(log_new.old_map) == map_entry)) {
519 /*
520 * Last transaction wrote the flog, but wasn't able
521 * to complete the map write. So fix up the map.
522 */
523 ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
524 le32_to_cpu(log_new.new_map), 0, 0, 0);
525 if (ret)
526 return ret;
527 }
528
529 }
530
531 return 0;
532 }
533
534 static int btt_rtt_init(struct arena_info *arena)
535 {
536 arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
537 if (arena->rtt == NULL)
538 return -ENOMEM;
539
540 return 0;
541 }
542
543 static int btt_maplocks_init(struct arena_info *arena)
544 {
545 u32 i;
546
547 arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
548 GFP_KERNEL);
549 if (!arena->map_locks)
550 return -ENOMEM;
551
552 for (i = 0; i < arena->nfree; i++)
553 spin_lock_init(&arena->map_locks[i].lock);
554
555 return 0;
556 }
557
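/*
 * Carve an arena out of the raw namespace capacity. The resulting
 * on-media layout, in increasing offset order, is: info block, data
 * area, map, log, and a backup copy of the info block, with the map
 * and log sizes rounded up to BTT_PG_SIZE.
 */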
558 static struct arena_info *alloc_arena(struct btt *btt, size_t size,
559 size_t start, size_t arena_off)
560 {
561 struct arena_info *arena;
562 u64 logsize, mapsize, datasize;
563 u64 available = size;
564
565 arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
566 if (!arena)
567 return NULL;
568 arena->nd_btt = btt->nd_btt;
569
570 if (!size)
571 return arena;
572
573 arena->size = size;
574 arena->external_lba_start = start;
575 arena->external_lbasize = btt->lbasize;
576 arena->internal_lbasize = roundup(arena->external_lbasize,
577 INT_LBASIZE_ALIGNMENT);
578 arena->nfree = BTT_DEFAULT_NFREE;
579 arena->version_major = 1;
580 arena->version_minor = 1;
581
582 if (available % BTT_PG_SIZE)
583 available -= (available % BTT_PG_SIZE);
584
585 /* Two pages are reserved for the super block and its copy */
586 available -= 2 * BTT_PG_SIZE;
587
588 /* The log takes a fixed amount of space based on nfree */
589 logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
590 BTT_PG_SIZE);
591 available -= logsize;
592
593 /* Calculate optimal split between map and data area */
594 arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
595 arena->internal_lbasize + MAP_ENT_SIZE);
596 arena->external_nlba = arena->internal_nlba - arena->nfree;
597
598 mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
599 datasize = available - mapsize;
600
601 /* 'Absolute' values, relative to start of storage space */
602 arena->infooff = arena_off;
603 arena->dataoff = arena->infooff + BTT_PG_SIZE;
604 arena->mapoff = arena->dataoff + datasize;
605 arena->logoff = arena->mapoff + mapsize;
606 arena->info2off = arena->logoff + logsize;
607 return arena;
608 }
609
610 static void free_arenas(struct btt *btt)
611 {
612 struct arena_info *arena, *next;
613
614 list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
615 list_del(&arena->list);
616 kfree(arena->rtt);
617 kfree(arena->map_locks);
618 kfree(arena->freelist);
619 debugfs_remove_recursive(arena->debugfs_dir);
620 kfree(arena);
621 }
622 }
623
624 /*
625 * This function reads an existing valid btt superblock and
626 * populates the corresponding arena_info struct
627 */
628 static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
629 u64 arena_off)
630 {
631 arena->internal_nlba = le32_to_cpu(super->internal_nlba);
632 arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
633 arena->external_nlba = le32_to_cpu(super->external_nlba);
634 arena->external_lbasize = le32_to_cpu(super->external_lbasize);
635 arena->nfree = le32_to_cpu(super->nfree);
636 arena->version_major = le16_to_cpu(super->version_major);
637 arena->version_minor = le16_to_cpu(super->version_minor);
638
639 arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
640 le64_to_cpu(super->nextoff));
641 arena->infooff = arena_off;
642 arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
643 arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
644 arena->logoff = arena_off + le64_to_cpu(super->logoff);
645 arena->info2off = arena_off + le64_to_cpu(super->info2off);
646
647 arena->size = (le64_to_cpu(super->nextoff) > 0)
648 ? (le64_to_cpu(super->nextoff))
649 : (arena->info2off - arena->infooff + BTT_PG_SIZE);
650
651 arena->flags = le32_to_cpu(super->flags);
652 }
653
654 static int discover_arenas(struct btt *btt)
655 {
656 int ret = 0;
657 struct arena_info *arena;
658 struct btt_sb *super;
659 size_t remaining = btt->rawsize;
660 u64 cur_nlba = 0;
661 size_t cur_off = 0;
662 int num_arenas = 0;
663
664 super = kzalloc(sizeof(*super), GFP_KERNEL);
665 if (!super)
666 return -ENOMEM;
667
668 while (remaining) {
669 /* Alloc memory for arena */
670 arena = alloc_arena(btt, 0, 0, 0);
671 if (!arena) {
672 ret = -ENOMEM;
673 goto out_super;
674 }
675
676 arena->infooff = cur_off;
677 ret = btt_info_read(arena, super);
678 if (ret)
679 goto out;
680
681 if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
682 if (remaining == btt->rawsize) {
683 btt->init_state = INIT_NOTFOUND;
684 dev_info(to_dev(arena), "No existing arenas\n");
685 goto out;
686 } else {
687 dev_info(to_dev(arena),
688 "Found corrupted metadata!\n");
689 ret = -ENODEV;
690 goto out;
691 }
692 }
693
694 arena->external_lba_start = cur_nlba;
695 parse_arena_meta(arena, super, cur_off);
696
697 ret = btt_freelist_init(arena);
698 if (ret)
699 goto out;
700
701 ret = btt_rtt_init(arena);
702 if (ret)
703 goto out;
704
705 ret = btt_maplocks_init(arena);
706 if (ret)
707 goto out;
708
709 list_add_tail(&arena->list, &btt->arena_list);
710
711 remaining -= arena->size;
712 cur_off += arena->size;
713 cur_nlba += arena->external_nlba;
714 num_arenas++;
715
716 if (arena->nextoff == 0)
717 break;
718 }
719 btt->num_arenas = num_arenas;
720 btt->nlba = cur_nlba;
721 btt->init_state = INIT_READY;
722
723 kfree(super);
724 return ret;
725
726 out:
727 kfree(arena);
728 free_arenas(btt);
729 out_super:
730 kfree(super);
731 return ret;
732 }
733
734 static int create_arenas(struct btt *btt)
735 {
736 size_t remaining = btt->rawsize;
737 size_t cur_off = 0;
738
739 while (remaining) {
740 struct arena_info *arena;
741 size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);
742
743 remaining -= arena_size;
744 if (arena_size < ARENA_MIN_SIZE)
745 break;
746
747 arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
748 if (!arena) {
749 free_arenas(btt);
750 return -ENOMEM;
751 }
752 btt->nlba += arena->external_nlba;
753 if (remaining >= ARENA_MIN_SIZE)
754 arena->nextoff = arena->size;
755 else
756 arena->nextoff = 0;
757 cur_off += arena_size;
758 list_add_tail(&arena->list, &btt->arena_list);
759 }
760
761 return 0;
762 }
763
764 /*
765 * This function completes arena initialization by writing
766 * all the metadata.
767 * It is only called for an uninitialized arena when a write
768 * to that arena occurs for the first time.
769 */
770 static int btt_arena_write_layout(struct arena_info *arena)
771 {
772 int ret;
773 u64 sum;
774 struct btt_sb *super;
775 struct nd_btt *nd_btt = arena->nd_btt;
776 const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
777
778 ret = btt_map_init(arena);
779 if (ret)
780 return ret;
781
782 ret = btt_log_init(arena);
783 if (ret)
784 return ret;
785
786 super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
787 if (!super)
788 return -ENOMEM;
789
790 strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
791 memcpy(super->uuid, nd_btt->uuid, 16);
792 memcpy(super->parent_uuid, parent_uuid, 16);
793 super->flags = cpu_to_le32(arena->flags);
794 super->version_major = cpu_to_le16(arena->version_major);
795 super->version_minor = cpu_to_le16(arena->version_minor);
796 super->external_lbasize = cpu_to_le32(arena->external_lbasize);
797 super->external_nlba = cpu_to_le32(arena->external_nlba);
798 super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
799 super->internal_nlba = cpu_to_le32(arena->internal_nlba);
800 super->nfree = cpu_to_le32(arena->nfree);
801 super->infosize = cpu_to_le32(sizeof(struct btt_sb));
802 super->nextoff = cpu_to_le64(arena->nextoff);
803 /*
804 * Subtract arena->infooff (arena start) so numbers are relative
805 * to 'this' arena
806 */
807 super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
808 super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
809 super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
810 super->info2off = cpu_to_le64(arena->info2off - arena->infooff);
811
812 super->flags = 0;
813 sum = nd_sb_checksum((struct nd_gen_sb *) super);
814 super->checksum = cpu_to_le64(sum);
815
816 ret = btt_info_write(arena, super);
817
818 kfree(super);
819 return ret;
820 }
821
822 /*
823 * This function completes the initialization for the BTT namespace
824 * such that it is ready to accept IOs
825 */
826 static int btt_meta_init(struct btt *btt)
827 {
828 int ret = 0;
829 struct arena_info *arena;
830
831 mutex_lock(&btt->init_lock);
832 list_for_each_entry(arena, &btt->arena_list, list) {
833 ret = btt_arena_write_layout(arena);
834 if (ret)
835 goto unlock;
836
837 ret = btt_freelist_init(arena);
838 if (ret)
839 goto unlock;
840
841 ret = btt_rtt_init(arena);
842 if (ret)
843 goto unlock;
844
845 ret = btt_maplocks_init(arena);
846 if (ret)
847 goto unlock;
848 }
849
850 btt->init_state = INIT_READY;
851
852 unlock:
853 mutex_unlock(&btt->init_lock);
854 return ret;
855 }
856
857 static u32 btt_meta_size(struct btt *btt)
858 {
859 return btt->lbasize - btt->sector_size;
860 }
861
862 /*
863 * This function calculates the arena in which the given LBA lies
864 * by doing a linear walk. This is acceptable since we expect only
865 * a few arenas. If we have backing devices that get much larger,
866 * we can construct a balanced binary tree of arenas at init time
867 * so that this range search becomes faster.
868 */
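/*
 * For example, with a 4096-byte sector_size, 512-byte block layer
 * sector 8 corresponds to external LBA 1, which is then located in
 * whichever arena's range it falls into.
 */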
869 static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
870 struct arena_info **arena)
871 {
872 struct arena_info *arena_list;
873 __u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);
874
875 list_for_each_entry(arena_list, &btt->arena_list, list) {
876 if (lba < arena_list->external_nlba) {
877 *arena = arena_list;
878 *premap = lba;
879 return 0;
880 }
881 lba -= arena_list->external_nlba;
882 }
883
884 return -EIO;
885 }
886
887 /*
888 * The following (lock_map, unlock_map) are mostly just to improve
889 * readability, since they index into an array of locks
890 */
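/*
 * Example of the index math below, assuming MAP_ENT_SIZE == 4 and a
 * 64-byte L1_CACHE_BYTES: premap entries 0-15 hash to lock 0, entries
 * 16-31 to lock 1, and so on (modulo nfree), so writers whose map
 * entries share a cache line serialize on the same lock.
 */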
891 static void lock_map(struct arena_info *arena, u32 premap)
892 __acquires(&arena->map_locks[idx].lock)
893 {
894 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
895
896 spin_lock(&arena->map_locks[idx].lock);
897 }
898
899 static void unlock_map(struct arena_info *arena, u32 premap)
900 __releases(&arena->map_locks[idx].lock)
901 {
902 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
903
904 spin_unlock(&arena->map_locks[idx].lock);
905 }
906
907 static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
908 {
909 return arena->dataoff + ((u64)lba * arena->internal_lbasize);
910 }
911
912 static int btt_data_read(struct arena_info *arena, struct page *page,
913 unsigned int off, u32 lba, u32 len)
914 {
915 int ret;
916 u64 nsoff = to_namespace_offset(arena, lba);
917 void *mem = kmap_atomic(page);
918
919 ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
920 kunmap_atomic(mem);
921
922 return ret;
923 }
924
925 static int btt_data_write(struct arena_info *arena, u32 lba,
926 struct page *page, unsigned int off, u32 len)
927 {
928 int ret;
929 u64 nsoff = to_namespace_offset(arena, lba);
930 void *mem = kmap_atomic(page);
931
932 ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
933 kunmap_atomic(mem);
934
935 return ret;
936 }
937
938 static void zero_fill_data(struct page *page, unsigned int off, u32 len)
939 {
940 void *mem = kmap_atomic(page);
941
942 memset(mem + off, 0, len);
943 kunmap_atomic(mem);
944 }
945
946 #ifdef CONFIG_BLK_DEV_INTEGRITY
947 static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
948 struct arena_info *arena, u32 postmap, int rw)
949 {
950 unsigned int len = btt_meta_size(btt);
951 u64 meta_nsoff;
952 int ret = 0;
953
954 if (bip == NULL)
955 return 0;
956
957 meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;
958
959 while (len) {
960 unsigned int cur_len;
961 struct bio_vec bv;
962 void *mem;
963
964 bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
965 /*
966 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
967 * .bv_offset already adjusted for iter->bi_bvec_done, and we
968 * can use those directly
969 */
970
971 cur_len = min(len, bv.bv_len);
972 mem = kmap_atomic(bv.bv_page);
973 if (rw)
974 ret = arena_write_bytes(arena, meta_nsoff,
975 mem + bv.bv_offset, cur_len,
976 NVDIMM_IO_ATOMIC);
977 else
978 ret = arena_read_bytes(arena, meta_nsoff,
979 mem + bv.bv_offset, cur_len,
980 NVDIMM_IO_ATOMIC);
981
982 kunmap_atomic(mem);
983 if (ret)
984 return ret;
985
986 len -= cur_len;
987 meta_nsoff += cur_len;
988 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
989 }
990
991 return ret;
992 }
993
994 #else /* CONFIG_BLK_DEV_INTEGRITY */
995 static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
996 struct arena_info *arena, u32 postmap, int rw)
997 {
998 return 0;
999 }
1000 #endif
1001
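/*
 * Read path, one sector at a time: look up the postmap block, publish
 * it in this lane's read tracking table (RTT) slot so concurrent
 * writers will not hand the block out for reuse mid-read, then re-read
 * the map to confirm the translation did not change before doing the
 * data read.
 */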
1002 static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
1003 struct page *page, unsigned int off, sector_t sector,
1004 unsigned int len)
1005 {
1006 int ret = 0;
1007 int t_flag, e_flag;
1008 struct arena_info *arena = NULL;
1009 u32 lane = 0, premap, postmap;
1010
1011 while (len) {
1012 u32 cur_len;
1013
1014 lane = nd_region_acquire_lane(btt->nd_region);
1015
1016 ret = lba_to_arena(btt, sector, &premap, &arena);
1017 if (ret)
1018 goto out_lane;
1019
1020 cur_len = min(btt->sector_size, len);
1021
1022 ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
1023 NVDIMM_IO_ATOMIC);
1024 if (ret)
1025 goto out_lane;
1026
1027 /*
1028 * We loop to make sure that the post map LBA didn't change
1029 * from under us between writing the RTT and doing the actual
1030 * read.
1031 */
1032 while (1) {
1033 u32 new_map;
1034
1035 if (t_flag) {
1036 zero_fill_data(page, off, cur_len);
1037 goto out_lane;
1038 }
1039
1040 if (e_flag) {
1041 ret = -EIO;
1042 goto out_lane;
1043 }
1044
1045 arena->rtt[lane] = RTT_VALID | postmap;
1046 /*
1047  * Barrier to make sure the verification map_read below is not
1048  * reordered ahead of the RTT store above
1049 */
1050 barrier();
1051
1052 ret = btt_map_read(arena, premap, &new_map, &t_flag,
1053 &e_flag, NVDIMM_IO_ATOMIC);
1054 if (ret)
1055 goto out_rtt;
1056
1057 if (postmap == new_map)
1058 break;
1059
1060 postmap = new_map;
1061 }
1062
1063 ret = btt_data_read(arena, page, off, postmap, cur_len);
1064 if (ret)
1065 goto out_rtt;
1066
1067 if (bip) {
1068 ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
1069 if (ret)
1070 goto out_rtt;
1071 }
1072
1073 arena->rtt[lane] = RTT_INVALID;
1074 nd_region_release_lane(btt->nd_region, lane);
1075
1076 len -= cur_len;
1077 off += cur_len;
1078 sector += btt->sector_size >> SECTOR_SHIFT;
1079 }
1080
1081 return 0;
1082
1083 out_rtt:
1084 arena->rtt[lane] = RTT_INVALID;
1085 out_lane:
1086 nd_region_release_lane(btt->nd_region, lane);
1087 return ret;
1088 }
1089
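/*
 * Write path, one sector at a time: take this lane's current free block
 * as the new postmap location, write the data (and integrity metadata)
 * there, then, under the map lock, commit the old->new transition to
 * the flog and finally point the map entry at the new block. The block
 * previously mapped becomes the lane's next free block.
 */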
1090 static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
1091 sector_t sector, struct page *page, unsigned int off,
1092 unsigned int len)
1093 {
1094 int ret = 0;
1095 struct arena_info *arena = NULL;
1096 u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
1097 struct log_entry log;
1098 int sub;
1099
1100 while (len) {
1101 u32 cur_len;
1102
1103 lane = nd_region_acquire_lane(btt->nd_region);
1104
1105 ret = lba_to_arena(btt, sector, &premap, &arena);
1106 if (ret)
1107 goto out_lane;
1108 cur_len = min(btt->sector_size, len);
1109
1110 if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
1111 ret = -EIO;
1112 goto out_lane;
1113 }
1114
1115 new_postmap = arena->freelist[lane].block;
1116
1117 /* Wait if the new block is being read from */
1118 for (i = 0; i < arena->nfree; i++)
1119 while (arena->rtt[i] == (RTT_VALID | new_postmap))
1120 cpu_relax();
1121
1122
1123 if (new_postmap >= arena->internal_nlba) {
1124 ret = -EIO;
1125 goto out_lane;
1126 }
1127
1128 ret = btt_data_write(arena, new_postmap, page, off, cur_len);
1129 if (ret)
1130 goto out_lane;
1131
1132 if (bip) {
1133 ret = btt_rw_integrity(btt, bip, arena, new_postmap,
1134 WRITE);
1135 if (ret)
1136 goto out_lane;
1137 }
1138
1139 lock_map(arena, premap);
1140 ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL,
1141 NVDIMM_IO_ATOMIC);
1142 if (ret)
1143 goto out_map;
1144 if (old_postmap >= arena->internal_nlba) {
1145 ret = -EIO;
1146 goto out_map;
1147 }
1148
1149 log.lba = cpu_to_le32(premap);
1150 log.old_map = cpu_to_le32(old_postmap);
1151 log.new_map = cpu_to_le32(new_postmap);
1152 log.seq = cpu_to_le32(arena->freelist[lane].seq);
1153 sub = arena->freelist[lane].sub;
1154 ret = btt_flog_write(arena, lane, sub, &log);
1155 if (ret)
1156 goto out_map;
1157
1158 ret = btt_map_write(arena, premap, new_postmap, 0, 0, 0);
1159 if (ret)
1160 goto out_map;
1161
1162 unlock_map(arena, premap);
1163 nd_region_release_lane(btt->nd_region, lane);
1164
1165 len -= cur_len;
1166 off += cur_len;
1167 sector += btt->sector_size >> SECTOR_SHIFT;
1168 }
1169
1170 return 0;
1171
1172 out_map:
1173 unlock_map(arena, premap);
1174 out_lane:
1175 nd_region_release_lane(btt->nd_region, lane);
1176 return ret;
1177 }
1178
1179 static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
1180 struct page *page, unsigned int len, unsigned int off,
1181 bool is_write, sector_t sector)
1182 {
1183 int ret;
1184
1185 if (!is_write) {
1186 ret = btt_read_pg(btt, bip, page, off, sector, len);
1187 flush_dcache_page(page);
1188 } else {
1189 flush_dcache_page(page);
1190 ret = btt_write_pg(btt, bip, sector, page, off, len);
1191 }
1192
1193 return ret;
1194 }
1195
1196 static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
1197 {
1198 struct bio_integrity_payload *bip = bio_integrity(bio);
1199 struct btt *btt = q->queuedata;
1200 struct bvec_iter iter;
1201 unsigned long start;
1202 struct bio_vec bvec;
1203 int err = 0;
1204 bool do_acct;
1205
1206 /*
1207 * bio_integrity_enabled also checks if the bio already has an
1208 * integrity payload attached. If it does, we *don't* do a
1209 * bio_integrity_prep here - the payload has been generated by
1210 * another kernel subsystem, and we just pass it through.
1211 */
1212 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1213 bio->bi_status = BLK_STS_IOERR;
1214 goto out;
1215 }
1216
1217 do_acct = nd_iostat_start(bio, &start);
1218 bio_for_each_segment(bvec, bio, iter) {
1219 unsigned int len = bvec.bv_len;
1220
1221 BUG_ON(len > PAGE_SIZE);
1222 /* Make sure len is in multiples of sector size. */
1223 /* XXX is this right? */
1224 BUG_ON(len < btt->sector_size);
1225 BUG_ON(len % btt->sector_size);
1226
1227 err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
1228 op_is_write(bio_op(bio)), iter.bi_sector);
1229 if (err) {
1230 dev_info(&btt->nd_btt->dev,
1231 "io error in %s sector %lld, len %d,\n",
1232 (op_is_write(bio_op(bio))) ? "WRITE" :
1233 "READ",
1234 (unsigned long long) iter.bi_sector, len);
1235 bio->bi_status = errno_to_blk_status(err);
1236 break;
1237 }
1238 }
1239 if (do_acct)
1240 nd_iostat_end(bio, start);
1241
1242 out:
1243 bio_endio(bio);
1244 return BLK_QC_T_NONE;
1245 }
1246
1247 static int btt_rw_page(struct block_device *bdev, sector_t sector,
1248 struct page *page, bool is_write)
1249 {
1250 struct btt *btt = bdev->bd_disk->private_data;
1251
1252 btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
1253 page_endio(page, is_write, 0);
1254 return 0;
1255 }
1256
1257
1258 static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
1259 {
1260 /* some standard values */
1261 geo->heads = 1 << 6;
1262 geo->sectors = 1 << 5;
1263 geo->cylinders = get_capacity(bd->bd_disk) >> 11;
1264 return 0;
1265 }
1266
1267 static const struct block_device_operations btt_fops = {
1268 .owner = THIS_MODULE,
1269 .rw_page = btt_rw_page,
1270 .getgeo = btt_getgeo,
1271 .revalidate_disk = nvdimm_revalidate_disk,
1272 };
1273
1274 static int btt_blk_init(struct btt *btt)
1275 {
1276 struct nd_btt *nd_btt = btt->nd_btt;
1277 struct nd_namespace_common *ndns = nd_btt->ndns;
1278
1279 /* create a new disk and request queue for btt */
1280 btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
1281 if (!btt->btt_queue)
1282 return -ENOMEM;
1283
1284 btt->btt_disk = alloc_disk(0);
1285 if (!btt->btt_disk) {
1286 blk_cleanup_queue(btt->btt_queue);
1287 return -ENOMEM;
1288 }
1289
1290 nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
1291 btt->btt_disk->first_minor = 0;
1292 btt->btt_disk->fops = &btt_fops;
1293 btt->btt_disk->private_data = btt;
1294 btt->btt_disk->queue = btt->btt_queue;
1295 btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
1296
1297 blk_queue_make_request(btt->btt_queue, btt_make_request);
1298 blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
1299 blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
1300 blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY);
1301 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
1302 btt->btt_queue->queuedata = btt;
1303
1304 set_capacity(btt->btt_disk, 0);
1305 device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
1306 if (btt_meta_size(btt)) {
1307 int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
1308
1309 if (rc) {
1310 del_gendisk(btt->btt_disk);
1311 put_disk(btt->btt_disk);
1312 blk_cleanup_queue(btt->btt_queue);
1313 return rc;
1314 }
1315 }
1316 set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
1317 btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
1318 revalidate_disk(btt->btt_disk);
1319
1320 return 0;
1321 }
1322
1323 static void btt_blk_cleanup(struct btt *btt)
1324 {
1325 del_gendisk(btt->btt_disk);
1326 put_disk(btt->btt_disk);
1327 blk_cleanup_queue(btt->btt_queue);
1328 }
1329
1330 /**
1331 * btt_init - initialize a block translation table for the given device
1332 * @nd_btt: device with BTT geometry and backing device info
1333 * @rawsize: raw size in bytes of the backing device
1334 * @lbasize: lba size of the backing device
1335 * @uuid: A uuid for the backing device - this is stored on media
1336  * @nd_region: parent region, used to acquire IO lanes
1337 *
1338 * Initialize a Block Translation Table on a backing device to provide
1339 * single sector power fail atomicity.
1340 *
1341 * Context:
1342 * Might sleep.
1343 *
1344 * Returns:
1345 * Pointer to a new struct btt on success, NULL on failure.
1346 */
1347 static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
1348 u32 lbasize, u8 *uuid, struct nd_region *nd_region)
1349 {
1350 int ret;
1351 struct btt *btt;
1352 struct device *dev = &nd_btt->dev;
1353
1354 btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
1355 if (!btt)
1356 return NULL;
1357
1358 btt->nd_btt = nd_btt;
1359 btt->rawsize = rawsize;
1360 btt->lbasize = lbasize;
1361 btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
1362 INIT_LIST_HEAD(&btt->arena_list);
1363 mutex_init(&btt->init_lock);
1364 btt->nd_region = nd_region;
1365
1366 ret = discover_arenas(btt);
1367 if (ret) {
1368 dev_err(dev, "init: error in arena_discover: %d\n", ret);
1369 return NULL;
1370 }
1371
1372 if (btt->init_state != INIT_READY && nd_region->ro) {
1373 dev_info(dev, "%s is read-only, unable to init btt metadata\n",
1374 dev_name(&nd_region->dev));
1375 return NULL;
1376 } else if (btt->init_state != INIT_READY) {
1377 btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
1378 ((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
1379 dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
1380 btt->num_arenas, rawsize);
1381
1382 ret = create_arenas(btt);
1383 if (ret) {
1384 dev_info(dev, "init: create_arenas: %d\n", ret);
1385 return NULL;
1386 }
1387
1388 ret = btt_meta_init(btt);
1389 if (ret) {
1390 dev_err(dev, "init: error in meta_init: %d\n", ret);
1391 return NULL;
1392 }
1393 }
1394
1395 ret = btt_blk_init(btt);
1396 if (ret) {
1397 dev_err(dev, "init: error in blk_init: %d\n", ret);
1398 return NULL;
1399 }
1400
1401 btt_debugfs_init(btt);
1402
1403 return btt;
1404 }
1405
1406 /**
1407 * btt_fini - de-initialize a BTT
1408 * @btt: the BTT handle that was generated by btt_init
1409 *
1410 * De-initialize a Block Translation Table on device removal
1411 *
1412 * Context:
1413 * Might sleep.
1414 */
1415 static void btt_fini(struct btt *btt)
1416 {
1417 if (btt) {
1418 btt_blk_cleanup(btt);
1419 free_arenas(btt);
1420 debugfs_remove_recursive(btt->debugfs_dir);
1421 }
1422 }
1423
1424 int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
1425 {
1426 struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
1427 struct nd_region *nd_region;
1428 struct btt *btt;
1429 size_t rawsize;
1430
1431 if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
1432 dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
1433 return -ENODEV;
1434 }
1435
1436 rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
1437 if (rawsize < ARENA_MIN_SIZE) {
1438 dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
1439 dev_name(&ndns->dev), ARENA_MIN_SIZE + SZ_4K);
1440 return -ENXIO;
1441 }
1442 nd_region = to_nd_region(nd_btt->dev.parent);
1443 btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
1444 nd_region);
1445 if (!btt)
1446 return -ENOMEM;
1447 nd_btt->btt = btt;
1448
1449 return 0;
1450 }
1451 EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
1452
1453 int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
1454 {
1455 struct btt *btt = nd_btt->btt;
1456
1457 btt_fini(btt);
1458 nd_btt->btt = NULL;
1459
1460 return 0;
1461 }
1462 EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
1463
1464 static int __init nd_btt_init(void)
1465 {
1466 int rc = 0;
1467
1468 debugfs_root = debugfs_create_dir("btt", NULL);
1469 if (IS_ERR_OR_NULL(debugfs_root))
1470 rc = -ENXIO;
1471
1472 return rc;
1473 }
1474
1475 static void __exit nd_btt_exit(void)
1476 {
1477 debugfs_remove_recursive(debugfs_root);
1478 }
1479
1480 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
1481 MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
1482 MODULE_LICENSE("GPL v2");
1483 module_init(nd_btt_init);
1484 module_exit(nd_btt_exit);