/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nspm->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nspm->id);
	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}

static const struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
};

static const struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
};

static const struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
};

static bool is_namespace_pmem(const struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(const struct device *dev)
{
	return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(const struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}

static int is_uuid_busy(struct device *dev, void *data)
{
	u8 *uuid1 = data, *uuid2 = NULL;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid2 = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid2 = nsblk->uuid;
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		uuid2 = nd_btt->uuid;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		uuid2 = nd_pfn->uuid;
	}

	if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
		return -EBUSY;

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_region(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}

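/*
 * A raw pmem namespace is mapped with struct page (ZONE_DEVICE) only
 * when the region supports a pagemap, the namespace is not claimed by
 * btt/pfn, is not forced raw, and its range does not partially
 * intersect System RAM.
 */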
bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_namespace_io *nsio;

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

	if (ndns->force_raw)
		return false;

	nsio = to_nd_namespace_io(dev);
	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
				IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED)
		return false;

	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);

unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (nspm->lbasize == 0 || nspm->lbasize == 512)
			/* default */;
		else if (nspm->lbasize == 4096)
			return 4096;
		else
			dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
					nspm->lbasize);
	}

	/*
	 * There is no namespace label (is_namespace_io()), or the label
	 * indicates the default sector size.
	 */
	return 512;
}
EXPORT_SYMBOL(pmem_sector_size);

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		int nsidx = 0;

		if (is_namespace_pmem(&ndns->dev)) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(&ndns->dev);
			nsidx = nspm->id;
		}

		if (nsidx)
			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
					suffix ? suffix : "");
		else
			sprintf(name, "pmem%d%s", nd_region->id,
					suffix ? suffix : "");
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
				suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const u8 *nd_dev_to_uuid(struct device *dev)
{
	static const u8 null_uuid[16];

	if (!dev)
		return null_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

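/*
 * Update the alternate name of an idle (not driver-attached, not
 * claimed) pmem or blk namespace. The name, after trimming
 * whitespace, must fit in an NSLABEL_NAME_LEN buffer.
 */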
static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kstrndup(buf, len, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

 out:
	kfree(input);
	return rc;
}

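/* sum the dpa resources tagged with this blk namespace's label id */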
static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	if (!nsblk->uuid)
		return 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);
	return size;
}

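/*
 * Verify that the in-memory resource assignments for this blk
 * namespace still match the labels on the dimm: no unacknowledged
 * adjustments, the same resource count, and no stale resource
 * pointers.
 */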
static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);


static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = nsblk->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

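/*
 * Free @n bytes of the @label_id allocation in this mapping, starting
 * from the last (highest DPA) resource. Whole resources are deleted
 * until less than a resource worth of @n remains, then the final
 * resource is shrunk: from the low end for blk (keeping blk at high
 * DPA), from the high end for pmem.
 */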
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered. Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}

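/*
 * First allocation of @n bytes for a label id in this mapping: blk
 * allocations are placed at the highest free dpa, pmem allocations at
 * the start of the mapping.
 */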
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	if (is_blk)
		first_dpa = nd_mapping->start + nd_mapping->size - n;
	else
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}


/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one exists).
 * If reserving PMEM any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, struct resource *prev,
		struct resource *next, struct resource *exist,
		resource_size_t n, struct resource *valid)
{
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;

	if (valid->start >= valid->end)
		goto invalid;

	if (is_reserve)
		return;

	if (!is_pmem) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_bus *nvdimm_bus;
		struct blk_alloc_info info = {
			.nd_mapping = nd_mapping,
			.available = nd_mapping->size,
			.res = valid,
		};

		WARN_ON(!is_nd_blk(&nd_region->dev));
		nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
		device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
		return;
	}

	/* allocation needs to be contiguous, so this is all or nothing */
	if (resource_size(valid) < n)
		goto invalid;

	/* we've got all the space we need and no existing allocation */
	if (!exist)
		return;

	/* allocation needs to be contiguous with the existing namespace */
	if (valid->start == exist->end + 1
			|| valid->end == exist->start - 1)
		return;

 invalid:
	/* truncate @valid size to 0 */
	valid->end = valid->start - 1;
}

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

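/*
 * scan_allocate() walks the free space in a mapping and either extends
 * an adjacent allocation with the same label id or starts a new one.
 * It restarts the scan after each successful allocation until @n bytes
 * are placed, and returns the number of bytes it could not allocate.
 */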
static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *exist = NULL, valid;
	const resource_size_t to_allocate = n;
	int first;

	for_each_dpa_resource(ndd, res)
		if (strcmp(label_id->id, res->name) == 0)
			exist = res;

	valid.start = nd_mapping->start;
	valid.end = mapping_end;
	valid.name = "free space";
 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling, *new_res = NULL;
		resource_size_t allocate, available = 0;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			valid.start = nd_mapping->start;
			valid.end = res->start - 1;
			space_valid(nd_region, ndd, label_id, NULL, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			valid.start = res->start + resource_size(res);
			valid.end = min(mapping_end, next->start - 1);
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_MID;
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			valid.start = res->start + resource_size(res);
			valid.end = mapping_end;
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_AFTER;
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				valid.start += available - allocate;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					valid.start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass". Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}

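/* coalesce dpa-contiguous resources that share a (blk) label id */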
static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}

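/*
 * For each memory region that maps @nvdimm, reserve all of its
 * unallocated pmem capacity under the "pmem-reserve" label id so that
 * a subsequent blk allocation pass cannot encroach on pmem-capable
 * DPA.
 */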
int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;
	int rc;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
	if (rc)
		release_free_pmem(nvdimm_bus, nd_mapping);
	return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered. For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA. For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}

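/*
 * Set the namespace's system-physical-address range by scaling the
 * dpa offset of its per-dimm allocation by the interleave way count.
 */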
static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;

	if (size && !nspm->uuid) {
		WARN_ON_ONCE(1);
		size = 0;
	}

	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res;

		if (!ndd) {
			size = 0;
			goto out;
		}

		nd_label_gen_id(&label_id, nspm->uuid, 0);

		/* calculate a spa offset from the dpa allocation offset */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}

		WARN_ON_ONCE(1);
		size = 0;
	}

 out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}

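/*
 * Resize a namespace allocation. The requested size is split evenly
 * across the region's mappings, so it must be a multiple of
 * SZ_4K * ndr_mappings, and all dimms in the set must be enabled.
 */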
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	int rc, i, id = -1;
	u8 *uuid = NULL;

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		id = nspm->id;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
		id = nsblk->id;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "not associated with dimm(s)\n");
		return -ENXIO;
	}

	div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %dK aligned\n", val,
				(SZ_4K * nd_region->ndr_mappings) / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_allocatable_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_resource(nd_region, nspm,
				val * nd_region->ndr_mappings);
	}

	/*
	 * Try to delete the namespace if we deleted all of its
	 * allocation, this is not the seed or 0th device for the
	 * region, and it is not actively claimed by a btt, pfn, or dax
	 * instance.
	 */
	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
		nd_device_unregister(dev, ND_ASYNC);

	return rc;
}

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	u8 **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
	int i;
	bool locked = false;
	struct device *dev = &ndns->dev;
	struct nd_region *nd_region = to_nd_region(dev->parent);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
			dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
			locked = true;
		}
	}
	return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);

static u8 *namespace_to_uuid(struct device *dev)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 *uuid = namespace_to_uuid(dev);

	if (IS_ERR(uuid))
		return PTR_ERR(uuid);
	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace. Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (list_empty(&nd_mapping->labels))
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = NULL;
	ssize_t rc = 0;
	u8 **ns_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);

static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nd_size_select_show(nsblk->lbasize,
				blk_lbasize_supported, buf);
	}

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nd_size_select_show(nspm->lbasize,
				pmem_lbasize_supported, buf);
	}
	return -ENXIO;
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const unsigned long *supported;
	unsigned long *lbasize;
	ssize_t rc = 0;

	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		lbasize = &nsblk->lbasize;
		supported = blk_lbasize_supported;
	} else if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		lbasize = &nspm->lbasize;
		supported = pmem_lbasize_supported;
	} else
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_size_select_store(dev, buf, lbasize, supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
			buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	int count = 0, i;
	u8 *uuid = NULL;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

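/*
 * Pick BTT1.1 vs BTT2.0 based on the label version found across the
 * region's dimms; see the loop_bitmask explanation below.
 */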
static int btt_claim_class(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i, loop_bitmask = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_index *nsindex;

		/*
		 * If any of the DIMMs do not support labels the only
		 * possible BTT format is v1.
		 */
		if (!ndd) {
			loop_bitmask = 0;
			break;
		}

		nsindex = to_namespace_index(ndd, ndd->ns_current);
		if (nsindex == NULL)
			loop_bitmask |= 1;
		else {
			/* check whether existing labels are v1.1 or v1.2 */
			if (__le16_to_cpu(nsindex->major) == 1
					&& __le16_to_cpu(nsindex->minor) == 1)
				loop_bitmask |= 2;
			else
				loop_bitmask |= 4;
		}
	}
	/*
	 * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
	 * block is found, a v1.1 label for any mapping will set bit 1, and a
	 * v1.2 label will set bit 2.
	 *
	 * At the end of the loop, at most one of the three bits must be set.
	 * If multiple bits were set, it means the different mappings disagree
	 * about their labels, and this must be cleaned up first.
	 *
	 * If all the label index blocks are found to agree, nsindex of NULL
	 * implies labels haven't been initialized yet, and when they will,
	 * they will be of the 1.2 format, so we can assume BTT2.0
	 *
	 * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
	 * found, we enforce BTT2.0
	 *
	 * If the loop was never entered, default to BTT1.1 (legacy namespaces)
	 */
	switch (loop_bitmask) {
	case 0:
	case 2:
		return NVDIMM_CCLASS_BTT;
	case 1:
	case 4:
		return NVDIMM_CCLASS_BTT2;
	default:
		return -ENXIO;
	}
}

static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);

static ssize_t __holder_class_store(struct device *dev, const char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (sysfs_streq(buf, "btt"))
		ndns->claim_class = btt_claim_class(dev);
	else if (sysfs_streq(buf, "pfn"))
		ndns->claim_class = NVDIMM_CCLASS_PFN;
	else if (sysfs_streq(buf, "dax"))
		ndns->claim_class = NVDIMM_CCLASS_DAX;
	else if (sysfs_streq(buf, ""))
		ndns->claim_class = NVDIMM_CCLASS_NONE;
	else
		return -EINVAL;

	/* btt_claim_class() could've returned an error */
	if (ndns->claim_class < 0)
		return ndns->claim_class;

	return 0;
}

static ssize_t holder_class_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __holder_class_store(dev, buf);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t holder_class_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	if (ndns->claim_class == NVDIMM_CCLASS_NONE)
		rc = sprintf(buf, "\n");
	else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
			(ndns->claim_class == NVDIMM_CCLASS_BTT2))
		rc = sprintf(buf, "btt\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
		rc = sprintf(buf, "pfn\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
		rc = sprintf(buf, "dax\n");
	else
		rc = sprintf(buf, "<unknown>\n");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(holder_class);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct device *claim;
	char *mode;
	ssize_t rc;

	device_lock(dev);
	claim = ndns->claim;
	if (claim && is_nd_btt(claim))
		mode = "safe";
	else if (claim && is_nd_pfn(claim))
		mode = "memory";
	else if (claim && is_nd_dax(claim))
		mode = "dax";
	else if (!claim && pmem_should_map_pages(dev))
		mode = "memory";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	&dev_attr_holder_class.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (a == &dev_attr_resource.attr) {
		if (is_namespace_blk(dev))
			return 0;
		return 0400;
	}

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return 0644;

		return a->mode;
	}

	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_holder_class.attr
			|| a == &dev_attr_force_raw.attr
			|| a == &dev_attr_mode.attr)
		return a->mode;

	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

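/*
 * Resolve and validate the namespace backing @dev, which may be the
 * namespace itself or a btt/pfn/dax device claiming it. Returns an
 * ERR_PTR() if the namespace is absent, busy, locked, too small, or
 * not fully configured.
 */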
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	resource_size_t size;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		device_lock(&ndns->dev);
		device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
					dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	if (nvdimm_namespace_locked(ndns))
		return ERR_PTR(-EACCES);

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
		if (!nsblk->lbasize) {
			dev_dbg(&ndns->dev, "sector size not set\n");
			return ERR_PTR(-ENODEV);
		}
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

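/*
 * Label-less regions get a single io-namespace that spans the whole
 * region.
 */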
static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs) {
		kfree(nsio);
		return NULL;
	}

	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;
	res = &nsio->res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;

	devs[0] = dev;
	return devs;
}

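/*
 * Return true if a label with @uuid, matching @cookie, and claiming
 * interleave position @pos exists somewhere in the region's label set.
 */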
static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
		u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_interleave_set *nd_set = nd_region->nd_set;
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		bool found_uuid = false;

		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			u16 position, nlabel;
			u64 isetcookie;

			if (!nd_label)
				continue;
			isetcookie = __le64_to_cpu(nd_label->isetcookie);
			position = __le16_to_cpu(nd_label->position);
			nlabel = __le16_to_cpu(nd_label->nlabel);

			if (isetcookie != cookie)
				continue;

			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
				continue;

			if (namespace_label_has(ndd, type_guid)
					&& !guid_equal(&nd_set->type_guid,
						&nd_label->type_guid)) {
				dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
						nd_set->type_guid.b,
						nd_label->type_guid.b);
				continue;
			}

			if (found_uuid) {
				dev_dbg(ndd->dev, "duplicate entry for uuid\n");
				return false;
			}
			found_uuid = true;
			if (nlabel != nd_region->ndr_mappings)
				continue;
			if (position != pos)
				continue;
			found = nd_label;
			break;
		}
		if (found)
			break;
	}
	return found != NULL;
}

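/*
 * Find the label with @pmem_id in each mapping, validate it against
 * the dpa range published in the NFIT, and move it to the front of
 * the mapping's label list.
 */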
1841 static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1842 {
1843 int i;
1844
1845 if (!pmem_id)
1846 return -ENODEV;
1847
1848 for (i = 0; i < nd_region->ndr_mappings; i++) {
1849 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1850 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1851 struct nd_namespace_label *nd_label = NULL;
1852 u64 hw_start, hw_end, pmem_start, pmem_end;
1853 struct nd_label_ent *label_ent;
1854
1855 lockdep_assert_held(&nd_mapping->lock);
1856 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1857 nd_label = label_ent->label;
1858 if (!nd_label)
1859 continue;
1860 if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
1861 break;
1862 nd_label = NULL;
1863 }
1864
1865 if (!nd_label) {
1866 WARN_ON(1);
1867 return -EINVAL;
1868 }
1869
1870 /*
1871 * Check that this label is compliant with the dpa
1872 * range published in NFIT
1873 */
1874 hw_start = nd_mapping->start;
1875 hw_end = hw_start + nd_mapping->size;
1876 pmem_start = __le64_to_cpu(nd_label->dpa);
1877 pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
1878 if (pmem_start >= hw_start && pmem_start < hw_end
1879 && pmem_end <= hw_end && pmem_end > hw_start)
1880 /* pass */;
1881 else {
1882 dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
1883 dev_name(ndd->dev), nd_label->uuid);
1884 return -EINVAL;
1885 }
1886
1887 /* move recently validated label to the front of the list */
1888 list_move(&label_ent->list, &nd_mapping->labels);
1889 }
1890 return 0;
1891 }
1892
1893 /**
1894 * create_namespace_pmem - validate interleave set labelling, retrieve label0
1895 * @nd_region: region with mappings to validate
1896 * @nspm: target namespace to create
1897 * @nd_label: target pmem namespace label to evaluate
1898 */
1899 static struct device *create_namespace_pmem(struct nd_region *nd_region,
1900 struct nd_namespace_index *nsindex,
1901 struct nd_namespace_label *nd_label)
1902 {
1903 u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
1904 u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
1905 struct nd_label_ent *label_ent;
1906 struct nd_namespace_pmem *nspm;
1907 struct nd_mapping *nd_mapping;
1908 resource_size_t size = 0;
1909 struct resource *res;
1910 struct device *dev;
1911 int rc = 0;
1912 u16 i;
1913
1914 if (cookie == 0) {
1915 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
1916 return ERR_PTR(-ENXIO);
1917 }
1918
1919 if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
1920 dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
1921 nd_label->uuid);
1922 if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
1923 return ERR_PTR(-EAGAIN);
1924
1925 dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
1926 nd_label->uuid);
1927 }
1928
1929 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1930 if (!nspm)
1931 return ERR_PTR(-ENOMEM);
1932
1933 nspm->id = -1;
1934 dev = &nspm->nsio.common.dev;
1935 dev->type = &namespace_pmem_device_type;
1936 dev->parent = &nd_region->dev;
1937 res = &nspm->nsio.res;
1938 res->name = dev_name(&nd_region->dev);
1939 res->flags = IORESOURCE_MEM;
1940
1941 for (i = 0; i < nd_region->ndr_mappings; i++) {
1942 if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
1943 continue;
1944 if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
1945 continue;
1946 break;
1947 }
1948
1949 if (i < nd_region->ndr_mappings) {
1950 struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
1951
1952 /*
1953 * Give up if we don't find an instance of a uuid at each
1954 * position (from 0 to nd_region->ndr_mappings - 1), or if we
1955 * find a dimm with two instances of the same uuid.
1956 */
1957 dev_err(&nd_region->dev, "%s missing label for %pUb\n",
1958 nvdimm_name(nvdimm), nd_label->uuid);
1959 rc = -EINVAL;
1960 goto err;
1961 }
1962
1963 /*
1964 * Fix up each mapping's 'labels' to have the validated pmem label for
1965 * that position at labels[0], and NULL at labels[1]. In the process,
1966 * check that the namespace aligns with interleave-set. We know
1967 * that it does not overlap with any blk namespaces by virtue of
1968 * the dimm being enabled (i.e. nd_label_reserve_dpa()
1969 * succeeded).
1970 */
1971 rc = select_pmem_id(nd_region, nd_label->uuid);
1972 if (rc)
1973 goto err;
1974
1975 /* Calculate total size and populate namespace properties from label0 */
1976 for (i = 0; i < nd_region->ndr_mappings; i++) {
1977 struct nd_namespace_label *label0;
1978 struct nvdimm_drvdata *ndd;
1979
1980 nd_mapping = &nd_region->mapping[i];
1981 label_ent = list_first_entry_or_null(&nd_mapping->labels,
1982 typeof(*label_ent), list);
1983 label0 = label_ent ? label_ent->label : NULL;
1984
1985 if (!label0) {
1986 WARN_ON(1);
1987 continue;
1988 }
1989
1990 size += __le64_to_cpu(label0->rawsize);
1991 if (__le16_to_cpu(label0->position) != 0)
1992 continue;
1993 WARN_ON(nspm->alt_name || nspm->uuid);
1994 nspm->alt_name = kmemdup((void __force *) label0->name,
1995 NSLABEL_NAME_LEN, GFP_KERNEL);
1996 nspm->uuid = kmemdup((void __force *) label0->uuid,
1997 NSLABEL_UUID_LEN, GFP_KERNEL);
1998 nspm->lbasize = __le64_to_cpu(label0->lbasize);
1999 ndd = to_ndd(nd_mapping);
2000 if (namespace_label_has(ndd, abstraction_guid))
2001 nspm->nsio.common.claim_class
2002 = to_nvdimm_cclass(&label0->abstraction_guid);
2003
2004 }
2005
2006 if (!nspm->alt_name || !nspm->uuid) {
2007 rc = -ENOMEM;
2008 goto err;
2009 }
2010
2011 nd_namespace_pmem_set_resource(nd_region, nspm, size);
2012
2013 return dev;
2014 err:
2015 namespace_pmem_release(dev);
2016 switch (rc) {
2017 case -EINVAL:
2018 dev_dbg(&nd_region->dev, "invalid label(s)\n");
2019 break;
2020 case -ENODEV:
2021 dev_dbg(&nd_region->dev, "label not found\n");
2022 break;
2023 default:
2024 dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
2025 break;
2026 }
2027 return ERR_PTR(rc);
2028 }
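/*
 * Sketch of the caller's view (see scan_labels() below): the ERR_PTR()
 * encoding lets one return value carry either a device or an errno:
 *
 *	dev = create_namespace_pmem(nd_region, nsindex, nd_label);
 *	if (IS_ERR(dev))
 *		rc = PTR_ERR(dev);
 *
 * where -EAGAIN means "skip this label", -ENODEV falls through to
 * seed-namespace creation, and any other errno aborts the scan.
 */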
2029
2030 struct resource *nsblk_add_resource(struct nd_region *nd_region,
2031 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
2032 resource_size_t start)
2033 {
2034 struct nd_label_id label_id;
2035 struct resource *res;
2036
2037 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
2038 res = krealloc(nsblk->res,
2039 sizeof(void *) * (nsblk->num_resources + 1),
2040 GFP_KERNEL);
2041 if (!res)
2042 return NULL;
2043 nsblk->res = (struct resource **) res;
2044 for_each_dpa_resource(ndd, res)
2045 if (strcmp(res->name, label_id.id) == 0
2046 && res->start == start) {
2047 nsblk->res[nsblk->num_resources++] = res;
2048 return res;
2049 }
2050 return NULL;
2051 }
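/*
 * nsblk->res above is a flat array of pointers into the dimm's dpa
 * resource tree, grown one slot per call via krealloc() (hence the
 * sizeof(void *) sizing). The new slot is only committed once a dpa
 * resource matching this namespace's label_id and start address is
 * found; otherwise NULL is returned and the enlarged array simply
 * waits for a later call.
 */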
2052
2053 static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
2054 {
2055 struct nd_namespace_blk *nsblk;
2056 struct device *dev;
2057
2058 if (!is_nd_blk(&nd_region->dev))
2059 return NULL;
2060
2061 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2062 if (!nsblk)
2063 return NULL;
2064
2065 dev = &nsblk->common.dev;
2066 dev->type = &namespace_blk_device_type;
2067 nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
2068 if (nsblk->id < 0) {
2069 kfree(nsblk);
2070 return NULL;
2071 }
2072 dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
2073 dev->parent = &nd_region->dev;
2074 dev->groups = nd_namespace_attribute_groups;
2075
2076 return &nsblk->common.dev;
2077 }
2078
2079 static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
2080 {
2081 struct nd_namespace_pmem *nspm;
2082 struct resource *res;
2083 struct device *dev;
2084
2085 if (!is_memory(&nd_region->dev))
2086 return NULL;
2087
2088 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
2089 if (!nspm)
2090 return NULL;
2091
2092 dev = &nspm->nsio.common.dev;
2093 dev->type = &namespace_pmem_device_type;
2094 dev->parent = &nd_region->dev;
2095 res = &nspm->nsio.res;
2096 res->name = dev_name(&nd_region->dev);
2097 res->flags = IORESOURCE_MEM;
2098
2099 nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
2100 if (nspm->id < 0) {
2101 kfree(nspm);
2102 return NULL;
2103 }
2104 dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
2105 dev->groups = nd_namespace_attribute_groups;
2106 nd_namespace_pmem_set_resource(nd_region, nspm, 0);
2107
2108 return dev;
2109 }
2110
2111 void nd_region_create_ns_seed(struct nd_region *nd_region)
2112 {
2113 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2114
2115 if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
2116 return;
2117
2118 if (is_nd_blk(&nd_region->dev))
2119 nd_region->ns_seed = nd_namespace_blk_create(nd_region);
2120 else
2121 nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
2122
2123 /*
2124 * Seed creation failures are not fatal; provisioning is simply
2125 * disabled until memory becomes available
2126 */
2127 if (!nd_region->ns_seed)
2128 dev_err(&nd_region->dev, "failed to create %s namespace\n",
2129 is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
2130 else
2131 nd_device_register(nd_region->ns_seed);
2132 }
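/*
 * The "seed" devices created here and below implement the libnvdimm
 * provisioning model: a region always publishes one empty namespace
 * (and pfn/dax/btt) device so that userspace has a sysfs object to
 * configure; once a seed is provisioned, a replacement seed is
 * created in its place.
 */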
2133
2134 void nd_region_create_dax_seed(struct nd_region *nd_region)
2135 {
2136 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2137 nd_region->dax_seed = nd_dax_create(nd_region);
2138 /*
2139 * Seed creation failures are not fatal; provisioning is simply
2140 * disabled until memory becomes available
2141 */
2142 if (!nd_region->dax_seed)
2143 dev_err(&nd_region->dev, "failed to create dax namespace\n");
2144 }
2145
2146 void nd_region_create_pfn_seed(struct nd_region *nd_region)
2147 {
2148 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2149 nd_region->pfn_seed = nd_pfn_create(nd_region);
2150 /*
2151 * Seed creation failures are not fatal; provisioning is simply
2152 * disabled until memory becomes available
2153 */
2154 if (!nd_region->pfn_seed)
2155 dev_err(&nd_region->dev, "failed to create pfn namespace\n");
2156 }
2157
2158 void nd_region_create_btt_seed(struct nd_region *nd_region)
2159 {
2160 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2161 nd_region->btt_seed = nd_btt_create(nd_region);
2162 /*
2163 * Seed creation failures are not fatal; provisioning is simply
2164 * disabled until memory becomes available
2165 */
2166 if (!nd_region->btt_seed)
2167 dev_err(&nd_region->dev, "failed to create btt namespace\n");
2168 }
2169
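/*
 * Scan @devs for a namespace that already claims @nd_label's uuid.
 * Returns a negative errno on error (conflicting pmem extents, or
 * failure to record a blk extent), the index of the matching device
 * otherwise, or @count when nothing matched and the caller should
 * create a new namespace device for this label.
 */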
2170 static int add_namespace_resource(struct nd_region *nd_region,
2171 struct nd_namespace_label *nd_label, struct device **devs,
2172 int count)
2173 {
2174 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2175 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2176 int i;
2177
2178 for (i = 0; i < count; i++) {
2179 u8 *uuid = namespace_to_uuid(devs[i]);
2180 struct resource *res;
2181
2182 if (IS_ERR_OR_NULL(uuid)) {
2183 WARN_ON(1);
2184 continue;
2185 }
2186
2187 if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
2188 continue;
2189 if (is_namespace_blk(devs[i])) {
2190 res = nsblk_add_resource(nd_region, ndd,
2191 to_nd_namespace_blk(devs[i]),
2192 __le64_to_cpu(nd_label->dpa));
2193 if (!res)
2194 return -ENXIO;
2195 nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
2196 } else {
2197 dev_err(&nd_region->dev,
2198 "error: conflicting extents for uuid: %pUb\n",
2199 nd_label->uuid);
2200 return -ENXIO;
2201 }
2202 break;
2203 }
2204
2205 return i;
2206 }
2207
2208 static struct device *create_namespace_blk(struct nd_region *nd_region,
2209 struct nd_namespace_label *nd_label, int count)
2210 {
2211
2212 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2213 struct nd_interleave_set *nd_set = nd_region->nd_set;
2214 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2215 struct nd_namespace_blk *nsblk;
2216 char name[NSLABEL_NAME_LEN];
2217 struct device *dev = NULL;
2218 struct resource *res;
2219
2220 if (namespace_label_has(ndd, type_guid)) {
2221 if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
2222 dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
2223 nd_set->type_guid.b,
2224 nd_label->type_guid.b);
2225 return ERR_PTR(-EAGAIN);
2226 }
2227
2228 if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
2229 dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
2230 nd_set->cookie2,
2231 __le64_to_cpu(nd_label->isetcookie));
2232 return ERR_PTR(-EAGAIN);
2233 }
2234 }
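/*
 * namespace_label_has() gates fields that only exist in the v1.2
 * label definition; the type_guid and cookie2 checks above, and the
 * abstraction_guid below, are skipped entirely when the dimm carries
 * older v1.1 labels.
 */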
2235
2236 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2237 if (!nsblk)
2238 return ERR_PTR(-ENOMEM);
2239 dev = &nsblk->common.dev;
2240 dev->type = &namespace_blk_device_type;
2241 dev->parent = &nd_region->dev;
2242 nsblk->id = -1;
2243 nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
2244 nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
2245 GFP_KERNEL);
2246 if (namespace_label_has(ndd, abstraction_guid))
2247 nsblk->common.claim_class
2248 = to_nvdimm_cclass(&nd_label->abstraction_guid);
2249 if (!nsblk->uuid)
2250 goto blk_err;
2251 memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
2252 if (name[0])
2253 nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
2254 GFP_KERNEL);
2255 res = nsblk_add_resource(nd_region, ndd, nsblk,
2256 __le64_to_cpu(nd_label->dpa));
2257 if (!res)
2258 goto blk_err;
2259 nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
2260 return dev;
2261 blk_err:
2262 namespace_blk_release(dev);
2263 return ERR_PTR(-ENXIO);
2264 }
2265
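/*
 * Comparator used to sort the devs[] array so that namespace devices
 * are registered in dpa order rather than label-discovery order,
 * keeping device naming deterministic for a given label set.
 */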
2266 static int cmp_dpa(const void *a, const void *b)
2267 {
2268 const struct device *dev_a = *(const struct device **) a;
2269 const struct device *dev_b = *(const struct device **) b;
2270 struct nd_namespace_blk *nsblk_a, *nsblk_b;
2271 struct nd_namespace_pmem *nspm_a, *nspm_b;
2272
2273 if (is_namespace_io(dev_a))
2274 return 0;
2275
2276 if (is_namespace_blk(dev_a)) {
2277 nsblk_a = to_nd_namespace_blk(dev_a);
2278 nsblk_b = to_nd_namespace_blk(dev_b);
2279
2280 return nsblk_a->res[0]->start < nsblk_b->res[0]->start ? -1
2281 : nsblk_a->res[0]->start > nsblk_b->res[0]->start;
2282 }
2283
2284 nspm_a = to_nd_namespace_pmem(dev_a);
2285 nspm_b = to_nd_namespace_pmem(dev_b);
2286
2287 return nspm_a->nsio.res.start < nspm_b->nsio.res.start ? -1
2288 : nspm_a->nsio.res.start > nspm_b->nsio.res.start;
2289 }
2290
2291 static struct device **scan_labels(struct nd_region *nd_region)
2292 {
2293 int i, count = 0;
2294 struct device *dev, **devs = NULL;
2295 struct nd_label_ent *label_ent, *e;
2296 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2297 resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
2298
2299 /* "safe" because create_namespace_pmem() might list_move() label_ent */
2300 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
2301 struct nd_namespace_label *nd_label = label_ent->label;
2302 struct device **__devs;
2303 u32 flags;
2304
2305 if (!nd_label)
2306 continue;
2307 flags = __le32_to_cpu(nd_label->flags);
2308 if (is_nd_blk(&nd_region->dev)
2309 == !!(flags & NSLABEL_FLAG_LOCAL))
2310 /* pass, region matches label type */;
2311 else
2312 continue;
2313
2314 /* skip labels that describe extents outside of the region */
2315 if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start || __le64_to_cpu(nd_label->dpa) > map_end)
2316 continue;
2317
2318 i = add_namespace_resource(nd_region, nd_label, devs, count);
2319 if (i < 0)
2320 goto err;
2321 if (i < count)
2322 continue;
2323 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
2324 if (!__devs)
2325 goto err;
2326 memcpy(__devs, devs, sizeof(dev) * count);
2327 kfree(devs);
2328 devs = __devs;
2329
2330 if (is_nd_blk(&nd_region->dev))
2331 dev = create_namespace_blk(nd_region, nd_label, count);
2332 else {
2333 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2334 struct nd_namespace_index *nsindex;
2335
2336 nsindex = to_namespace_index(ndd, ndd->ns_current);
2337 dev = create_namespace_pmem(nd_region, nsindex, nd_label);
2338 }
2339
2340 if (IS_ERR(dev)) {
2341 switch (PTR_ERR(dev)) {
2342 case -EAGAIN:
2343 /* skip invalid labels */
2344 continue;
2345 case -ENODEV:
2346 /* fallthrough to seed creation */
2347 break;
2348 default:
2349 goto err;
2350 }
2351 } else
2352 devs[count++] = dev;
2353
2354 }
2355
2356 dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
2357 count, is_nd_blk(&nd_region->dev)
2358 ? "blk" : "pmem", count == 1 ? "" : "s");
2359
2360 if (count == 0) {
2361 /* Publish a zero-sized namespace for userspace to configure. */
2362 nd_mapping_free_labels(nd_mapping);
2363
2364 devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
2365 if (!devs)
2366 goto err;
2367 if (is_nd_blk(&nd_region->dev)) {
2368 struct nd_namespace_blk *nsblk;
2369
2370 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2371 if (!nsblk)
2372 goto err;
2373 dev = &nsblk->common.dev;
2374 dev->type = &namespace_blk_device_type;
2375 } else {
2376 struct nd_namespace_pmem *nspm;
2377
2378 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
2379 if (!nspm)
2380 goto err;
2381 dev = &nspm->nsio.common.dev;
2382 dev->type = &namespace_pmem_device_type;
2383 nd_namespace_pmem_set_resource(nd_region, nspm, 0);
2384 }
2385 dev->parent = &nd_region->dev;
2386 devs[count++] = dev;
2387 } else if (is_memory(&nd_region->dev)) {
2388 /* clean unselected labels */
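/*
 * select_pmem_id() moved the labels of the @count selected
 * namespaces to the front of each mapping's list; keep those
 * first @count entries and free the rest.
 */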
2389 for (i = 0; i < nd_region->ndr_mappings; i++) {
2390 struct list_head *l, *e;
2391 LIST_HEAD(list);
2392 int j;
2393
2394 nd_mapping = &nd_region->mapping[i];
2395 if (list_empty(&nd_mapping->labels)) {
2396 WARN_ON(1);
2397 continue;
2398 }
2399
2400 j = count;
2401 list_for_each_safe(l, e, &nd_mapping->labels) {
2402 if (!j--)
2403 break;
2404 list_move_tail(l, &list);
2405 }
2406 nd_mapping_free_labels(nd_mapping);
2407 list_splice_init(&list, &nd_mapping->labels);
2408 }
2409 }
2410
2411 if (count > 1)
2412 sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);
2413
2414 return devs;
2415
2416 err:
2417 if (devs) {
2418 for (i = 0; devs[i]; i++)
2419 if (is_nd_blk(&nd_region->dev))
2420 namespace_blk_release(devs[i]);
2421 else
2422 namespace_pmem_release(devs[i]);
2423 kfree(devs);
2424 }
2425 return NULL;
2426 }
2427
2428 static struct device **create_namespaces(struct nd_region *nd_region)
2429 {
2430 struct nd_mapping *nd_mapping;
2431 struct device **devs;
2432 int i;
2433
2434 if (nd_region->ndr_mappings == 0)
2435 return NULL;
2436
2437 /* lock down all mappings while we scan labels */
2438 for (i = 0; i < nd_region->ndr_mappings; i++) {
2439 nd_mapping = &nd_region->mapping[i];
2440 mutex_lock_nested(&nd_mapping->lock, i);
2441 }
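/*
 * All nd_mapping locks share one lock class, so taking several at
 * once would trip lockdep without the per-index subclass passed to
 * mutex_lock_nested() above; acquisition is in ascending index order
 * and release below is in reverse.
 */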
2442
2443 devs = scan_labels(nd_region);
2444
2445 for (i = 0; i < nd_region->ndr_mappings; i++) {
2446 int reverse = nd_region->ndr_mappings - 1 - i;
2447
2448 nd_mapping = &nd_region->mapping[reverse];
2449 mutex_unlock(&nd_mapping->lock);
2450 }
2451
2452 return devs;
2453 }
2454
2455 static int init_active_labels(struct nd_region *nd_region)
2456 {
2457 int i;
2458
2459 for (i = 0; i < nd_region->ndr_mappings; i++) {
2460 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2461 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2462 struct nvdimm *nvdimm = nd_mapping->nvdimm;
2463 struct nd_label_ent *label_ent;
2464 int count, j;
2465
2466 /*
2467 * If the dimm is disabled then we may need to prevent
2468 * the region from being activated.
2469 */
2470 if (!ndd) {
2471 if (test_bit(NDD_LOCKED, &nvdimm->flags))
2472 /* fail, label data may be unreadable */;
2473 else if (test_bit(NDD_ALIASING, &nvdimm->flags))
2474 /* fail, labels needed to disambiguate dpa */;
2475 else
2476 return 0;
2477
2478 dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
2479 dev_name(&nd_mapping->nvdimm->dev),
2480 test_bit(NDD_LOCKED, &nvdimm->flags)
2481 ? "locked" : "disabled");
2482 return -ENXIO;
2483 }
2484 nd_mapping->ndd = ndd;
2485 atomic_inc(&nvdimm->busy);
2486 get_ndd(ndd);
2487
2488 count = nd_label_active_count(ndd);
2489 dev_dbg(ndd->dev, "count: %d\n", count);
2490 if (!count)
2491 continue;
2492 for (j = 0; j < count; j++) {
2493 struct nd_namespace_label *label;
2494
2495 label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
2496 if (!label_ent)
2497 break;
2498 label = nd_label_active(ndd, j);
2499 if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
2500 u32 flags = __le32_to_cpu(label->flags);
2501
2502 flags &= ~NSLABEL_FLAG_LOCAL;
2503 label->flags = __cpu_to_le32(flags);
2504 }
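/*
 * Dimms flagged NDD_NOBLK do not support blk-mode (local alias)
 * access, so a stale LOCAL flag in an active label is masked off
 * above, letting the label be considered for pmem namespace
 * assembly instead.
 */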
2505 label_ent->label = label;
2506
2507 mutex_lock(&nd_mapping->lock);
2508 list_add_tail(&label_ent->list, &nd_mapping->labels);
2509 mutex_unlock(&nd_mapping->lock);
2510 }
2511
2512 if (j >= count)
2513 continue;
2514
2515 mutex_lock(&nd_mapping->lock);
2516 nd_mapping_free_labels(nd_mapping);
2517 mutex_unlock(&nd_mapping->lock);
2518 return -ENOMEM;
2519 }
2520
2521 return 0;
2522 }
2523
2524 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
2525 {
2526 struct device **devs = NULL;
2527 int i, rc = 0, type;
2528
2529 *err = 0;
2530 nvdimm_bus_lock(&nd_region->dev);
2531 rc = init_active_labels(nd_region);
2532 if (rc) {
2533 nvdimm_bus_unlock(&nd_region->dev);
2534 return rc;
2535 }
2536
2537 type = nd_region_to_nstype(nd_region);
2538 switch (type) {
2539 case ND_DEVICE_NAMESPACE_IO:
2540 devs = create_namespace_io(nd_region);
2541 break;
2542 case ND_DEVICE_NAMESPACE_PMEM:
2543 case ND_DEVICE_NAMESPACE_BLK:
2544 devs = create_namespaces(nd_region);
2545 break;
2546 default:
2547 break;
2548 }
2549 nvdimm_bus_unlock(&nd_region->dev);
2550
2551 if (!devs)
2552 return -ENODEV;
2553
2554 for (i = 0; devs[i]; i++) {
2555 struct device *dev = devs[i];
2556 int id;
2557
2558 if (type == ND_DEVICE_NAMESPACE_BLK) {
2559 struct nd_namespace_blk *nsblk;
2560
2561 nsblk = to_nd_namespace_blk(dev);
2562 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2563 GFP_KERNEL);
2564 nsblk->id = id;
2565 } else if (type == ND_DEVICE_NAMESPACE_PMEM) {
2566 struct nd_namespace_pmem *nspm;
2567
2568 nspm = to_nd_namespace_pmem(dev);
2569 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2570 GFP_KERNEL);
2571 nspm->id = id;
2572 } else
2573 id = i;
2574
2575 if (id < 0)
2576 break;
2577 dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
2578 dev->groups = nd_namespace_attribute_groups;
2579 nd_device_register(dev);
2580 }
2581 if (i)
2582 nd_region->ns_seed = devs[0];
2583
2584 if (devs[i]) {
2585 int j;
2586
2587 for (j = i; devs[j]; j++) {
2588 struct device *dev = devs[j];
2589
2590 device_initialize(dev);
2591 put_device(dev);
2592 }
2593 *err = j - i;
2594 /*
2595 * All of the namespaces we tried to register failed, so
2596 * fail region activation.
2597 */
2598 if (*err == 0)
2599 rc = -ENODEV;
2600 }
2601 kfree(devs);
2602
2603 if (rc == -ENODEV)
2604 return rc;
2605
2606 return i;
2607 }
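/*
 * Caller-contract sketch (modeled on the region driver's probe path;
 * names are illustrative): a negative return fails region probe, a
 * non-negative return is the number of namespace devices registered,
 * and *err counts the devices that could not be registered:
 *
 *	int err = 0, rc;
 *
 *	rc = nd_region_register_namespaces(nd_region, &err);
 *	if (rc < 0)
 *		return rc;
 *	if (err)
 *		dev_dbg(&nd_region->dev, "%d registrations failed\n", err);
 */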