// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nspm->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nspm->id);
	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}

static bool is_namespace_pmem(const struct device *dev);
static bool is_namespace_blk(const struct device *dev);
static bool is_namespace_io(const struct device *dev);

static int is_uuid_busy(struct device *dev, void *data)
{
	u8 *uuid1 = data, *uuid2 = NULL;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid2 = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid2 = nsblk->uuid;
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		uuid2 = nd_btt->uuid;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		uuid2 = nd_pfn->uuid;
	}

	if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
		return -EBUSY;

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_region(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}

bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_namespace_io *nsio;

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

	if (ndns->force_raw)
		return false;

	nsio = to_nd_namespace_io(dev);
	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
				IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED)
		return false;

	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);

unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (nspm->lbasize == 0 || nspm->lbasize == 512)
			/* default */;
		else if (nspm->lbasize == 4096)
			return 4096;
		else
			dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
					nspm->lbasize);
	}

	/*
	 * There is no namespace label (is_namespace_io()), or the label
	 * indicates the default sector size.
	 */
	return 512;
}
EXPORT_SYMBOL(pmem_sector_size);

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		int nsidx = 0;

		if (is_namespace_pmem(&ndns->dev)) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(&ndns->dev);
			nsidx = nspm->id;
		}

		if (nsidx)
			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
					suffix ? suffix : "");
		else
			sprintf(name, "pmem%d%s", nd_region->id,
					suffix ? suffix : "");
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
				suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const u8 *nd_dev_to_uuid(struct device *dev)
{
	static const u8 null_uuid[16];

	if (!dev)
		return null_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kstrndup(buf, len, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

out:
	kfree(input);
	return rc;
}

static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	if (!nsblk->uuid)
		return 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);
	return size;
}

static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);

static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = nsblk->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered. Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}
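
/*
 * Illustrative walk-through of the shrink path (example numbers only,
 * not from any specific platform): if a label set owns two ordered
 * allocations of 4M and 2M on a dimm and the namespace shrinks by
 * n = 5M per dimm, scan_free() deletes the trailing 2M allocation
 * outright (n >= resource_size()), then trims the remaining 3M off the
 * 4M allocation with adjust_resource(), leaving a 1M extent flagged
 * DPA_RESOURCE_ADJUSTED until the labels are rewritten.
 */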

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	if (is_blk)
		first_dpa = nd_mapping->start + nd_mapping->size - n;
	else
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}

/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one
 * exists). If reserving PMEM any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, struct resource *prev,
		struct resource *next, struct resource *exist,
		resource_size_t n, struct resource *valid)
{
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;

	if (valid->start >= valid->end)
		goto invalid;

	if (is_reserve)
		return;

	if (!is_pmem) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_bus *nvdimm_bus;
		struct blk_alloc_info info = {
			.nd_mapping = nd_mapping,
			.available = nd_mapping->size,
			.res = valid,
		};

		WARN_ON(!is_nd_blk(&nd_region->dev));
		nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
		device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
		return;
	}

	/* allocation needs to be contiguous, so this is all or nothing */
	if (resource_size(valid) < n)
		goto invalid;

	/* we've got all the space we need and no existing allocation */
	if (!exist)
		return;

	/* allocation needs to be contiguous with the existing namespace */
	if (valid->start == exist->end + 1
			|| valid->end == exist->start - 1)
		return;

invalid:
	/* truncate @valid size to 0 */
	valid->end = valid->start - 1;
}
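
/*
 * Worked example for space_valid() (illustrative addresses, assumed
 * layout): given a "pmem-<uuid>" label_id with an existing allocation
 * spanning DPA 0x1000000-0x1ffffff, a free range starting at 0x2000000
 * is accepted because valid->start == exist->end + 1, while an equally
 * large free range elsewhere in the mapping is truncated to zero size
 * since PMEM allocations must remain contiguous.
 */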

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *exist = NULL, valid;
	const resource_size_t to_allocate = n;
	int first;

	for_each_dpa_resource(ndd, res)
		if (strcmp(label_id->id, res->name) == 0)
			exist = res;

	valid.start = nd_mapping->start;
	valid.end = mapping_end;
	valid.name = "free space";
retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling, *new_res = NULL;
		resource_size_t allocate, available = 0;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			valid.start = nd_mapping->start;
			valid.end = res->start - 1;
			space_valid(nd_region, ndd, label_id, NULL, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			valid.start = res->start + resource_size(res);
			valid.end = min(mapping_end, next->start - 1);
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_MID;
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			valid.start = res->start + resource_size(res);
			valid.end = mapping_end;
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_AFTER;
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				valid.start += available - allocate;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					valid.start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve" pass. Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}

static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}

int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;
	int rc;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
	if (rc)
		release_free_pmem(nvdimm_bus, nd_mapping);
	return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered. For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA. For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}
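
/*
 * Sketch of the two-pass BLK strategy above (for illustration): a grow
 * of a "blk-<uuid>" allocation first runs with all unallocated PMEM
 * capacity pinned by "pmem-reserve" placeholders so that BLK space is
 * carved from DPA no interleave set can use. Only if that pass leaves
 * rem != 0 are the reservations released and the scan repeated,
 * letting BLK encroach on PMEM-capable DPA from the top down.
 */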

static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;

	if (size && !nspm->uuid) {
		WARN_ON_ONCE(1);
		size = 0;
	}

	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res;

		if (!ndd) {
			size = 0;
			goto out;
		}

		nd_label_gen_id(&label_id, nspm->uuid, 0);

		/* calculate a spa offset from the dpa allocation offset */
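		/*
		 * For example (illustrative numbers): in a 4-way
		 * interleave set where the matching dpa resource starts
		 * 1M into each dimm's mapping, the namespace starts
		 * 1M * ndr_mappings = 4M into the region's spa range.
		 */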
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}

		WARN_ON_ONCE(1);
		size = 0;
	}

out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}

static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	int rc, i, id = -1;
	u8 *uuid = NULL;

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		id = nspm->id;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
		id = nsblk->id;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "not associated with dimm(s)\n");
		return -ENXIO;
	}

	div_u64_rem(val, PAGE_SIZE * nd_region->ndr_mappings, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %ldK aligned\n", val,
				(PAGE_SIZE * nd_region->ndr_mappings) / SZ_1K);
		return -EINVAL;
	}
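
	/*
	 * E.g. (illustrative): with 4K pages and a 2-way interleave set,
	 * PAGE_SIZE * ndr_mappings = 8K, so a request of val = 12K fails
	 * the check above with -EINVAL, while val = 16K passes and
	 * becomes 8K per dimm after the div_u64() below.
	 */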

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_allocatable_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_resource(nd_region, nspm,
				val * nd_region->ndr_mappings);
	}

	/*
	 * Try to delete the namespace if we deleted all of its
	 * allocation, this is not the seed or 0th device for the
	 * region, and it is not actively claimed by a btt, pfn, or dax
	 * instance.
	 */
	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
		nd_device_unregister(dev, ND_ASYNC);

	return rc;
}

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	u8 **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
	int i;
	bool locked = false;
	struct device *dev = &ndns->dev;
	struct nd_region *nd_region = to_nd_region(dev->parent);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
			dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
			locked = true;
		}
	}
	return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);

static u8 *namespace_to_uuid(struct device *dev)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 *uuid = namespace_to_uuid(dev);

	if (IS_ERR(uuid))
		return PTR_ERR(uuid);
	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace. Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (list_empty(&nd_mapping->labels))
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);

		mutex_lock(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			struct nd_label_id label_id;

			if (!nd_label)
				continue;
			nd_label_gen_id(&label_id, nd_label->uuid,
					__le32_to_cpu(nd_label->flags));
			if (strcmp(old_label_id.id, label_id.id) == 0)
				set_bit(ND_LABEL_REAP, &label_ent->flags);
		}
		mutex_unlock(&nd_mapping->lock);
	}
	kfree(*old_uuid);
out:
	*old_uuid = new_uuid;
	return 0;
}
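
/*
 * The label_id strings compared above take the form "<type>-<uuid>",
 * e.g. (hypothetical uuid) "pmem-f81d4fae-..." for an interleaved
 * namespace (flags == 0) and "blk-f81d4fae-..." when NSLABEL_FLAG_LOCAL
 * is set, so a rename amounts to rewriting the dpa resource names from
 * the old id to the new one and reaping the stale labels.
 */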

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = NULL;
	ssize_t rc = 0;
	u8 **ns_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR(resource, 0400, resource_show, NULL);

static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nd_size_select_show(nsblk->lbasize,
				blk_lbasize_supported, buf);
	}

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nd_size_select_show(nspm->lbasize,
				pmem_lbasize_supported, buf);
	}
	return -ENXIO;
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const unsigned long *supported;
	unsigned long *lbasize;
	ssize_t rc = 0;

	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		lbasize = &nsblk->lbasize;
		supported = blk_lbasize_supported;
	} else if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		lbasize = &nspm->lbasize;
		supported = pmem_lbasize_supported;
	} else
		return -ENXIO;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_size_select_store(dev, buf, lbasize, supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
			buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	int count = 0, i;
	u8 *uuid = NULL;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

static int btt_claim_class(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i, loop_bitmask = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_index *nsindex;

		/*
		 * If any of the DIMMs do not support labels the only
		 * possible BTT format is v1.
		 */
		if (!ndd) {
			loop_bitmask = 0;
			break;
		}

		nsindex = to_namespace_index(ndd, ndd->ns_current);
		if (nsindex == NULL)
			loop_bitmask |= 1;
		else {
			/* check whether existing labels are v1.1 or v1.2 */
			if (__le16_to_cpu(nsindex->major) == 1
					&& __le16_to_cpu(nsindex->minor) == 1)
				loop_bitmask |= 2;
			else
				loop_bitmask |= 4;
		}
	}
	/*
	 * If nsindex is null, loop_bitmask's bit 0 will be set, and if an
	 * index block is found, a v1.1 label for any mapping will set bit 1,
	 * and a v1.2 label will set bit 2.
	 *
	 * At the end of the loop, at most one of the three bits must be set.
	 * If multiple bits were set, it means the different mappings disagree
	 * about their labels, and this must be cleaned up first.
	 *
	 * If all the label index blocks are found to agree, nsindex of NULL
	 * implies labels haven't been initialized yet, and when they are,
	 * they will be of the 1.2 format, so we can assume BTT2.0
	 *
	 * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
	 * found, we enforce BTT2.0
	 *
	 * If the loop was never entered, default to BTT1.1 (legacy namespaces)
	 */
	switch (loop_bitmask) {
	case 0:
	case 2:
		return NVDIMM_CCLASS_BTT;
	case 1:
	case 4:
		return NVDIMM_CCLASS_BTT2;
	default:
		return -ENXIO;
	}
}
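
/*
 * Example outcomes of the bitmask above, enumerated for clarity: a
 * label-less DIMM forces loop_bitmask == 0 -> BTT1.1; a consistent set
 * of v1.2 labels yields 4 -> BTT2.0; one mapping with v1.1 labels and
 * another with v1.2 yields 2 | 4 == 6 -> -ENXIO until the label
 * versions are made consistent.
 */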

static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	nd_device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);

static int __holder_class_store(struct device *dev, const char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (sysfs_streq(buf, "btt")) {
		int rc = btt_claim_class(dev);

		if (rc < NVDIMM_CCLASS_NONE)
			return rc;
		ndns->claim_class = rc;
	} else if (sysfs_streq(buf, "pfn"))
		ndns->claim_class = NVDIMM_CCLASS_PFN;
	else if (sysfs_streq(buf, "dax"))
		ndns->claim_class = NVDIMM_CCLASS_DAX;
	else if (sysfs_streq(buf, ""))
		ndns->claim_class = NVDIMM_CCLASS_NONE;
	else
		return -EINVAL;

	return 0;
}

static ssize_t holder_class_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __holder_class_store(dev, buf);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t holder_class_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	nd_device_lock(dev);
	if (ndns->claim_class == NVDIMM_CCLASS_NONE)
		rc = sprintf(buf, "\n");
	else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
			(ndns->claim_class == NVDIMM_CCLASS_BTT2))
		rc = sprintf(buf, "btt\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
		rc = sprintf(buf, "pfn\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
		rc = sprintf(buf, "dax\n");
	else
		rc = sprintf(buf, "<unknown>\n");
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(holder_class);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct device *claim;
	char *mode;
	ssize_t rc;

	nd_device_lock(dev);
	claim = ndns->claim;
	if (claim && is_nd_btt(claim))
		mode = "safe";
	else if (claim && is_nd_pfn(claim))
		mode = "memory";
	else if (claim && is_nd_dax(claim))
		mode = "dax";
	else if (!claim && pmem_should_map_pages(dev))
		mode = "memory";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	&dev_attr_holder_class.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (a == &dev_attr_resource.attr && is_namespace_blk(dev))
		return 0;

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return 0644;

		return a->mode;
	}

	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_holder_class.attr
			|| a == &dev_attr_force_raw.attr
			|| a == &dev_attr_mode.attr)
		return a->mode;

	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

static const struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
	.groups = nd_namespace_attribute_groups,
};

static const struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
	.groups = nd_namespace_attribute_groups,
};

static const struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
	.groups = nd_namespace_attribute_groups,
};

static bool is_namespace_pmem(const struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(const struct device *dev)
{
	return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(const struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}

struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	resource_size_t size;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		nd_device_lock(&ndns->dev);
		nd_device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
					dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	if (nvdimm_namespace_locked(ndns))
		return ERR_PTR(-EACCES);

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small, must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
		if (!nsblk->lbasize) {
			dev_dbg(&ndns->dev, "sector size not set\n");
			return ERR_PTR(-ENODEV);
		}
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
		resource_size_t size)
{
	if (is_namespace_blk(&ndns->dev))
		return 0;
	return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
}
EXPORT_SYMBOL_GPL(devm_namespace_enable);

void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
{
	if (is_namespace_blk(&ndns->dev))
		return;
	devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
}
EXPORT_SYMBOL_GPL(devm_namespace_disable);

static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs) {
		kfree(nsio);
		return NULL;
	}

	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;
	res = &nsio->res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;

	devs[0] = dev;
	return devs;
}

static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
		u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_interleave_set *nd_set = nd_region->nd_set;
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		bool found_uuid = false;

		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			u16 position, nlabel;
			u64 isetcookie;

			if (!nd_label)
				continue;
			isetcookie = __le64_to_cpu(nd_label->isetcookie);
			position = __le16_to_cpu(nd_label->position);
			nlabel = __le16_to_cpu(nd_label->nlabel);

			if (isetcookie != cookie)
				continue;

			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
				continue;

			if (namespace_label_has(ndd, type_guid)
					&& !guid_equal(&nd_set->type_guid,
						&nd_label->type_guid)) {
				dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
						&nd_set->type_guid,
						&nd_label->type_guid);
				continue;
			}

			if (found_uuid) {
				dev_dbg(ndd->dev, "duplicate entry for uuid\n");
				return false;
			}
			found_uuid = true;
			if (nlabel != nd_region->ndr_mappings)
				continue;
			if (position != pos)
				continue;
			found = nd_label;
			break;
		}
		if (found)
			break;
	}
	return found != NULL;
}

static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
{
	int i;

	if (!pmem_id)
		return -ENODEV;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_label *nd_label = NULL;
		u64 hw_start, hw_end, pmem_start, pmem_end;
		struct nd_label_ent *label_ent;

		lockdep_assert_held(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			nd_label = label_ent->label;
			if (!nd_label)
				continue;
			if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
				break;
			nd_label = NULL;
		}

		if (!nd_label) {
			WARN_ON(1);
			return -EINVAL;
		}

		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = __le64_to_cpu(nd_label->dpa);
		pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
		if (pmem_start >= hw_start && pmem_start < hw_end
				&& pmem_end <= hw_end && pmem_end > hw_start)
			/* pass */;
		else {
			dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
					dev_name(ndd->dev), nd_label->uuid);
			return -EINVAL;
		}

		/* move recently validated label to the front of the list */
		list_move(&label_ent->list, &nd_mapping->labels);
	}
	return 0;
}
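
/*
 * Sketch of the range check above (illustrative values): for a mapping
 * that publishes DPA 0x0-0x3fffffff, a label with dpa = 0x0 and
 * rawsize = 0x10000000 passes, while a stale label claiming
 * dpa = 0x40000000 falls outside [hw_start, hw_end) and fails namespace
 * assembly with -EINVAL.
 */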
1920
1921 /**
1922 * create_namespace_pmem - validate interleave set labelling, retrieve label0
1923 * @nd_region: region with mappings to validate
1924 * @nspm: target namespace to create
1925 * @nd_label: target pmem namespace label to evaluate
1926 */
1927 static struct device *create_namespace_pmem(struct nd_region *nd_region,
1928 struct nd_namespace_index *nsindex,
1929 struct nd_namespace_label *nd_label)
1930 {
1931 u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
1932 u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
1933 struct nd_label_ent *label_ent;
1934 struct nd_namespace_pmem *nspm;
1935 struct nd_mapping *nd_mapping;
1936 resource_size_t size = 0;
1937 struct resource *res;
1938 struct device *dev;
1939 int rc = 0;
1940 u16 i;
1941
1942 if (cookie == 0) {
1943 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
1944 return ERR_PTR(-ENXIO);
1945 }
1946
1947 if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
1948 dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
1949 nd_label->uuid);
1950 if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
1951 return ERR_PTR(-EAGAIN);
1952
1953 dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
1954 nd_label->uuid);
1955 }
1956
1957 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1958 if (!nspm)
1959 return ERR_PTR(-ENOMEM);
1960
1961 nspm->id = -1;
1962 dev = &nspm->nsio.common.dev;
1963 dev->type = &namespace_pmem_device_type;
1964 dev->parent = &nd_region->dev;
1965 res = &nspm->nsio.res;
1966 res->name = dev_name(&nd_region->dev);
1967 res->flags = IORESOURCE_MEM;
1968
1969 for (i = 0; i < nd_region->ndr_mappings; i++) {
1970 if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
1971 continue;
1972 if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
1973 continue;
1974 break;
1975 }
1976
1977 if (i < nd_region->ndr_mappings) {
1978 struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
1979
1980 /*
1981 * Give up if we don't find an instance of a uuid at each
1982 * position (from 0 to nd_region->ndr_mappings - 1), or if we
1983 * find a dimm with two instances of the same uuid.
1984 */
1985 dev_err(&nd_region->dev, "%s missing label for %pUb\n",
1986 nvdimm_name(nvdimm), nd_label->uuid);
1987 rc = -EINVAL;
1988 goto err;
1989 }
1990
1991 /*
1992 * Fix up each mapping's 'labels' to have the validated pmem label for
1993 * that position at labels[0], and NULL at labels[1]. In the process,
1994 * check that the namespace aligns with the interleave set. We know
1995 * that it does not overlap with any blk namespaces by virtue of
1996 * the dimm being enabled (i.e. nd_label_reserve_dpa()
1997 * succeeded).
1998 */
1999 rc = select_pmem_id(nd_region, nd_label->uuid);
2000 if (rc)
2001 goto err;
2002
2003 /* Calculate total size and populate namespace properties from label0 */
2004 for (i = 0; i < nd_region->ndr_mappings; i++) {
2005 struct nd_namespace_label *label0;
2006 struct nvdimm_drvdata *ndd;
2007
2008 nd_mapping = &nd_region->mapping[i];
2009 label_ent = list_first_entry_or_null(&nd_mapping->labels,
2010 typeof(*label_ent), list);
2011 label0 = label_ent ? label_ent->label : NULL;
2012
2013 if (!label0) {
2014 WARN_ON(1);
2015 continue;
2016 }
2017
2018 size += __le64_to_cpu(label0->rawsize);
2019 if (__le16_to_cpu(label0->position) != 0)
2020 continue;
2021 WARN_ON(nspm->alt_name || nspm->uuid);
2022 nspm->alt_name = kmemdup((void __force *) label0->name,
2023 NSLABEL_NAME_LEN, GFP_KERNEL);
2024 nspm->uuid = kmemdup((void __force *) label0->uuid,
2025 NSLABEL_UUID_LEN, GFP_KERNEL);
2026 nspm->lbasize = __le64_to_cpu(label0->lbasize);
2027 ndd = to_ndd(nd_mapping);
2028 if (namespace_label_has(ndd, abstraction_guid))
2029 nspm->nsio.common.claim_class
2030 = to_nvdimm_cclass(&label0->abstraction_guid);
2031
2032 }
2033
2034 if (!nspm->alt_name || !nspm->uuid) {
2035 rc = -ENOMEM;
2036 goto err;
2037 }
2038
2039 nd_namespace_pmem_set_resource(nd_region, nspm, size);
2040
2041 return dev;
2042 err:
2043 namespace_pmem_release(dev);
2044 switch (rc) {
2045 case -EINVAL:
2046 dev_dbg(&nd_region->dev, "invalid label(s)\n");
2047 break;
2048 case -ENODEV:
2049 dev_dbg(&nd_region->dev, "label not found\n");
2050 break;
2051 default:
2052 dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
2053 break;
2054 }
2055 return ERR_PTR(rc);
2056 }
2057
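/*
 * nsblk_add_resource() - grow the namespace's array of resource
 * pointers by one slot and point the new slot at the dpa reservation
 * in @ndd that starts at @start. Returns NULL on allocation failure or
 * when no matching reservation exists.
 */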
2058 struct resource *nsblk_add_resource(struct nd_region *nd_region,
2059 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
2060 resource_size_t start)
2061 {
2062 struct nd_label_id label_id;
2063 struct resource *res;
2064
2065 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
2066 res = krealloc(nsblk->res,
2067 sizeof(void *) * (nsblk->num_resources + 1),
2068 GFP_KERNEL);
2069 if (!res)
2070 return NULL;
2071 nsblk->res = (struct resource **) res;
2072 for_each_dpa_resource(ndd, res)
2073 if (strcmp(res->name, label_id.id) == 0
2074 && res->start == start) {
2075 nsblk->res[nsblk->num_resources++] = res;
2076 return res;
2077 }
2078 return NULL;
2079 }
2080
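/*
 * Allocate an empty blk namespace to serve as the region's
 * provisioning seed; the device is named here, but registration is
 * left to nd_region_create_ns_seed().
 */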
2081 static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
2082 {
2083 struct nd_namespace_blk *nsblk;
2084 struct device *dev;
2085
2086 if (!is_nd_blk(&nd_region->dev))
2087 return NULL;
2088
2089 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2090 if (!nsblk)
2091 return NULL;
2092
2093 dev = &nsblk->common.dev;
2094 dev->type = &namespace_blk_device_type;
2095 nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
2096 if (nsblk->id < 0) {
2097 kfree(nsblk);
2098 return NULL;
2099 }
2100 dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
2101 dev->parent = &nd_region->dev;
2102
2103 return &nsblk->common.dev;
2104 }
2105
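/*
 * Allocate a zero-sized pmem namespace to serve as the region's
 * provisioning seed; as with the blk variant, registration is left to
 * the caller.
 */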
2106 static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
2107 {
2108 struct nd_namespace_pmem *nspm;
2109 struct resource *res;
2110 struct device *dev;
2111
2112 if (!is_memory(&nd_region->dev))
2113 return NULL;
2114
2115 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
2116 if (!nspm)
2117 return NULL;
2118
2119 dev = &nspm->nsio.common.dev;
2120 dev->type = &namespace_pmem_device_type;
2121 dev->parent = &nd_region->dev;
2122 res = &nspm->nsio.res;
2123 res->name = dev_name(&nd_region->dev);
2124 res->flags = IORESOURCE_MEM;
2125
2126 nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
2127 if (nspm->id < 0) {
2128 kfree(nspm);
2129 return NULL;
2130 }
2131 dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
2132 nd_namespace_pmem_set_resource(nd_region, nspm, 0);
2133
2134 return dev;
2135 }
2136
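/*
 * Install a fresh seed namespace for the region. The nvdimm bus lock
 * must be held, per the WARN_ON below.
 */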
2137 void nd_region_create_ns_seed(struct nd_region *nd_region)
2138 {
2139 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2140
2141 if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
2142 return;
2143
2144 if (is_nd_blk(&nd_region->dev))
2145 nd_region->ns_seed = nd_namespace_blk_create(nd_region);
2146 else
2147 nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
2148
2149 /*
2150 * Seed creation failures are not fatal; provisioning is simply
2151 * disabled until memory becomes available.
2152 */
2153 if (!nd_region->ns_seed)
2154 dev_err(&nd_region->dev, "failed to create %s namespace\n",
2155 is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
2156 else
2157 nd_device_register(nd_region->ns_seed);
2158 }
2159
2160 void nd_region_create_dax_seed(struct nd_region *nd_region)
2161 {
2162 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2163 nd_region->dax_seed = nd_dax_create(nd_region);
2164 /*
2165 * Seed creation failures are not fatal; provisioning is simply
2166 * disabled until memory becomes available.
2167 */
2168 if (!nd_region->dax_seed)
2169 dev_err(&nd_region->dev, "failed to create dax namespace\n");
2170 }
2171
2172 void nd_region_create_pfn_seed(struct nd_region *nd_region)
2173 {
2174 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2175 nd_region->pfn_seed = nd_pfn_create(nd_region);
2176 /*
2177 * Seed creation failures are not fatal; provisioning is simply
2178 * disabled until memory becomes available.
2179 */
2180 if (!nd_region->pfn_seed)
2181 dev_err(&nd_region->dev, "failed to create pfn namespace\n");
2182 }
2183
2184 void nd_region_create_btt_seed(struct nd_region *nd_region)
2185 {
2186 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2187 nd_region->btt_seed = nd_btt_create(nd_region);
2188 /*
2189 * Seed creation failures are not fatal; provisioning is simply
2190 * disabled until memory becomes available.
2191 */
2192 if (!nd_region->btt_seed)
2193 dev_err(&nd_region->dev, "failed to create btt namespace\n");
2194 }
2195
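/*
 * add_namespace_resource() - attach @nd_label's dpa extent to an
 * already-discovered namespace with the same uuid. Returns the index
 * of the matching device, @count when nothing matched (i.e. a new
 * namespace device is needed), or a negative errno on conflict.
 */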
2196 static int add_namespace_resource(struct nd_region *nd_region,
2197 struct nd_namespace_label *nd_label, struct device **devs,
2198 int count)
2199 {
2200 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2201 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2202 int i;
2203
2204 for (i = 0; i < count; i++) {
2205 u8 *uuid = namespace_to_uuid(devs[i]);
2206 struct resource *res;
2207
2208 if (IS_ERR_OR_NULL(uuid)) {
2209 WARN_ON(1);
2210 continue;
2211 }
2212
2213 if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
2214 continue;
2215 if (is_namespace_blk(devs[i])) {
2216 res = nsblk_add_resource(nd_region, ndd,
2217 to_nd_namespace_blk(devs[i]),
2218 __le64_to_cpu(nd_label->dpa));
2219 if (!res)
2220 return -ENXIO;
2221 nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
2222 } else {
2223 dev_err(&nd_region->dev,
2224 "error: conflicting extents for uuid: %pUb\n",
2225 nd_label->uuid);
2226 return -ENXIO;
2227 }
2228 break;
2229 }
2230
2231 return i;
2232 }
2233
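/*
 * create_namespace_blk() - build an nd_namespace_blk device from
 * @nd_label, validating the type_guid and local-set cookie when the
 * label format carries them.
 */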
2234 static struct device *create_namespace_blk(struct nd_region *nd_region,
2235 struct nd_namespace_label *nd_label, int count)
2236 {
2238 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2239 struct nd_interleave_set *nd_set = nd_region->nd_set;
2240 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2241 struct nd_namespace_blk *nsblk;
2242 char name[NSLABEL_NAME_LEN];
2243 struct device *dev = NULL;
2244 struct resource *res;
2245
2246 if (namespace_label_has(ndd, type_guid)) {
2247 if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
2248 dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
2249 &nd_set->type_guid,
2250 &nd_label->type_guid);
2251 return ERR_PTR(-EAGAIN);
2252 }
2253
2254 if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
2255 dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
2256 nd_set->cookie2,
2257 __le64_to_cpu(nd_label->isetcookie));
2258 return ERR_PTR(-EAGAIN);
2259 }
2260 }
2261
2262 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2263 if (!nsblk)
2264 return ERR_PTR(-ENOMEM);
2265 dev = &nsblk->common.dev;
2266 dev->type = &namespace_blk_device_type;
2267 dev->parent = &nd_region->dev;
2268 nsblk->id = -1;
2269 nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
2270 nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
2271 GFP_KERNEL);
2272 if (namespace_label_has(ndd, abstraction_guid))
2273 nsblk->common.claim_class
2274 = to_nvdimm_cclass(&nd_label->abstraction_guid);
2275 if (!nsblk->uuid)
2276 goto blk_err;
2277 memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
2278 if (name[0]) {
2279 nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
2280 GFP_KERNEL);
2281 if (!nsblk->alt_name)
2282 goto blk_err;
2283 }
2284 res = nsblk_add_resource(nd_region, ndd, nsblk,
2285 __le64_to_cpu(nd_label->dpa));
2286 if (!res)
2287 goto blk_err;
2288 nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
2289 return dev;
2290 blk_err:
2291 namespace_blk_release(dev);
2292 return ERR_PTR(-ENXIO);
2293 }
2294
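/*
 * Sort helper: order namespace devices by the start of their first
 * dpa resource (compared bytewise via memcmp); io namespaces keep
 * their position.
 */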
2295 static int cmp_dpa(const void *a, const void *b)
2296 {
2297 const struct device *dev_a = *(const struct device **) a;
2298 const struct device *dev_b = *(const struct device **) b;
2299 struct nd_namespace_blk *nsblk_a, *nsblk_b;
2300 struct nd_namespace_pmem *nspm_a, *nspm_b;
2301
2302 if (is_namespace_io(dev_a))
2303 return 0;
2304
2305 if (is_namespace_blk(dev_a)) {
2306 nsblk_a = to_nd_namespace_blk(dev_a);
2307 nsblk_b = to_nd_namespace_blk(dev_b);
2308
2309 return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
2310 sizeof(resource_size_t));
2311 }
2312
2313 nspm_a = to_nd_namespace_pmem(dev_a);
2314 nspm_b = to_nd_namespace_pmem(dev_b);
2315
2316 return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
2317 sizeof(resource_size_t));
2318 }
2319
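/*
 * scan_labels() - core namespace discovery. Walk the labels of the
 * first mapping, instantiate a namespace device for each label that
 * matches the region type and falls inside the mapping, and publish a
 * zero-sized seed namespace when nothing is found. For pmem regions,
 * trim each mapping's label list down to the labels just selected,
 * then sort the resulting device array by dpa.
 */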
2320 static struct device **scan_labels(struct nd_region *nd_region)
2321 {
2322 int i, count = 0;
2323 struct device *dev, **devs = NULL;
2324 struct nd_label_ent *label_ent, *e;
2325 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2326 resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
2327
2328 /* "safe" because create_namespace_pmem() might list_move() label_ent */
2329 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
2330 struct nd_namespace_label *nd_label = label_ent->label;
2331 struct device **__devs;
2332 u32 flags;
2333
2334 if (!nd_label)
2335 continue;
2336 flags = __le32_to_cpu(nd_label->flags);
2337 if (is_nd_blk(&nd_region->dev)
2338 == !!(flags & NSLABEL_FLAG_LOCAL))
2339 /* pass, region matches label type */;
2340 else
2341 continue;
2342
2343 /* skip labels that describe extents outside of the region */
2344 if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
2345 __le64_to_cpu(nd_label->dpa) > map_end)
2346 continue;
2347
2348 i = add_namespace_resource(nd_region, nd_label, devs, count);
2349 if (i < 0)
2350 goto err;
2351 if (i < count)
2352 continue;
2353 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
2354 if (!__devs)
2355 goto err;
2356 memcpy(__devs, devs, sizeof(dev) * count);
2357 kfree(devs);
2358 devs = __devs;
2359
2360 if (is_nd_blk(&nd_region->dev))
2361 dev = create_namespace_blk(nd_region, nd_label, count);
2362 else {
2363 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2364 struct nd_namespace_index *nsindex;
2365
2366 nsindex = to_namespace_index(ndd, ndd->ns_current);
2367 dev = create_namespace_pmem(nd_region, nsindex, nd_label);
2368 }
2369
2370 if (IS_ERR(dev)) {
2371 switch (PTR_ERR(dev)) {
2372 case -EAGAIN:
2373 /* skip invalid labels */
2374 continue;
2375 case -ENODEV:
2376 /* fallthrough to seed creation */
2377 break;
2378 default:
2379 goto err;
2380 }
2381 } else
2382 devs[count++] = dev;
2383
2384 }
2385
2386 dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
2387 count, is_nd_blk(&nd_region->dev)
2388 ? "blk" : "pmem", count == 1 ? "" : "s");
2389
2390 if (count == 0) {
2391 /* Publish a zero-sized namespace for userspace to configure. */
2392 nd_mapping_free_labels(nd_mapping);
2393
2394 devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
2395 if (!devs)
2396 goto err;
2397 if (is_nd_blk(&nd_region->dev)) {
2398 struct nd_namespace_blk *nsblk;
2399
2400 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
2401 if (!nsblk)
2402 goto err;
2403 dev = &nsblk->common.dev;
2404 dev->type = &namespace_blk_device_type;
2405 } else {
2406 struct nd_namespace_pmem *nspm;
2407
2408 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
2409 if (!nspm)
2410 goto err;
2411 dev = &nspm->nsio.common.dev;
2412 dev->type = &namespace_pmem_device_type;
2413 nd_namespace_pmem_set_resource(nd_region, nspm, 0);
2414 }
2415 dev->parent = &nd_region->dev;
2416 devs[count++] = dev;
2417 } else if (is_memory(&nd_region->dev)) {
2418 /* clean unselected labels */
2419 for (i = 0; i < nd_region->ndr_mappings; i++) {
2420 struct list_head *l, *e;
2421 LIST_HEAD(list);
2422 int j;
2423
2424 nd_mapping = &nd_region->mapping[i];
2425 if (list_empty(&nd_mapping->labels)) {
2426 WARN_ON(1);
2427 continue;
2428 }
2429
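/*
 * The first 'count' entries were promoted to the head of each
 * mapping's list by select_pmem_id(); keep those and free the
 * rest.
 */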
2430 j = count;
2431 list_for_each_safe(l, e, &nd_mapping->labels) {
2432 if (!j--)
2433 break;
2434 list_move_tail(l, &list);
2435 }
2436 nd_mapping_free_labels(nd_mapping);
2437 list_splice_init(&list, &nd_mapping->labels);
2438 }
2439 }
2440
2441 if (count > 1)
2442 sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);
2443
2444 return devs;
2445
2446 err:
2447 if (devs) {
2448 for (i = 0; devs[i]; i++)
2449 if (is_nd_blk(&nd_region->dev))
2450 namespace_blk_release(devs[i]);
2451 else
2452 namespace_pmem_release(devs[i]);
2453 kfree(devs);
2454 }
2455 return NULL;
2456 }
2457
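/*
 * Take every mapping lock in a fixed order, each with its own lockdep
 * subclass so the nested acquisition is not flagged, scan for
 * namespaces, then drop the locks in reverse order.
 */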
2458 static struct device **create_namespaces(struct nd_region *nd_region)
2459 {
2460 struct nd_mapping *nd_mapping;
2461 struct device **devs;
2462 int i;
2463
2464 if (nd_region->ndr_mappings == 0)
2465 return NULL;
2466
2467 /* lock down all mappings while we scan labels */
2468 for (i = 0; i < nd_region->ndr_mappings; i++) {
2469 nd_mapping = &nd_region->mapping[i];
2470 mutex_lock_nested(&nd_mapping->lock, i);
2471 }
2472
2473 devs = scan_labels(nd_region);
2474
2475 for (i = 0; i < nd_region->ndr_mappings; i++) {
2476 int reverse = nd_region->ndr_mappings - 1 - i;
2477
2478 nd_mapping = &nd_region->mapping[reverse];
2479 mutex_unlock(&nd_mapping->lock);
2480 }
2481
2482 return devs;
2483 }
2484
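/*
 * Undo init_active_labels(): free each mapping's label list and drop
 * the reference and busy count taken on the backing dimm.
 */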
2485 static void deactivate_labels(void *region)
2486 {
2487 struct nd_region *nd_region = region;
2488 int i;
2489
2490 for (i = 0; i < nd_region->ndr_mappings; i++) {
2491 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2492 struct nvdimm_drvdata *ndd = nd_mapping->ndd;
2493 struct nvdimm *nvdimm = nd_mapping->nvdimm;
2494
2495 mutex_lock(&nd_mapping->lock);
2496 nd_mapping_free_labels(nd_mapping);
2497 mutex_unlock(&nd_mapping->lock);
2498
2499 put_ndd(ndd);
2500 nd_mapping->ndd = NULL;
2501 if (ndd)
2502 atomic_dec(&nvdimm->busy);
2503 }
2504 }
2505
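/*
 * init_active_labels() - pin each mapping's dimm driver-data and copy
 * the dimm's active labels onto nd_mapping->labels. A disabled dimm
 * fails the region probe when it is locked or needs labels to
 * disambiguate aliased dpa; allocation failures unwind via
 * deactivate_labels().
 */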
2506 static int init_active_labels(struct nd_region *nd_region)
2507 {
2508 int i;
2509
2510 for (i = 0; i < nd_region->ndr_mappings; i++) {
2511 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2512 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2513 struct nvdimm *nvdimm = nd_mapping->nvdimm;
2514 struct nd_label_ent *label_ent;
2515 int count, j;
2516
2517 /*
2518 * If the dimm is disabled then we may need to prevent
2519 * the region from being activated.
2520 */
2521 if (!ndd) {
2522 if (test_bit(NDD_LOCKED, &nvdimm->flags))
2523 /* fail, label data may be unreadable */;
2524 else if (test_bit(NDD_ALIASING, &nvdimm->flags))
2525 /* fail, labels needed to disambiguate dpa */;
2526 else
2527 return 0;
2528
2529 dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
2530 dev_name(&nd_mapping->nvdimm->dev),
2531 test_bit(NDD_LOCKED, &nvdimm->flags)
2532 ? "locked" : "disabled");
2533 return -ENXIO;
2534 }
2535 nd_mapping->ndd = ndd;
2536 atomic_inc(&nvdimm->busy);
2537 get_ndd(ndd);
2538
2539 count = nd_label_active_count(ndd);
2540 dev_dbg(ndd->dev, "count: %d\n", count);
2541 if (!count)
2542 continue;
2543 for (j = 0; j < count; j++) {
2544 struct nd_namespace_label *label;
2545
2546 label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
2547 if (!label_ent)
2548 break;
2549 label = nd_label_active(ndd, j);
2550 if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
2551 u32 flags = __le32_to_cpu(label->flags);
2552
2553 flags &= ~NSLABEL_FLAG_LOCAL;
2554 label->flags = __cpu_to_le32(flags);
2555 }
2556 label_ent->label = label;
2557
2558 mutex_lock(&nd_mapping->lock);
2559 list_add_tail(&label_ent->list, &nd_mapping->labels);
2560 mutex_unlock(&nd_mapping->lock);
2561 }
2562
2563 if (j < count)
2564 break;
2565 }
2566
2567 if (i < nd_region->ndr_mappings) {
2568 deactivate_labels(nd_region);
2569 return -ENOMEM;
2570 }
2571
2572 return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
2573 nd_region);
2574 }
2575
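/**
 * nd_region_register_namespaces - scan and register namespaces for a region
 * @nd_region: region to scan for namespaces
 * @err: filled in with the number of namespaces that failed to register
 *
 * Returns the number of namespaces registered, or a negative error
 * code when label initialization fails or no namespace could be
 * activated.
 */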
2576 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
2577 {
2578 struct device **devs = NULL;
2579 int i, rc = 0, type;
2580
2581 *err = 0;
2582 nvdimm_bus_lock(&nd_region->dev);
2583 rc = init_active_labels(nd_region);
2584 if (rc) {
2585 nvdimm_bus_unlock(&nd_region->dev);
2586 return rc;
2587 }
2588
2589 type = nd_region_to_nstype(nd_region);
2590 switch (type) {
2591 case ND_DEVICE_NAMESPACE_IO:
2592 devs = create_namespace_io(nd_region);
2593 break;
2594 case ND_DEVICE_NAMESPACE_PMEM:
2595 case ND_DEVICE_NAMESPACE_BLK:
2596 devs = create_namespaces(nd_region);
2597 break;
2598 default:
2599 break;
2600 }
2601 nvdimm_bus_unlock(&nd_region->dev);
2602
2603 if (!devs)
2604 return -ENODEV;
2605
2606 for (i = 0; devs[i]; i++) {
2607 struct device *dev = devs[i];
2608 int id;
2609
2610 if (type == ND_DEVICE_NAMESPACE_BLK) {
2611 struct nd_namespace_blk *nsblk;
2612
2613 nsblk = to_nd_namespace_blk(dev);
2614 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2615 GFP_KERNEL);
2616 nsblk->id = id;
2617 } else if (type == ND_DEVICE_NAMESPACE_PMEM) {
2618 struct nd_namespace_pmem *nspm;
2619
2620 nspm = to_nd_namespace_pmem(dev);
2621 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2622 GFP_KERNEL);
2623 nspm->id = id;
2624 } else
2625 id = i;
2626
2627 if (id < 0)
2628 break;
2629 dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
2630 nd_device_register(dev);
2631 }
2632 if (i)
2633 nd_region->ns_seed = devs[0];
2634
2635 if (devs[i]) {
2636 int j;
2637
2638 for (j = i; devs[j]; j++) {
2639 struct device *dev = devs[j];
2640
2641 device_initialize(dev);
2642 put_device(dev);
2643 }
2644 *err = j - i;
2645 /*
2646 * All of the namespaces we tried to register failed, so
2647 * fail region activation.
2648 */
2649 if (i == 0)
2650 rc = -ENODEV;
2651 }
2652 kfree(devs);
2653
2654 if (rc == -ENODEV)
2655 return rc;
2656
2657 return i;
2658 }