/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm,
		int dimm, struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}
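/*
 * Worked example (illustrative, the addresses are hypothetical): if a
 * DIMM publishes two flush hints at 0x1000040 and 0x10000c0, both
 * resolve to the same pfn, so the inner loop above breaks with j < i
 * and the second table entry reuses the first page mapping:
 *
 *	flush_wpq[0] = devm_nvdimm_ioremap(...) + 0x40
 *	flush_wpq[1] = (flush_wpq[0] & PAGE_MASK) + 0xc0
 *
 * i.e. only one ioremap is performed per distinct hint page.
 */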
int nd_region_activate(struct nd_region *nd_region)
{
	int i, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	return 0;
}
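/*
 * Sizing sketch (hypothetical numbers): a region spanning two DIMMs
 * with two flush hints each reserves, on a 64-bit build:
 *
 *	flush_data_size = 8		(base)
 *			+ 2 * 8		(one null slot per DIMM)
 *			+ 2 * 2 * 8	(the hints themselves)
 *			= 56 bytes
 *
 * and hints_shift = ilog2(2) = 1, so each DIMM owns a two-entry row
 * in the flush table.
 */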
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}
static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};
bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}
struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);
struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);
void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and bit number for a nvdimm_bus to match
 * namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
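/*
 * For instance, a pmem region whose DIMMs carry NDD_ALIASING resolves
 * to ND_DEVICE_NAMESPACE_PMEM (label-managed namespaces), the same
 * region without aliasing resolves to ND_DEVICE_NAMESPACE_IO, and a
 * blk region always resolves to ND_DEVICE_NAMESPACE_BLK.
 */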
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);
static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);
static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_mapping);
	}

	return available;
}
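/*
 * Worked example (hypothetical layout): when BLK capacity is allocated
 * from a DIMM that also contributes to a PMEM interleave, the BLK range
 * shadows ("overlaps") part of the PMEM range.  nd_pmem_available_dpa()
 * reports that per-dimm overlap, and the region-wide maximum is replayed
 * through the 'retry' label so every mapping is debited consistently.
 */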
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);
static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);
static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);
static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);
static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);
static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);
static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}
struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}
/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}
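/*
 * Example (device names and values are hypothetical): reading a
 * mapping attribute yields "<dimm>,<start>,<size>", e.g.:
 *
 *	# cat /sys/bus/nd/devices/region0/mapping0
 *	nmem0,268435456,134217728
 */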
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)
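/*
 * For reference, REGION_MAPPING(0) expands to:
 *
 *	static ssize_t mapping0_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		return mappingN(dev, buf, 0);
 *	}
 *	static DEVICE_ATTR_RO(mapping0);
 */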
/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;

	return 0;
}
static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};
struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return -ENXIO;
	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}
/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
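/*
 * Usage sketch (illustrative, not part of this file): a BTT-style
 * consumer brackets its I/O to a shared resource with a lane, and the
 * lane number selects the BLK data window and/or BTT log slot:
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... submit I/O through the resources reserved for 'lane' ...
 *	nd_region_release_lane(nd_region, lane);
 *
 * Note that acquire disables preemption via get_cpu() until the
 * matching release, so the bracketed section must not sleep.
 */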
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings, GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings, GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
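/*
 * Provider usage sketch (illustrative; 'pmem_res' and 'mappings' are
 * hypothetical, see struct nd_region_desc for the full field set): a
 * bus implementation fills a descriptor and registers a region:
 *
 *	struct nd_region_desc ndr_desc = {
 *		.res = &pmem_res,
 *		.nd_mapping = mappings,
 *		.num_mappings = ARRAY_SIZE(mappings),
 *		.numa_node = NUMA_NO_NODE,
 *	};
 *	struct nd_region *region;
 *
 *	region = nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc);
 *	if (!region)
 *		return -ENXIO;
 */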
/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via arch_memcpy_to_pmem().  The
	 * final wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
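/*
 * Caller sketch (illustrative): a pmem consumer makes writes durable by
 * moving data to the media with a cache-bypassing copy and then
 * draining the posted-write queues:
 *
 *	memcpy_to_pmem(pmem_addr, buf, len);
 *	nvdimm_flush(nd_region);
 *
 * The copy avoids the cpu cache; nvdimm_flush() then flushes any
 * buffers that may still sit between the cpu and the media.
 */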
/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++)
		/* flush hints present, flushing required */
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			return 1;

	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
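/*
 * Caller sketch (illustrative): consumers treat the capability as
 * tri-state:
 *
 *	rc = nvdimm_has_flush(nd_region);
 *	if (rc < 0)
 *		dev_warn(dev, "unable to determine flush requirements\n");
 *	else if (rc > 0)
 *		... issue nvdimm_flush() after writes ...
 *	else
 *		... rely on platform persistence (e.g. ADR) ...
 */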
void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}