/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < nvdimm->num_flush; i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			/* reuse the mapping of the already-ioremapped page */
			flush_page = (void __iomem *) ((unsigned long)
					ndrd->flush_wpq[dimm][j] & PAGE_MASK);
		else
			/* map the hint's page at its physical address */
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd->flush_wpq[dimm][i] = flush_page
			+ (res->start & ~PAGE_MASK);
	}

	return 0;
}
int nd_region_activate(struct nd_region *nd_region)
{
	int i, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	return 0;
}
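/*
 * Illustrative sketch (not part of the driver logic): after
 * nd_region_activate() the region's private data holds one row of
 * write-pmem-queue (WPQ) flush hint addresses per mapped DIMM, e.g. for a
 * two-DIMM interleave set with two hints each:
 *
 *	ndrd->flush_wpq[0][0..1]  -> ioremapped hints for mapping 0
 *	ndrd->flush_wpq[1][0..1]  -> ioremapped hints for mapping 1
 *
 * Hints that fall in the same physical page share one ioremap, and a DIMM
 * with no hints leaves its row zeroed (the "no-hint" slot reserved above).
 */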
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}
static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};
bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
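/*
 * Illustrative example (a sketch, not captured output): the value computed
 * above is what userspace reads back from the region's 'nstype' sysfs
 * attribute (see nstype_show() below):
 *
 *	$ cat /sys/bus/nd/devices/region0/nstype
 *	<one of the ND_DEVICE_NAMESPACE_{IO,PMEM,BLK} values from the uapi headers>
 */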
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);
static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);
static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);
static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}
void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	WARN_ON(!mutex_is_locked(&nd_mapping->lock));
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}
/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)
/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);
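/*
 * Illustrative example (a sketch with made-up numbers): each mappingN
 * attribute prints "<dimm-name>,<start>,<size>" as formatted by mappingN()
 * above, e.g.:
 *
 *	$ cat /sys/bus/nd/devices/region0/mapping0
 *	nmem0,268435456,17179869184
 */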
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;

	return 0;
}
static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}
/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
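/*
 * Illustrative usage (a minimal sketch, not part of this file): a BTT or
 * BLK I/O path brackets its per-lane work with the helpers above.  The
 * do_one_io() callee named here is hypothetical.
 *
 *	unsigned int lane;
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	rc = do_one_io(nd_region, lane, ...);
 *	nd_region_release_lane(nd_region, lane);
 */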
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
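/*
 * Illustrative usage (a minimal sketch): a bus provider that has already
 * registered its nvdimms fills an nd_region_desc and registers a pmem
 * region.  The identifiers spa_res, mappings[] and my_region_attr_groups
 * are hypothetical stand-ins for provider-specific data.
 *
 *	struct nd_region_desc ndr_desc = {
 *		.res = &spa_res,
 *		.mapping = mappings,
 *		.num_mappings = ARRAY_SIZE(mappings),
 *		.attr_groups = my_region_attr_groups,
 *		.numa_node = NUMA_NO_NODE,
 *	};
 *
 *	if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
 *		return -ENOMEM;
 */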
/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via arch_memcpy_to_pmem().  The
	 * final wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd->flush_wpq[i][0])
			writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
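/*
 * Illustrative usage (a minimal sketch): a pmem block driver flushes the
 * posted-write queues after copying write data to media with
 * cache-bypassing stores:
 *
 *	memcpy_to_pmem(pmem_addr, mem + off, len);
 *	nvdimm_flush(nd_region);
 */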
/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++)
		/* flush hints present, flushing required */
		if (ndrd->flush_wpq[i][0])
			return 1;

	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
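/*
 * Illustrative usage (a minimal sketch): a region driver checks the
 * flushing requirement once at probe time and warns when it cannot be
 * determined:
 *
 *	if (nvdimm_has_flush(nd_region) < 0)
 *		dev_warn(dev, "unable to guarantee persistence of writes\n");
 */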
void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}