/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"
static DEFINE_IDA(dimm_ida);
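/*
 * dimm_ida hands out the unique ids behind the "nmem%d" device names
 * assigned in __nvdimm_create() below.
 */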
/*
 * Retrieve bus and dimm handle and return if this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_ALIASING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%pf: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}
/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
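/*
 * Read from the dimm's label area. The transfer is chunked: each ndctl
 * call moves at most nsarea.max_xfer bytes, with cmd->in_offset and
 * cmd->in_length advanced per iteration until 'len' bytes starting at
 * 'offset' have been copied into 'buf'.
 */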
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
			len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}
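/*
 * Write to the dimm's label area, mirroring the chunked read path
 * above. The command buffer is sized with a trailing u32 because the
 * firmware reports the write status in the last 4 bytes of each
 * ND_CMD_SET_CONFIG_DATA submission.
 */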
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}
void nvdimm_set_aliasing(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_ALIASING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}
static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}
static struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
};
bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}
struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);
struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);
unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);
void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}
void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}
void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}
const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);
void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);
static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);
static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);
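/*
 * Note for available_slots_show() below: nd_label_nfree() returns the
 * number of free label slots and the attribute reports one less, i.e.
 * a slot is always held in reserve. Since nfree is unsigned, the
 * "nfree - 1 > nfree" test catches the underflow that would occur if
 * the reserved slot were ever consumed.
 */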
static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);
static ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	switch (nvdimm->sec.state) {
	case NVDIMM_SECURITY_DISABLED:
		return sprintf(buf, "disabled\n");
	case NVDIMM_SECURITY_UNLOCKED:
		return sprintf(buf, "unlocked\n");
	case NVDIMM_SECURITY_LOCKED:
		return sprintf(buf, "locked\n");
	case NVDIMM_SECURITY_FROZEN:
		return sprintf(buf, "frozen\n");
	case NVDIMM_SECURITY_OVERWRITE:
		return sprintf(buf, "overwrite\n");
	default:
		return -ENOTTY;
	}
}
#define OPS \
	C( OP_FREEZE, "freeze", 1), \
	C( OP_DISABLE, "disable", 2)

#define C(a, b, c) a
enum nvdimmsec_op_ids { OPS };
#undef C
#define C(a, b, c) { b, c }
static struct {
	const char *name;
	int args;
} ops[] = { OPS };
#undef C
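/*
 * X-macro note: with "#define C(a, b, c) a" OPS expands to
 * "OP_FREEZE, OP_DISABLE" for the enum; redefined as "{ b, c }" it
 * expands to "{ "freeze", 1 }, { "disable", 2 }" for ops[], keeping
 * the two tables in sync by construction.
 */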
#define SEC_CMD_SIZE 32
#define KEY_ID_SIZE 10
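/*
 * Example usage from userspace (assuming a dimm registered as nmem0 on
 * the 'nd' bus; key ids refer to keys previously loaded by userspace):
 *
 *   echo freeze > /sys/bus/nd/devices/nmem0/security
 *   echo "disable <key_id>" > /sys/bus/nd/devices/nmem0/security
 */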
static ssize_t __security_store(struct device *dev, const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	ssize_t rc;
	char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
		nkeystr[KEY_ID_SIZE+1];
	unsigned int key, newkey;
	int i;

	if (atomic_read(&nvdimm->busy))
		return -EBUSY;

	rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s",
			cmd, keystr, nkeystr);
	if (rc < 1)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(ops); i++)
		if (sysfs_streq(cmd, ops[i].name))
			break;
	if (i >= ARRAY_SIZE(ops))
		return -EINVAL;
	if (ops[i].args > 1)
		rc = kstrtouint(keystr, 0, &key);
	if (rc >= 0 && ops[i].args > 2)
		rc = kstrtouint(nkeystr, 0, &newkey);
	if (rc < 0)
		return rc;

	if (i == OP_FREEZE) {
		dev_dbg(dev, "freeze\n");
		rc = nvdimm_security_freeze(nvdimm);
	} else if (i == OP_DISABLE) {
		dev_dbg(dev, "disable %u\n", key);
		rc = nvdimm_security_disable(nvdimm, key);
	} else
		return -EINVAL;

	if (rc == 0)
		rc = len;
	return rc;
}
static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);
static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	NULL,
};
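/*
 * nvdimm_visible() below implements the policy for the security
 * attribute: hide it when the bus provider reports no valid security
 * state, and degrade it to read-only when no state-mutation ops are
 * registered.
 */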
static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr)
		return a->mode;
	if (nvdimm->sec.state < 0)
		return 0;
	/* Are there any state mutation ops? */
	if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable)
		return a->mode;
	return 0444;
}
struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	nvdimm->sec.state = nvdimm_security_state(nvdimm);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);
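/*
 * A bus provider with no security support would typically reach
 * __nvdimm_create() via a wrapper passing NULL for dimm_id and
 * sec_ops, e.g. (sketch only, not a definition from this file):
 *
 *   nvdimm = __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
 *                            cmd_mask, num_flush, flush_wpq, NULL, NULL);
 */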
int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (nvdimm->sec.state < 0)
		return -EIO;

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.state = nvdimm_security_state(nvdimm);

	return rc;
}
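/*
 * alias_dpa_busy() is a device_for_each_child() callback. For each
 * region that shares a dimm with info->nd_mapping it walks the dimm's
 * dpa resources to find where the last aliased PMEM allocation ends,
 * then shrinks the BLK free-space estimate accordingly.
 */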
int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "pmem", 4) != 0)
			continue;
		if ((res->start >= blk_start && res->start < map_end)
				|| (res->end >= blk_start
					&& res->end <= map_end)) {
			new = max(blk_start, min(map_end + 1, res->end + 1));
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}
/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	for_each_dpa_resource(ndd, res) {
		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		info.available -= resource_size(res);
	}

	return info.available;
}
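/*
 * Example for nd_blk_available_dpa() above: given a 4G BLK mapping
 * whose aliased PMEM allocations end 1G into the range,
 * alias_dpa_busy() trims 'available' to 3G, and any existing "blk"
 * allocations in the remaining span are then subtracted as well.
 */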
/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		if (resource_size(res) > max)
			max = resource_size(res);
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}
/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set. Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;

	if (!ndd)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		if (res->start >= map_start && res->start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, res->start));
			else if (res->end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += resource_size(res);
		} else if (res->end >= map_start && res->end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += resource_size(res);
		} else if (map_start > res->start && map_start < res->end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return available - busy;
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}
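/*
 * Worked example for nd_pmem_available_dpa() above: for a 16G mapping
 * with a "blk" allocation starting 12G in and 2G of "pmem" allocations
 * at the base, blk_start resolves to map_start + 12G, so *overlap
 * becomes 4G and the routine returns 12G - 2G = 10G of available PMEM.
 */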
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}
/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}
static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);
void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}