memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.res = &res;
ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
+ ndr_desc.numa_node = NUMA_NO_NODE;
if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
goto err;
}
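The e820 path registers legacy pmem ranges described only by the BIOS memory map, which carries no locality information, so the descriptor's node is set explicitly to NUMA_NO_NODE rather than left as the 0 that memset() produces (node 0 is a valid node, so it would wrongly claim affinity). A minimal sketch of how a consumer might resolve that sentinel later; example_resolve_node() is a hypothetical name, while NUMA_NO_NODE and numa_mem_id() are standard kernel symbols:

#include <linux/numa.h>
#include <linux/topology.h>

/*
 * Hypothetical helper, not part of this patch: resolve a region's
 * node, falling back to the caller's nearest memory node when the
 * firmware provided no affinity information.
 */
static int example_resolve_node(int numa_node)
{
        if (numa_node == NUMA_NO_NODE)
                return numa_mem_id();   /* nearest node with memory */
        return numa_node;
}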
ndr_desc->res = &res;
ndr_desc->provider_data = nfit_spa;
ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
+ if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
+ ndr_desc->numa_node = acpi_map_pxm_to_online_node(
+ spa->proximity_domain);
+ else
+ ndr_desc->numa_node = NUMA_NO_NODE;
+
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
struct nd_mapping *nd_mapping;
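In the NFIT path the firmware does describe locality: each SPA range carries a proximity domain, which is only meaningful when ACPI_NFIT_PROXIMITY_VALID is set in its flags. acpi_map_pxm_to_online_node() is used rather than a plain pxm lookup because persistent memory can sit in a proximity domain whose node is not online; the helper then falls back to a nearby online node. A simplified sketch of that mapping pattern, assuming the era's ACPI/NUMA APIs (acpi_map_pxm_to_node(), node_online(), node_distance()); example_pxm_to_online_node() is a hypothetical name and this is not the helper's actual implementation:

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/nodemask.h>
#include <linux/numa.h>
#include <linux/topology.h>

/*
 * Simplified sketch of the online-node fallback: map the proximity
 * domain, and if the resulting node is offline, pick the closest
 * online node by distance.
 */
static int example_pxm_to_online_node(int pxm)
{
        int node = acpi_map_pxm_to_node(pxm);
        int n, best = NUMA_NO_NODE, dist = INT_MAX;

        if (node == NUMA_NO_NODE || node_online(node))
                return node;

        for_each_online_node(n) {
                if (node_distance(node, n) < dist) {
                        dist = node_distance(node, n);
                        best = n;
                }
        }
        return best;
}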
static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
+ /*
+ * Ensure that region devices always have their numa node set as
+ * early as possible.
+ */
+ if (is_nd_pmem(dev) || is_nd_blk(dev))
+ set_dev_node(dev, to_nd_region(dev)->numa_node);
return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
to_nd_device_type(dev));
}
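set_dev_node() stores the node in the generic struct device, so dev_to_node() on a region device now reflects the value carried in struct nd_region, and doing it in the bus uevent callback ensures the node is in place before the KOBJ_ADD event for the device goes out. A hedged sketch of the kind of consumer this enables; example_alloc_near() is hypothetical, while kzalloc_node() and dev_to_node() are standard kernel APIs:

#include <linux/device.h>
#include <linux/slab.h>

/*
 * Hypothetical consumer, not part of this patch: allocate driver
 * state on the node backing the region's memory.
 */
static void *example_alloc_near(struct device *region_dev, size_t len)
{
        /* dev_to_node() returns what set_dev_node() stored above */
        return kzalloc_node(len, GFP_KERNEL, dev_to_node(region_dev));
}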
u16 ndr_mappings;
u64 ndr_size;
u64 ndr_start;
- int id, num_lanes, ro;
+ int id, num_lanes, ro, numa_node;
void *provider_data;
struct nd_interleave_set *nd_set;
struct nd_percpu_lane __percpu *lane;
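With numa_node added to struct nd_region, region creation copies the value from the descriptor: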
nd_region->nd_set = ndr_desc->nd_set;
nd_region->num_lanes = ndr_desc->num_lanes;
nd_region->ro = ro;
+ nd_region->numa_node = ndr_desc->numa_node;
ida_init(&nd_region->ns_ida);
ida_init(&nd_region->btt_ida);
dev = &nd_region->dev;
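From here the value follows the region device for its lifetime; a companion change in the same series exposes it to userspace as a numa_node sysfs attribute so tools can bind regions to nodes. The final piece is the new field in the public region descriptor: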
struct nd_interleave_set *nd_set;
void *provider_data;
int num_lanes;
+ int numa_node;
};
struct nvdimm_bus;
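Putting it together, a bus provider fills nd_region_desc, including numa_node, before creating the region; the value then flows into struct nd_region and onto the device as shown above. A minimal end-to-end sketch; example_create_region() is a hypothetical name, the calls mirror those in this patch:

#include <linux/libnvdimm.h>
#include <linux/numa.h>
#include <linux/string.h>

/*
 * Hypothetical provider sketch: the numa_node set here is copied into
 * struct nd_region at region creation and applied to the device via
 * set_dev_node() in nvdimm_bus_uevent().
 */
static struct nd_region *example_create_region(struct nvdimm_bus *bus,
                struct resource *res, int node)
{
        struct nd_region_desc ndr_desc;

        memset(&ndr_desc, 0, sizeof(ndr_desc));
        ndr_desc.res = res;
        ndr_desc.numa_node = node;      /* NUMA_NO_NODE if unknown */
        return nvdimm_pmem_region_create(bus, &ndr_desc);
}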