// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}

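/*
 * Layout note (illustrative; struct nd_region_data and the
 * ndrd_{get,set}_flush_wpq() helpers are defined outside this file, in
 * nd-core.h): the region keeps one flat table of (1 << hints_shift)
 * mapped hint addresses per DIMM, so a lookup is conceptually:
 *
 *	hint = ndrd->flush_wpq[dimm * (1 << ndrd->hints_shift) + slot];
 *
 * nvdimm_map_flush() above fills one DIMM's slots, reusing an existing
 * mapping whenever two hints land in the same page.
 */
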
int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
			nvdimm_bus_unlock(&nd_region->dev);
			return -EBUSY;
		}

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out duplicate entries so a region flush does not write
	 * the same hint more than once.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

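/*
 * Sizing example for nd_region_activate() (hypothetical values): a
 * region with two DIMMs, one advertising four flush hints and one
 * advertising none, reserves sizeof(void *) * (1 + 2 + 4) bytes of hint
 * space: the initial slot, one null "no-hint" slot per DIMM, and the
 * four real hints. num_flush is the min_not_zero() across DIMMs, so
 * hints_shift = ilog2(4) = 2 in this case.
 */
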
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	memregion_free(nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the region's 'nstype' attribute, an input to the MODALIAS for
 * namespace devices, and the bit number an nvdimm_bus uses to match
 * namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, label;

		for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_LABELING, &nvdimm->flags))
				label++;
		}
		if (label)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

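/*
 * For example (type values per include/uapi/linux/ndctl.h at the time
 * of writing): a memory region backed by label-capable DIMMs reports
 * ND_DEVICE_NAMESPACE_PMEM, so its namespace devices emit a MODALIAS of
 * "nd:t5" and match the namespace driver advertising that type bit.
 */
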
static unsigned long long region_size(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		return nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		return nd_mapping->size;
	}

	return 0;
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%llu\n", region_size(nd_region));
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	rc = nvdimm_flush(nd_region, NULL);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(deep_flush);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
						nsindex));
		}
	}
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t available = 0;
	int i;

	if (is_memory(&nd_region->dev))
		available = PHYS_ADDR_MAX;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		if (is_memory(&nd_region->dev))
			available = min(available,
					nd_pmem_max_contiguous_dpa(nd_region,
							nd_mapping));
		else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}
	if (is_memory(&nd_region->dev))
		return available * nd_region->ndr_mappings;
	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's
	 * userspace's problem to not race itself.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#lx\n", nd_region->align);
}

static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long val, dpa;
	u32 remainder;
	int rc;

	rc = kstrtoul(buf, 0, &val);
	if (rc)
		return rc;

	if (!nd_region->ndr_mappings)
		return -ENXIO;

	/*
	 * Ensure space-align is evenly divisible by the region
	 * interleave-width because the kernel typically has no facility
	 * to determine which DIMM(s), dimm-physical-addresses, would
	 * contribute to the tail capacity in system-physical-address
	 * space for the namespace.
	 */
	dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
	if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
			|| val > region_size(nd_region) || remainder)
		return -EINVAL;

	/*
	 * Given that space allocation consults this value multiple
	 * times ensure it does not change for the duration of the
	 * allocation.
	 */
	nvdimm_bus_lock(dev);
	nd_region->align = val;
	nvdimm_bus_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(align);

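/*
 * Worked example for the check above (hypothetical numbers): on a 2-way
 * interleaved region, writing 0x2000000 (32M) to 'align' gives a
 * per-DIMM span of 16M, a power of 2 that is >= PAGE_SIZE with no
 * remainder, so it is accepted. 48M would be rejected because the
 * per-DIMM span of 24M is not a power of 2.
 */
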
static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nd_device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_align.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr && !is_memory(dev))
		return 0;

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a == &dev_attr_align.attr)
		return a->mode;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

static const struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};

static const struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};

static const struct attribute_group *nd_region_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_region_attribute_group,
	&nd_numa_attribute_group,
	&nd_mapping_attribute_group,
	NULL,
};

static const struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

static const struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

static const struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
	.groups = nd_region_attribute_groups,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * When a namespace is activated create new seeds for the next
 * namespace, or namespace-personality to be configured.
 */
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed == dev) {
		nd_region_create_ns_seed(nd_region);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_dax(dev)) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
	}
	nvdimm_bus_unlock(dev);
}

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

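/*
 * Example caller pattern (an illustrative sketch, not a driver API
 * contract; do_lane_io() is a hypothetical helper): a BTT-style I/O
 * path brackets its per-lane work with acquire/release:
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	rc = do_lane_io(nd_region, lane, ...);
 *	nd_region_release_lane(nd_region, lane);
 */
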
/*
 * PowerPC requires this alignment for memremap_pages(). All other archs
 * should be ok with SUBSECTION_SIZE (see memremap_compat_align()).
 */
#define MEMREMAP_COMPAT_ALIGN_MAX SZ_16M

static unsigned long default_align(struct nd_region *nd_region)
{
	unsigned long align;
	int i, mappings;
	u32 remainder;

	if (is_nd_blk(&nd_region->dev))
		align = PAGE_SIZE;
	else
		align = MEMREMAP_COMPAT_ALIGN_MAX;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_ALIASING, &nvdimm->flags)) {
			align = MEMREMAP_COMPAT_ALIGN_MAX;
			break;
		}
	}

	mappings = max_t(u16, 1, nd_region->ndr_mappings);
	div_u64_rem(align, mappings, &remainder);
	if (remainder)
		align *= mappings;

	return align;
}

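/*
 * Example (hypothetical topology): a 2-way interleaved PMEM region
 * starts from MEMREMAP_COMPAT_ALIGN_MAX (16M), and 16M % 2 == 0, so the
 * default align stays 16M. A 3-way interleave leaves a remainder, so
 * the align scales to 16M * 3 = 48M to stay evenly divisible by the
 * interleave-width.
 */
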
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc,
		const struct device_type *dev_type, const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % PAGE_SIZE) {
			dev_err(&nvdimm_bus->dev,
				"%s: %s mapping%d is not %ld aligned\n",
				caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;

		if (test_bit(NDD_NOBLK, &nvdimm->flags)
				&& dev_type == &nd_blk_device_type) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(struct_size(nd_region, mapping,
						ndr_desc->num_mappings),
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = memregion_alloc(GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	nd_region->target_node = ndr_desc->target_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	dev->of_node = ndr_desc->of_node;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_region->align = default_align(nd_region);
	if (ndr_desc->flush)
		nd_region->flush = ndr_desc->flush;
	else
		nd_region->flush = NULL;

	nd_device_register(dev);

	return nd_region;

 err_percpu:
	memregion_free(nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
	int rc = 0;

	if (!nd_region->flush)
		rc = generic_nvdimm_flush(nd_region);
	else {
		if (nd_region->flush(nd_region, bio))
			rc = -EIO;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

/**
 * generic_nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
int generic_nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The pmem_wmb() is needed to 'sfence' all previous writes such
	 * that they are architecturally visible for the platform buffer
	 * flush. Note that we've already arranged for pmem writes to
	 * avoid the cache via memcpy_flushcache(). The final wmb()
	 * ensures ordering for the NVDIMM flush write.
	 */
	pmem_wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();

	return 0;
}

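/*
 * Note on the hint selection above (illustrative; the exact wrap-around
 * behavior lives in ndrd_get_flush_wpq()): 'idx' is a per-cpu counter
 * advanced by a hash of the current pid, and the lookup helper is
 * assumed to mask the index down to the (1 << hints_shift) hints that
 * exist, so concurrent flushers tend to spread across distinct write
 * pending queue (WPQ) addresses rather than serializing on hint 0.
 */
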
/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints, assume a
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

bool is_nvdimm_sync(struct nd_region *nd_region)
{
	if (is_nd_volatile(&nd_region->dev))
		return true;

	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_ASYNC, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);

struct conflict_context {
	struct nd_region *nd_region;
	resource_size_t start, size;
};

static int region_conflict(struct device *dev, void *data)
{
	struct nd_region *nd_region;
	struct conflict_context *ctx = data;
	resource_size_t res_end, region_end, region_start;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region == ctx->nd_region)
		return 0;

	res_end = ctx->start + ctx->size;
	region_start = nd_region->ndr_start;
	region_end = region_start + nd_region->ndr_size;
	if (ctx->start >= region_start && ctx->start < region_end)
		return -EBUSY;
	if (res_end > region_start && res_end <= region_end)
		return -EBUSY;
	return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct conflict_context ctx = {
		.nd_region = nd_region,
		.start = start,
		.size = size,
	};

	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
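
/*
 * Example (hypothetical addresses): given an existing memory region
 * spanning [0x100000000, 0x180000000), a proposed range starting at
 * 0x17ff00000 begins inside that span, so region_conflict() returns
 * -EBUSY and nd_region_conflict() propagates the collision to the
 * caller.
 */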