// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

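/*
 * Map one DIMM's flush hint (write-pending-queue) resources into the
 * region data table, reusing an existing page mapping whenever two
 * hints land in the same page.
 */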
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}

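/*
 * Allocate this region's driver data and flush hint table, sized from
 * the minimum non-zero flush hint count across the mappings, then map
 * each DIMM's hints and NULL out duplicate entries so a region flush
 * touches each distinct hint only once.
 */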
int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
			nvdimm_bus_unlock(&nd_region->dev);
			return -EBUSY;
		}

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates. This should prevent
	 * extra flushes.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

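/*
 * Device release callback: drop the references taken on each mapped
 * DIMM at region creation and free the region's lanes, id, and
 * containing allocation.
 */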
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus
 * to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_memory(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

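/*
 * 'deep_flush' reads as the result of nvdimm_has_flush(); writing '1'
 * triggers an nvdimm_flush() of the region's write-pending queues.
 */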
static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	rc = nvdimm_flush(nd_region, NULL);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(deep_flush);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
						nsindex));
		}
	}
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

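/*
 * Sum the available DIMM-physical-address (DPA) capacity across this
 * region's mappings. PMEM accounting restarts whenever a larger BLK
 * overlap is reported so that every mapping is debited consistently.
 */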
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

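/*
 * Largest single allocation the region can satisfy: for PMEM this is
 * the smallest max-contiguous free extent across the interleave set,
 * scaled by the number of mappings; for BLK it is the total available
 * DPA.
 */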
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t available = 0;
	int i;

	if (is_memory(&nd_region->dev))
		available = PHYS_ADDR_MAX;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		if (is_memory(&nd_region->dev))
			available = min(available,
					nd_pmem_max_contiguous_dpa(nd_region,
							nd_mapping));
		else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}
	if (is_memory(&nd_region->dev))
		return available * nd_region->ndr_mappings;
	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nd_device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};

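/*
 * Gate attribute visibility on region type: pfn/dax seeds, badblocks,
 * and 'resource' only apply to memory (PMEM) regions, 'deep_flush'
 * visibility follows nvdimm_has_flush(), and 'set_cookie' requires an
 * interleave set.
 */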
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr) {
		if (is_memory(dev))
			return 0400;
		else
			return 0;
	}

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}

#define REGION_MAPPING(idx)					\
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

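/*
 * v1.1 namespace label indexes validate against the v1.1 definition of
 * the interleave set cookie (cookie1); all other label versions use
 * cookie2.
 */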
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * When a namespace is activated, create new seeds for the next
 * namespace, or namespace-personality, to be configured.
 */
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed == dev) {
		nd_region_create_ns_seed(nd_region);
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
	} else if (is_nd_dax(dev)) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
	}
	nvdimm_bus_unlock(dev);
}

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);

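/*
 * Typical acquire/release pairing, e.g. in a BTT or BLK I/O path
 * (illustrative sketch, not code from this file):
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... perform I/O via the lane's data window or BTT log slot ...
 *	nd_region_release_lane(nd_region, lane);
 */
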
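/*
 * Common region constructor: validate that every mapping is page
 * aligned (and BLK capable when a BLK region is requested), then
 * allocate the region, take a reference on each backing DIMM, and
 * register the device on the nvdimm bus.
 */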
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % PAGE_SIZE) {
			dev_err(&nvdimm_bus->dev,
				"%s: %s mapping%d is not %ld aligned\n",
				caller, dev_name(&nvdimm->dev), i, PAGE_SIZE);
			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;

		if (test_bit(NDD_NOBLK, &nvdimm->flags)
				&& dev_type == &nd_blk_device_type) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(struct_size(nd_region, mapping,
						ndr_desc->num_mappings),
				    GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	nd_region->target_node = ndr_desc->target_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	dev->of_node = ndr_desc->of_node;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	if (ndr_desc->flush)
		nd_region->flush = ndr_desc->flush;
	else
		nd_region->flush = NULL;

	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

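/*
 * nvdimm_flush() dispatches to the flush callback a bus provider
 * registered at region creation (ndr_desc->flush), or falls back to
 * generic_nvdimm_flush() and its write-pending-queue flush hints.
 */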
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
	int rc = 0;

	if (!nd_region->flush)
		rc = generic_nvdimm_flush(nd_region);
	else {
		if (nd_region->flush(nd_region, bio))
			rc = -EIO;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

/**
 * generic_nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
int generic_nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush. Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache(). The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();

	return 0;
}

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

bool is_nvdimm_sync(struct nd_region *nd_region)
{
	if (is_nd_volatile(&nd_region->dev))
		return true;

	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_ASYNC, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);

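/*
 * Check whether the span [start, start + size) overlaps any other
 * memory region on the same nvdimm bus; used to fail requests that
 * would collide with an existing region.
 */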
struct conflict_context {
	struct nd_region *nd_region;
	resource_size_t start, size;
};

static int region_conflict(struct device *dev, void *data)
{
	struct nd_region *nd_region;
	struct conflict_context *ctx = data;
	resource_size_t res_end, region_end, region_start;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region == ctx->nd_region)
		return 0;

	res_end = ctx->start + ctx->size;
	region_start = nd_region->ndr_start;
	region_end = region_start + nd_region->ndr_size;
	if (ctx->start >= region_start && ctx->start < region_end)
		return -EBUSY;
	if (res_end > region_start && res_end <= region_end)
		return -EBUSY;
	return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct conflict_context ctx = {
		.nd_region = nd_region,
		.start = start,
		.size = size,
	};

	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}