/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
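
/*
 * nvdimm_map_flush() - map a dimm's flush hint (write-posted-queue)
 * addresses for later use by nvdimm_flush()
 *
 * Flush hints that fall in the same page share one ioremap'd mapping;
 * the inner loop below detects that case and reuses the mapping
 * already recorded for an earlier hint instead of remapping the page.
 */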
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates, so that a flush hint
	 * page shared between dimms is only written once per flush.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}

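/*
 * nd_region_release() - final put for a region device; a BLK region is
 * embedded in a larger struct nd_blk_region, so the containing object
 * is what gets freed in that case.
 */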
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region, an input to the MODALIAS
 * for namespace devices, and the bit number for an nvdimm_bus to match
 * namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

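/*
 * A pmem region reports the full mapped size (ndr_size); a blk region
 * reports a size only in the single-mapping case and otherwise shows 0.
 */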
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

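/*
 * nd_region_available_dpa() - sum the unallocated dpa across all
 * mappings. For pmem the per-mapping result depends on how much blk
 * capacity overlaps the pmem range on each dimm, so if a mapping
 * reports a larger blk overlap than previously seen the accounting is
 * restarted with that overlap as the new baseline (the "retry" label).
 */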
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

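/*
 * The *_seed attributes below name the next namespace, btt, pfn, or
 * dax device the region will instantiate, or print an empty line when
 * no seed device is available.
 */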
static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

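/*
 * region_visible() - hide attributes that do not apply to this region:
 * pfn_seed and dax_seed are pmem-only, set_cookie requires a pmem
 * region with an interleave set, and available_size requires a region
 * type that supports dynamic namespace allocation.
 */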
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

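/*
 * nd_mapping_free_labels() - drop the cached label list for a mapping;
 * callers must hold nd_mapping->lock (asserted below).
 */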
void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	WARN_ON(!mutex_is_locked(&nd_mapping->lock));
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds. Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
			&& probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

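/*
 * mappingN() - emit the "<dimm-name>,<start-dpa>,<size>" tuple for
 * mapping N of the region.
 */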
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}

#define REGION_MAPPING(idx)					\
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	/* pairs with the get_cpu() in nd_region_acquire_lane() */
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
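
/*
 * Illustrative caller pattern (see the BTT and BLK drivers for the
 * in-tree users):
 *
 *	lane = nd_region_acquire_lane(nd_region);
 *	... perform I/O through the lane's data window / log slot ...
 *	nd_region_release_lane(nd_region, lane);
 */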

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

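/*
 * The create wrappers below select the device_type and the lane
 * budget: pmem and volatile regions get the full ND_MAX_LANES, while a
 * blk region is restricted to a single mapping and the provider's lane
 * count, capped at ND_MAX_LANES.
 */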
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush. Note that we've already arranged for pmem
	 * writes to avoid the cache via arch_memcpy_to_pmem(). The
	 * final wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++)
		/* flush hints present, flushing required */
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			return 1;

	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);

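/*
 * Illustrative check (a driver probe might, for example, warn when the
 * flushing capability cannot be verified):
 *
 *	if (nvdimm_has_flush(nd_region) < 0)
 *		dev_warn(dev, "unable to guarantee persistence of writes\n");
 */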

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}