/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

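/*
 * Ask the nvdimm bus to clear poison over the given physical range, then
 * update the badblocks list and invalidate stale cached copies of the
 * range.  Returns BLK_STS_IOERR if fewer bytes were cleared than
 * requested.
 */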
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		cleared /= 512;
		dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

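/*
 * Copy from a (potentially highmem) page into pmem, one page at a time
 * since kmap_atomic() maps a single page.  memcpy_flushcache() writes
 * the data through the CPU cache toward the media.
 */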
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += PAGE_SIZE;
	}
}

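/*
 * Copy from pmem into a page with memcpy_mcsafe(), which returns an
 * error rather than consuming a machine check when the source contains
 * poison.  Chunked per page for the same kmap_atomic() reason as
 * write_pmem().
 */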
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	int rc;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE);
		rc = memcpy_mcsafe(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rc)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += PAGE_SIZE;
	}
	return BLK_STS_OK;
}

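/*
 * Common helper for pmem_make_request() and pmem_rw_page(): translate a
 * sector to a pmem offset, consult the badblocks list, and perform the
 * copy.  Reads from known-bad pmem fail immediately; writes attempt to
 * clear the poison (see the comment in the write path below).
 */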
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = BLK_STS_IOERR;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
#ifndef REQ_FLUSH
#define REQ_FLUSH REQ_PREFLUSH
#endif

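/*
 * Process a bio by copying each segment synchronously.  REQ_FLUSH
 * (REQ_PREFLUSH) and REQ_FUA both map to nvdimm_flush(), which makes
 * previously written data durable for the whole region.
 */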
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	blk_status_t rc;

	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
			  0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, is_write, 0);

	return blk_status_to_errno(rc);
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner = THIS_MODULE,
	.rw_page = pmem_rw_page,
	.revalidate_disk = nvdimm_revalidate_disk,
};

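/*
 * DAX operations forward to the same pmem mapping the block path uses,
 * giving filesystems direct access to the media without a page-cache
 * copy.
 */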
static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter_flushcache(addr, bytes, i);
}

static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t size)
{
	arch_wb_cache_pmem(addr, size);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.copy_from_iter = pmem_copy_from_iter,
	.flush = pmem_dax_flush,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

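/*
 * devm actions registered in pmem_attach_disk() so that teardown runs
 * in reverse order of setup on probe failure or device removal.
 */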
static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_freeze_queue(void *q)
{
	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

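/*
 * Set up the request queue, gendisk, and dax_device for a pmem
 * namespace.  Depending on the namespace personality the backing
 * memory is mapped with struct pages (devm_memremap_pages) or as a
 * plain memremap() of the physical range.
 */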
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct vmem_altmap __altmap, *altmap = NULL;
	int nid = dev_to_node(dev), fua, wbc;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	wbc = nvdimm_has_cache(nd_region);

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be frozen before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, wbc, fua);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	dax_write_cache(dax_dev, wbc);
	pmem->dax_dev = dax_dev;

	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not race
		 * nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

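/*
 * Re-populate the badblocks list when the bus reports new poison
 * (NVDIMM_REVALIDATE_POISON), accounting for btt/pfn metadata offsets,
 * then notify the 'badblocks' sysfs attribute so userspace can re-read
 * it.
 */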
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");