/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

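/*
 * Ask the nvdimm bus to clear poison (media errors) over @len bytes at
 * @offset, then bring the badblocks list and the CPU cache back in sync
 * with the media.  Returns 0 only when at least one full 512-byte sector
 * was actually cleared; otherwise the failure is reported as -EIO.
 */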
static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %#llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	} else {
		return -EIO;
	}

	invalidate_pmem(pmem->virt_addr + offset, len);
	return 0;
}

static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	void *mem = kmap_atomic(page);

	memcpy_to_pmem(pmem_addr, mem + off, len);
	kunmap_atomic(mem);
}

static int read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	int rc;
	void *mem = kmap_atomic(page);

	rc = memcpy_from_pmem(mem + off, pmem_addr, len);
	kunmap_atomic(mem);
	return rc;
}

static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
#ifndef REQ_FLUSH
#define REQ_FLUSH REQ_PREFLUSH
#endif

static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
		void **kaddr, pfn_t *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return size;
	return pmem->size - pmem->pfn_pad - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_release_disk(void *disk)
{
	del_gendisk(disk);
	put_disk(disk);
}

static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct vmem_altmap __altmap, *altmap = NULL;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	int nid = dev_to_node(dev);
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (nvdimm_has_flush(nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be dead before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, true);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
	disk->bb = &pmem->bb;
	device_add_disk(dev, disk);

	if (devm_add_action_or_reset(dev, pmem_release_disk, disk))
		return -ENOMEM;

	revalidate_disk(disk);

	return 0;
}

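/*
 * A raw namespace may instead host a btt, pfn, or dax personality.  If
 * one of the nd_*_probe() calls finds a valid info block it registers a
 * new device that re-enters the appropriate driver, so this probe
 * returns -ENXIO for the raw device in that case.
 */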
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);
	struct nd_region *nd_region = to_region(pmem);
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		ndns = nd_pfn->ndns;
		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");