/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

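/* driver-private state: one pmem_device per attached namespace */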
struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;
	struct nd_namespace_common *ndns;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	void __pmem		*virt_addr;
	size_t			size;
	struct badblocks	bb;
};

static int pmem_major;

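/*
 * Copy one bio_vec's worth of data between a kmapped page and the
 * persistent memory mapping, honoring the namespace's data_offset.
 */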
static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (rw == READ) {
		memcpy_from_pmem(mem + off, pmem_addr, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
	}

	kunmap_atomic(mem);
}

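/* bio submission path: service each segment synchronously via pmem_do_bvec() */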
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct block_device *bdev = bio->bi_bdev;
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter)
		pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
				bio_data_dir(bio), iter.bi_sector);
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
			struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;

	pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();
	page_endio(page, rw & WRITE, 0);

	return 0;
}

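/*
 * DAX entry point: translate a sector to a kernel virtual address and pfn
 * within the pmem mapping, and return how many bytes remain past that offset.
 */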
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
			void __pmem **kaddr, unsigned long *pfn)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;

	return pmem->size - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

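/* reserve the namespace's physical range and map it, with or without struct pages */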
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	if (pmem_should_map_pages(dev))
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res);
	else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr))
		return (void __force *) pmem->virt_addr;

	return pmem;
}

static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}

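/* set up the request queue, gendisk, and badblocks list, then register the disk */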
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	int nid = dev_to_node(dev);
	struct gendisk *disk;

	pmem->pmem_queue = blk_alloc_queue_node(GFP_KERNEL, nid);
	if (!pmem->pmem_queue)
		return -ENOMEM;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->major		= pmem_major;
	disk->first_minor	= 0;
	disk->fops		= &pmem_fops;
	disk->private_data	= pmem;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->data_offset) / 512);
	pmem->pmem_disk = disk;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset);

	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

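/* ->rw_bytes() callback for the namespace: bounds-checked byte-granularity access */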
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ)
		memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}

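/*
 * Validate an existing pfn superblock, or write a new one that reserves
 * space in the namespace for page metadata (PFN_MODE_PMEM) or records a
 * RAM-backed memmap (PFN_MODE_RAM).
 */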
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	struct nd_region *nd_region;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == 0 || rc == -EBUSY)
		return rc;

	/* section alignment for simple hotplug */
	if (nvdimm_namespace_capacity(ndns) < ND_PFN_ALIGN
			|| pmem->phys_addr & ND_PFN_MASK)
		return -ENODEV;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		goto err;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));
	npfns = (pmem->size - SZ_8K) / SZ_4K;
	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(SZ_8K + 64 * npfns, PMD_SIZE);
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = SZ_8K;
	else
		goto err;

	npfns = (pmem->size - offset) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	pfn_sb->version_major = cpu_to_le16(1);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
	if (rc)
		goto err;

	return 0;
 err:
	nd_pfn->pfn_sb = NULL;
	kfree(pfn_sb);
	return -ENXIO;
}

static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);

	/* release nd_pfn resources */
	kfree(nd_pfn->pfn_sb);
	nd_pfn->pfn_sb = NULL;

	return 0;
}

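/* re-map the namespace via devm_memremap_pages() and attach the disk in pfn mode */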
static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	struct device *dev = &nd_pfn->dev;
	struct vmem_altmap *altmap;
	struct nd_region *nd_region;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	phys_addr_t offset;
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	nd_region = to_nd_region(dev->parent);
	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;

	if (PAGE_SIZE != SZ_4K) {
		dev_err(dev, "only supported on systems with 4K PAGE_SIZE\n");
		return -ENXIO;
	}
	if (nsio->res.start & ND_PFN_MASK) {
		dev_err(dev, "%s not memory hotplug section aligned\n",
				dev_name(&ndns->dev));
		return -ENXIO;
	}

	pfn_sb = nd_pfn->pfn_sb;
	offset = le64_to_cpu(pfn_sb->dataoff);
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (offset != SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	pmem = dev_get_drvdata(dev);
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res);
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	pmem->data_offset = offset;
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(ndns);
	return rc;
}

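/*
 * Bus probe: hand the namespace to an existing btt/pfn claim if one is
 * configured, otherwise try to personality-probe btt/pfn before falling
 * back to attaching a raw pmem disk.
 */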
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	pmem->ndns = ndns;
	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(ndns, pmem) == 0) {
		/* we'll come back as btt-pmem */
		return -ENXIO;
	}

	if (nd_pfn_probe(ndns, pmem) == 0) {
		/* we'll come back as pfn-pmem */
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(pmem->ndns);
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(pmem->ndns);
	else
		pmem_detach_disk(pmem);

	return 0;
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	int error;

	pmem_major = register_blkdev(0, "pmem");
	if (pmem_major < 0)
		return pmem_major;

	error = nd_driver_register(&nd_pmem_driver);
	if (error) {
		unregister_blkdev(pmem_major, "pmem");
		return error;
	}

	return 0;
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
	unregister_blkdev(pmem_major, "pmem");
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");