/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

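/*
 * Ask the nvdimm bus to clear poison over a physical range, reflect any
 * fully-cleared sectors in the badblocks list, and invalidate stale CPU
 * cache lines so subsequent reads see the cleared media.
 */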
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		cleared /= 512;
		dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

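/*
 * Write a page to pmem with memcpy_flushcache() so the stores are pushed
 * out of the CPU cache toward the persistence domain as they are made.
 */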
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	void *mem = kmap_atomic(page);

	memcpy_flushcache(pmem_addr, mem + off, len);
	kunmap_atomic(mem);
}

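/*
 * Read from pmem with memcpy_mcsafe(), which returns an error instead of
 * consuming a fatal machine check when it encounters poisoned media.
 */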
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	int rc;
	void *mem = kmap_atomic(page);

	rc = memcpy_mcsafe(mem + off, pmem_addr, len);
	kunmap_atomic(mem);
	if (rc)
		return BLK_STS_IOERR;
	return BLK_STS_OK;
}

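/*
 * Carry out one segment of I/O against the pmem mapping, consulting the
 * badblocks list so that reads of known-poisoned sectors fail cleanly.
 */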
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = BLK_STS_IOERR;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
#ifndef REQ_FLUSH
#define REQ_FLUSH REQ_PREFLUSH
#endif

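/*
 * Bio submission path: a preflush triggers nvdimm_flush() before any data
 * is copied, and REQ_FUA triggers another flush afterwards, pushing writes
 * through any platform write buffers to the DIMMs.
 */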
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	blk_status_t rc = BLK_STS_OK;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	blk_status_t rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, is_write, 0);

	return blk_status_to_errno(rc);
}

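/*
 * Returns how many contiguous pages are valid starting at *kaddr: the full
 * remainder of the device when it is free of badblocks, otherwise just the
 * (already range-checked) request.
 */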
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

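/*
 * DAX write path: copy_from_iter_flushcache() moves user data with
 * cache-flushing stores, mirroring what write_pmem() does for bios.
 */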
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter_flushcache(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.copy_from_iter = pmem_copy_from_iter,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

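/*
 * Begin a queue freeze so that in-flight bios drain before the pages
 * backing them are torn down by devm_memremap_pages().
 */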
static void pmem_freeze_queue(void *q)
{
	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct vmem_altmap __altmap, *altmap = NULL;
	int nid = dev_to_node(dev), fua, wbc;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	wbc = nvdimm_has_cache(nd_region);

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

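	/*
	 * Map the namespace: pfn-mode namespaces carve space for the page
	 * map out of pmem itself (altmap), page-backed namespaces get
	 * struct pages via devm_memremap_pages(), and raw namespaces are
	 * simply memremap()ed with no struct pages behind them.
	 */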
	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be frozen before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

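	/*
	 * Publish queue capabilities: the write-cache/FUA settings follow
	 * the region's flush capabilities, and QUEUE_FLAG_DAX advertises
	 * direct_access support to filesystems.
	 */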
	blk_queue_write_cache(q, wbc, fua);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
	disk->bb = &pmem->bb;

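	/*
	 * The dax_device shares the gendisk's name; its ops route
	 * filesystem-DAX accesses back into this driver.
	 */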
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	dax_write_cache(dax_dev, wbc);
	pmem->dax_dev = dax_dev;

	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

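/*
 * Probe dispatch: a namespace may surface as btt, pfn, dax, or raw pmem;
 * scanning for info blocks decides which personality claims the device.
 */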
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not race
		 * nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

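/*
 * Region poison notifications re-scan the namespace span for fresh
 * badblocks; offsets account for pfn metadata padding when present.
 */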
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");