/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mount.h>
#include <linux/pfn_t.h>
#include <linux/hash.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax.h"

/* pseudo-fs superblock magic: "ddax" */
#define DAXFS_MAGIC 0x64646178
static dev_t dax_devt;
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
module_param(nr_dax, int, S_IRUGO);
static struct vfsmount *dax_mnt;
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");
/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region for a memory range
 * @ida: instance id allocator for child dax devices
 * @base: linear address corresponding to @res
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @res: physical address range of the region
 * @pfn_flags: identify whether the pfns are paged back or not
 */
struct dax_region {
        int id;
        struct ida ida;
        void *base;
        struct kref kref;
        struct device *dev;
        unsigned int align;
        struct resource res;
        unsigned long pfn_flags;
};
/**
 * struct dax_dev - subdivision of a dax region
 * @region - parent region
 * @inode - backing inode shared by all mappings of this device
 * @dev - device backing the character device
 * @cdev - core chardev data
 * @alive - !alive + rcu grace period == no new mappings can be established
 * @id - child id in the region
 * @num_resources - number of physical address extents in this device
 * @res - array of physical address ranges
 */
struct dax_dev {
        struct dax_region *region;
        struct inode *inode;
        struct device dev;
        struct cdev cdev;
        bool alive;
        int id;
        int num_resources;
        struct resource res[0];
};
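/*
 * The pseudo-filesystem below exists only to supply the inode behind each
 * device-dax instance. dax_open() aliases every opener's mapping to that
 * one inode, so all mappings of a dax_dev share a single address_space
 * and unregister_dax_dev() can invalidate them all with one call to
 * unmap_mapping_range().
 */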
static struct inode *dax_alloc_inode(struct super_block *sb)
{
        return kmem_cache_alloc(dax_cache, GFP_KERNEL);
}
static void dax_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);

        kmem_cache_free(dax_cache, inode);
}

static void dax_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, dax_i_callback);
}
static const struct super_operations dax_sops = {
        .statfs = simple_statfs,
        .alloc_inode = dax_alloc_inode,
        .destroy_inode = dax_destroy_inode,
        .drop_inode = generic_delete_inode,
};
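/*
 * Note: .drop_inode = generic_delete_inode keeps these pseudo-fs inodes
 * out of the unused-inode cache; each one is freed as soon as its last
 * reference is dropped, matching the lifetime of the cdev it backs.
 */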
static struct dentry *dax_mount(struct file_system_type *fs_type,
                int flags, const char *dev_name, void *data)
{
        return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_type = {
        .name = "dax",
        .mount = dax_mount,
        .kill_sb = kill_anon_super,
};
static int dax_test(struct inode *inode, void *data)
{
        return inode->i_cdev == data;
}

static int dax_set(struct inode *inode, void *data)
{
        inode->i_cdev = data;
        return 0;
}
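/*
 * Find or create the backing inode for a dax_dev in the dax pseudo-fs.
 * The devt is salted with DAXFS_MAGIC and hashed to pick a bucket, while
 * dax_test()/dax_set() key the inode by its cdev.
 */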
static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
{
        struct inode *inode;

        inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
                        dax_test, dax_set, cdev);

        if (!inode)
                return NULL;

        if (inode->i_state & I_NEW) {
                inode->i_mode = S_IFCHR;
                inode->i_flags = S_DAX;
                inode->i_rdev = devt;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                unlock_new_inode(inode);
        }

        return inode;
}
static void init_once(void *inode)
{
        inode_init_once(inode);
}
static int dax_inode_init(void)
{
        int rc;

        dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
                        (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                         SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                        init_once);
        if (!dax_cache)
                return -ENOMEM;

        rc = register_filesystem(&dax_type);
        if (rc)
                goto err_register_fs;

        dax_mnt = kern_mount(&dax_type);
        if (IS_ERR(dax_mnt)) {
                rc = PTR_ERR(dax_mnt);
                goto err_mount;
        }
        dax_superblock = dax_mnt->mnt_sb;

        return 0;

 err_mount:
        unregister_filesystem(&dax_type);
 err_register_fs:
        kmem_cache_destroy(dax_cache);

        return rc;
}
static void dax_inode_exit(void)
{
        kern_unmount(dax_mnt);
        unregister_filesystem(&dax_type);
        kmem_cache_destroy(dax_cache);
}
static void dax_region_free(struct kref *kref)
{
        struct dax_region *dax_region;

        dax_region = container_of(kref, struct dax_region, kref);
        kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
        kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
                struct resource *res, unsigned int align, void *addr,
                unsigned long pfn_flags)
{
        struct dax_region *dax_region;

        if (!IS_ALIGNED(res->start, align)
                        || !IS_ALIGNED(resource_size(res), align))
                return NULL;

        dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
        if (!dax_region)
                return NULL;

        memcpy(&dax_region->res, res, sizeof(*res));
        dax_region->pfn_flags = pfn_flags;
        kref_init(&dax_region->kref);
        dax_region->id = region_id;
        ida_init(&dax_region->ida);
        dax_region->align = align;
        dax_region->dev = parent;
        dax_region->base = addr;

        return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
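/*
 * Sketch of a typical provider flow, for illustration only; it is not
 * part of this file. Assumes a driver that owns a dax-capable memory
 * range; "example_probe", "res", and "kaddr" are hypothetical names:
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		struct dax_region *dax_region;
 *		struct dax_dev *dax_dev;
 *
 *		dax_region = alloc_dax_region(dev, 0, res, SZ_2M, kaddr,
 *				PFN_DEV|PFN_MAP);
 *		if (!dax_region)
 *			return -ENOMEM;
 *		dax_dev = devm_create_dax_dev(dax_region, res, 1);
 *		dax_region_put(dax_region); // create took its own reference
 *		return PTR_ERR_OR_ZERO(dax_dev);
 *	}
 */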
static struct dax_dev *to_dax_dev(struct device *dev)
{
        return container_of(dev, struct dax_dev, dev);
}
static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_dev *dax_dev = to_dax_dev(dev);
        unsigned long long size = 0;
        int i;

        for (i = 0; i < dax_dev->num_resources; i++)
                size += resource_size(&dax_dev->res[i]);

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);
static struct attribute *dax_device_attributes[] = {
        &dev_attr_size.attr,
        NULL,
};

static const struct attribute_group dax_device_attribute_group = {
        .attrs = dax_device_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
        &dax_device_attribute_group,
        NULL,
};
static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
                const char *func)
{
        struct dax_region *dax_region = dax_dev->region;
        struct device *dev = &dax_dev->dev;
        unsigned long mask;

        if (!dax_dev->alive)
                return -ENXIO;

        /* prevent private mappings from being established */
        if ((vma->vm_flags & VM_SHARED) != VM_SHARED) {
                dev_info(dev, "%s: %s: fail, attempted private mapping\n",
                                current->comm, func);
                return -EINVAL;
        }

        mask = dax_region->align - 1;
        if (vma->vm_start & mask || vma->vm_end & mask) {
                dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
                                current->comm, func, vma->vm_start, vma->vm_end,
                                mask);
                return -EINVAL;
        }

        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
                        && (vma->vm_flags & VM_DONTCOPY) == 0) {
                dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
                                current->comm, func);
                return -EINVAL;
        }

        if (!vma_is_dax(vma)) {
                dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
                                current->comm, func);
                return -EINVAL;
        }

        return 0;
}
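/*
 * Translate a page offset to a physical address by walking the device's
 * resource array as one linear space: each extent consumes
 * PHYS_PFN(resource_size(res)) page offsets before the search moves on.
 * Returns -1 if @pgoff (plus @size) lands outside every extent.
 */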
static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
                unsigned long size)
{
        struct resource *res;
        phys_addr_t phys;
        int i;

        for (i = 0; i < dax_dev->num_resources; i++) {
                res = &dax_dev->res[i];
                phys = pgoff * PAGE_SIZE + res->start;
                if (phys >= res->start && phys <= res->end)
                        break;
                pgoff -= PHYS_PFN(resource_size(res));
        }

        if (i < dax_dev->num_resources) {
                res = &dax_dev->res[i];
                if (phys + size - 1 <= res->end)
                        return phys;
        }

        return -1;
}
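/*
 * Worked example with hypothetical extents: given res[0] spanning
 * 0x1000000-0x1001fff and res[1] spanning 0x2000000-0x2001fff (two 8K
 * extents, 4K pages), pgoff 3 overshoots res[0], is rebased by its two
 * pages, and resolves to 0x2000000 + PAGE_SIZE = 0x2001000 in res[1].
 */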
static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
                struct vm_fault *vmf)
{
        unsigned long vaddr = (unsigned long) vmf->virtual_address;
        struct device *dev = &dax_dev->dev;
        struct dax_region *dax_region;
        int rc = VM_FAULT_SIGBUS;
        phys_addr_t phys;
        pfn_t pfn;

        if (check_vma(dax_dev, vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dax_dev->region;
        if (dax_region->align > PAGE_SIZE) {
                dev_dbg(dev, "%s: alignment > fault size\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                vmf->pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        rc = vm_insert_mixed(vma, vaddr, pfn);

        if (rc == -ENOMEM)
                return VM_FAULT_OOM;
        if (rc < 0 && rc != -EBUSY)
                return VM_FAULT_SIGBUS;

        return VM_FAULT_NOPAGE;
}
static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int rc;
        struct file *filp = vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
                        current->comm, (vmf->flags & FAULT_FLAG_WRITE)
                        ? "write" : "read", vma->vm_start, vma->vm_end);
        rcu_read_lock();
        rc = __dax_dev_fault(dax_dev, vma, vmf);
        rcu_read_unlock();

        return rc;
}
static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
                struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd,
                unsigned int flags)
{
        unsigned long pmd_addr = addr & PMD_MASK;
        struct device *dev = &dax_dev->dev;
        struct dax_region *dax_region;
        phys_addr_t phys;
        pgoff_t pgoff;
        pfn_t pfn;

        if (check_vma(dax_dev, vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dax_dev->region;
        if (dax_region->align > PMD_SIZE) {
                dev_dbg(dev, "%s: alignment > fault size\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        /* dax pmd mappings require pfn_t_devmap() */
        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
                dev_dbg(dev, "%s: region lacks devmap pfn flags\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        pgoff = linear_page_index(vma, pmd_addr);
        phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
                        flags & FAULT_FLAG_WRITE);
}
static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, unsigned int flags)
{
        int rc;
        struct file *filp = vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
                        current->comm, (flags & FAULT_FLAG_WRITE)
                        ? "write" : "read", vma->vm_start, vma->vm_end);

        rcu_read_lock();
        rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
        rcu_read_unlock();

        return rc;
}
static const struct vm_operations_struct dax_dev_vm_ops = {
        .fault = dax_dev_fault,
        .pmd_fault = dax_dev_pmd_fault,
};
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct dax_dev *dax_dev = filp->private_data;
        int rc;

        dev_dbg(&dax_dev->dev, "%s\n", __func__);

        rc = check_vma(dax_dev, vma, __func__);
        if (rc)
                return rc;

        vma->vm_ops = &dax_dev_vm_ops;
        vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        return 0;
}
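/*
 * The helper below honors the region alignment by over-allocating: it
 * asks the mm for len + align bytes, then bumps the returned address so
 * it is congruent to the file offset modulo the alignment. E.g. for a
 * 2M-aligned region and a 2M request at off 0, a 4M search window always
 * contains a 2M-aligned start address.
 */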
/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags)
{
        unsigned long off, off_end, off_align, len_align, addr_align, align;
        struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
        struct dax_region *dax_region;

        if (!dax_dev || addr)
                goto out;

        dax_region = dax_dev->region;
        align = dax_region->align;
        off = pgoff << PAGE_SHIFT;
        off_end = off + len;
        off_align = round_up(off, align);

        if ((off_end <= off_align) || ((off_end - off_align) < align))
                goto out;

        len_align = len + align;
        if ((off + len_align) < off)
                goto out;

        addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
                        pgoff, flags);
        if (!IS_ERR_VALUE(addr_align)) {
                addr_align += (off - addr_align) & (align - 1);
                return addr_align;
        }
 out:
        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
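/*
 * Alias the opener's inode mapping to the dax_dev's pseudo-fs inode:
 * with one shared address_space per device, a single
 * unmap_mapping_range() in unregister_dax_dev() tears down every live
 * mapping no matter which device node it came through.
 */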
static int dax_open(struct inode *inode, struct file *filp)
{
        struct dax_dev *dax_dev;

        dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
        dev_dbg(&dax_dev->dev, "%s\n", __func__);
        inode->i_mapping = dax_dev->inode->i_mapping;
        inode->i_mapping->host = dax_dev->inode;
        filp->f_mapping = inode->i_mapping;
        filp->private_data = dax_dev;
        inode->i_flags = S_DAX;

        return 0;
}
static int dax_release(struct inode *inode, struct file *filp)
{
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(&dax_dev->dev, "%s\n", __func__);
        return 0;
}
static const struct file_operations dax_fops = {
        .llseek = noop_llseek,
        .owner = THIS_MODULE,
        .open = dax_open,
        .release = dax_release,
        .get_unmapped_area = dax_get_unmapped_area,
        .mmap = dax_mmap,
};
static void dax_dev_release(struct device *dev)
{
        struct dax_dev *dax_dev = to_dax_dev(dev);
        struct dax_region *dax_region = dax_dev->region;

        ida_simple_remove(&dax_region->ida, dax_dev->id);
        ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
        dax_region_put(dax_region);
        iput(dax_dev->inode);
        kfree(dax_dev);
}
static void unregister_dax_dev(void *dev)
{
        struct dax_dev *dax_dev = to_dax_dev(dev);
        struct cdev *cdev = &dax_dev->cdev;

        dev_dbg(dev, "%s\n", __func__);

        /*
         * Note, rcu is not protecting the liveness of dax_dev, rcu is
         * ensuring that any fault handlers that might have seen
         * dax_dev->alive == true, have completed. Any fault handlers
         * that start after synchronize_rcu() has started will abort
         * upon seeing dax_dev->alive == false.
         */
        dax_dev->alive = false;
        synchronize_rcu();
        unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
        cdev_del(cdev);
        device_unregister(dev);
}
struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
                struct resource *res, int count)
{
        struct device *parent = dax_region->dev;
        struct dax_dev *dax_dev;
        int rc = 0, minor, i;
        struct device *dev;
        struct cdev *cdev;
        dev_t dev_t;

        dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
        if (!dax_dev)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < count; i++) {
                if (!IS_ALIGNED(res[i].start, dax_region->align)
                                || !IS_ALIGNED(resource_size(&res[i]),
                                        dax_region->align)) {
                        rc = -EINVAL;
                        break;
                }
                dax_dev->res[i].start = res[i].start;
                dax_dev->res[i].end = res[i].end;
        }

        if (i < count)
                goto err_id;

        dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
        if (dax_dev->id < 0) {
                rc = dax_dev->id;
                goto err_id;
        }

        minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
        if (minor < 0) {
                rc = minor;
                goto err_minor;
        }

        dev_t = MKDEV(MAJOR(dax_devt), minor);
        dev = &dax_dev->dev;
        dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
        if (!dax_dev->inode) {
                rc = -ENOMEM;
                goto err_inode;
        }

        /* device_initialize() so cdev can reference kobj parent */
        device_initialize(dev);

        cdev = &dax_dev->cdev;
        cdev_init(cdev, &dax_fops);
        cdev->owner = parent->driver->owner;
        cdev->kobj.parent = &dev->kobj;
        rc = cdev_add(&dax_dev->cdev, dev_t, 1);
        if (rc)
                goto err_cdev;

        /* from here on we're committed to teardown via dax_dev_release() */
        dax_dev->num_resources = count;
        dax_dev->alive = true;
        dax_dev->region = dax_region;
        kref_get(&dax_region->kref);

        dev->devt = dev_t;
        dev->class = dax_class;
        dev->parent = parent;
        dev->groups = dax_attribute_groups;
        dev->release = dax_dev_release;
        dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
        rc = device_add(dev);
        if (rc) {
                put_device(dev);
                return ERR_PTR(rc);
        }

        rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
        if (rc)
                return ERR_PTR(rc);

        return dax_dev;

 err_cdev:
        iput(dax_dev->inode);
 err_inode:
        ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
        ida_simple_remove(&dax_region->ida, dax_dev->id);
 err_id:
        kfree(dax_dev);

        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);
static int __init dax_init(void)
{
        int rc;

        rc = dax_inode_init();
        if (rc)
                return rc;

        nr_dax = max(nr_dax, 256);
        rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
        if (rc)
                goto err_chrdev;

        dax_class = class_create(THIS_MODULE, "dax");
        if (IS_ERR(dax_class)) {
                rc = PTR_ERR(dax_class);
                goto err_class;
        }

        return 0;

 err_class:
        unregister_chrdev_region(dax_devt, nr_dax);
 err_chrdev:
        dax_inode_exit();
        return rc;
}
static void __exit dax_exit(void)
{
        class_destroy(dax_class);
        unregister_chrdev_region(dax_devt, nr_dax);
        ida_destroy(&dax_minor_ida);
        dax_inode_exit();
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);