/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mount.h>
#include <linux/pfn_t.h>
#include <linux/hash.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax.h"
static dev_t dax_devt;
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
module_param(nr_dax, int, S_IRUGO);
static struct vfsmount *dax_mnt;
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");
/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region for a memory range
 * @ida: instance id allocator for child dax devices
 * @base: linear address corresponding to @res
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @res: physical address range of the region
 * @pfn_flags: identify whether the pfns are paged back or not
 */
struct dax_region {
        int id;
        struct ida ida;
        void *base;
        struct kref kref;
        struct device *dev;
        unsigned int align;
        struct resource res;
        unsigned long pfn_flags;
};
/**
 * struct dax_dev - subdivision of a dax region
 * @region - parent region
 * @inode - backing inode shared by all opens of this device
 * @dev - device backing the character device
 * @cdev - core chardev data
 * @alive - !alive + rcu grace period == no new mappings can be established
 * @id - child id in the region
 * @num_resources - number of physical address extents in this device
 * @res - array of physical address ranges
 */
struct dax_dev {
        struct dax_region *region;
        struct inode *inode;
        struct device dev;
        struct cdev cdev;
        bool alive;
        int id;
        int num_resources;
        struct resource res[0];
};
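/*
 * Note: res[0] is a trailing variable-size array. devm_create_dax_dev()
 * allocates struct dax_dev with kzalloc(sizeof(*dax_dev) +
 * sizeof(*res) * count, ...) so a single allocation covers the header
 * plus all @count resource extents.
 */
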
static ssize_t id_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region;
        ssize_t rc = -ENXIO;

        device_lock(dev);
        dax_region = dev_get_drvdata(dev);
        if (dax_region)
                rc = sprintf(buf, "%d\n", dax_region->id);
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(id);
static ssize_t region_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region;
        ssize_t rc = -ENXIO;

        device_lock(dev);
        dax_region = dev_get_drvdata(dev);
        if (dax_region)
                rc = sprintf(buf, "%llu\n", (unsigned long long)
                                resource_size(&dax_region->res));
        device_unlock(dev);

        return rc;
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
                region_size_show, NULL);
static ssize_t align_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_region *dax_region;
        ssize_t rc = -ENXIO;

        device_lock(dev);
        dax_region = dev_get_drvdata(dev);
        if (dax_region)
                rc = sprintf(buf, "%u\n", dax_region->align);
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(align);
static struct attribute *dax_region_attributes[] = {
        &dev_attr_region_size.attr,
        &dev_attr_align.attr,
        &dev_attr_id.attr,
        NULL,
};

static const struct attribute_group dax_region_attribute_group = {
        .name = "dax_region",
        .attrs = dax_region_attributes,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
        &dax_region_attribute_group,
        NULL,
};
static struct inode *dax_alloc_inode(struct super_block *sb)
{
        return kmem_cache_alloc(dax_cache, GFP_KERNEL);
}
static void dax_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);

        kmem_cache_free(dax_cache, inode);
}
static void dax_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, dax_i_callback);
}
static const struct super_operations dax_sops = {
        .statfs = simple_statfs,
        .alloc_inode = dax_alloc_inode,
        .destroy_inode = dax_destroy_inode,
        .drop_inode = generic_delete_inode,
};
static struct dentry *dax_mount(struct file_system_type *fs_type,
                int flags, const char *dev_name, void *data)
{
        return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}
static struct file_system_type dax_type = {
        .name = "dax",
        .mount = dax_mount,
        .kill_sb = kill_anon_super,
};
static int dax_test(struct inode *inode, void *data)
{
        return inode->i_cdev == data;
}
static int dax_set(struct inode *inode, void *data)
{
        inode->i_cdev = data;
        return 0;
}
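/*
 * dax_test() and dax_set() are the iget5_locked() match / init callbacks:
 * inodes in the dax pseudo-fs are keyed by their cdev pointer, with the
 * devt (offset by DAXFS_MAGIC) hashed to spread the lookup buckets.
 */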
static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
{
        struct inode *inode;

        inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
                        dax_test, dax_set, cdev);
        if (!inode)
                return NULL;

        if (inode->i_state & I_NEW) {
                inode->i_mode = S_IFCHR;
                inode->i_flags = S_DAX;
                inode->i_rdev = devt;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                unlock_new_inode(inode);
        }
        return inode;
}
static void init_once(void *inode)
{
        inode_init_once(inode);
}
static int dax_inode_init(void)
{
        int rc;

        dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
                        (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                         SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                        init_once);
        if (!dax_cache)
                return -ENOMEM;

        rc = register_filesystem(&dax_type);
        if (rc)
                goto err_register_fs;

        dax_mnt = kern_mount(&dax_type);
        if (IS_ERR(dax_mnt)) {
                rc = PTR_ERR(dax_mnt);
                goto err_mount;
        }
        dax_superblock = dax_mnt->mnt_sb;

        return 0;

 err_mount:
        unregister_filesystem(&dax_type);
 err_register_fs:
        kmem_cache_destroy(dax_cache);

        return rc;
}
static void dax_inode_exit(void)
{
        kern_unmount(dax_mnt);
        unregister_filesystem(&dax_type);
        kmem_cache_destroy(dax_cache);
}
static void dax_region_free(struct kref *kref)
{
        struct dax_region *dax_region;

        dax_region = container_of(kref, struct dax_region, kref);
        kfree(dax_region);
}
void dax_region_put(struct dax_region *dax_region)
{
        kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);
static void dax_region_unregister(void *region)
{
        struct dax_region *dax_region = region;

        sysfs_remove_groups(&dax_region->dev->kobj,
                        dax_region_attribute_groups);
        dax_region_put(dax_region);
}
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
                struct resource *res, unsigned int align, void *addr,
                unsigned long pfn_flags)
{
        struct dax_region *dax_region;

        /*
         * The DAX core assumes that it can store its private data in
         * parent->driver_data. This WARN is a reminder / safeguard for
         * developers of device-dax drivers.
         */
        if (dev_get_drvdata(parent)) {
                dev_WARN(parent, "dax core failed to setup private data\n");
                return NULL;
        }

        if (!IS_ALIGNED(res->start, align)
                        || !IS_ALIGNED(resource_size(res), align))
                return NULL;

        dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
        if (!dax_region)
                return NULL;

        dev_set_drvdata(parent, dax_region);
        memcpy(&dax_region->res, res, sizeof(*res));
        dax_region->pfn_flags = pfn_flags;
        kref_init(&dax_region->kref);
        dax_region->id = region_id;
        ida_init(&dax_region->ida);
        dax_region->align = align;
        dax_region->dev = parent;
        dax_region->base = addr;
        if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
                kfree(dax_region);
                return NULL;
        }

        kref_get(&dax_region->kref);
        if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
                return NULL;
        return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);
static struct dax_dev *to_dax_dev(struct device *dev)
{
        return container_of(dev, struct dax_dev, dev);
}
static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_dev *dax_dev = to_dax_dev(dev);
        unsigned long long size = 0;
        int i;

        for (i = 0; i < dax_dev->num_resources; i++)
                size += resource_size(&dax_dev->res[i]);

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);
static struct attribute *dax_device_attributes[] = {
        &dev_attr_size.attr,
        NULL,
};

static const struct attribute_group dax_device_attribute_group = {
        .attrs = dax_device_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
        &dax_device_attribute_group,
        NULL,
};
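/*
 * check_vma() centralizes the invariants that device-dax mappings rely
 * on. It runs from dax_mmap() and again from every fault handler, since
 * a vma can change (e.g. via mremap()) after the initial mmap()-time
 * validation.
 */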
static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
                const char *func)
{
        struct dax_region *dax_region = dax_dev->region;
        struct device *dev = &dax_dev->dev;
        unsigned long mask;

        if (!dax_dev->alive)
                return -ENXIO;

        /* prevent private mappings from being established */
        if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
                dev_info(dev, "%s: %s: fail, attempted private mapping\n",
                                current->comm, func);
                return -EINVAL;
        }

        mask = dax_region->align - 1;
        if (vma->vm_start & mask || vma->vm_end & mask) {
                dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
                                current->comm, func, vma->vm_start, vma->vm_end,
                                mask);
                return -EINVAL;
        }

        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
                        && (vma->vm_flags & VM_DONTCOPY) == 0) {
                dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
                                current->comm, func);
                return -EINVAL;
        }

        if (!vma_is_dax(vma)) {
                dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
                                current->comm, func);
                return -EINVAL;
        }

        return 0;
}
static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
                unsigned long size)
{
        struct resource *res;
        phys_addr_t phys;
        int i;

        for (i = 0; i < dax_dev->num_resources; i++) {
                res = &dax_dev->res[i];
                phys = pgoff * PAGE_SIZE + res->start;
                if (phys >= res->start && phys <= res->end)
                        break;
                pgoff -= PHYS_PFN(resource_size(res));
        }

        if (i < dax_dev->num_resources) {
                res = &dax_dev->res[i];
                if (phys + size - 1 <= res->end)
                        return phys;
        }

        return -1;
}
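/*
 * Worked example for the extent walk above (illustrative numbers): with
 * two extents of 16 pages each, a fault at pgoff 20 first computes a phys
 * past res[0].end, so the loop subtracts res[0]'s 16 pages and retries,
 * resolving to page 4 of res[1]. The final bounds check ensures the whole
 * @size (PAGE_SIZE, PMD_SIZE, or PUD_SIZE) fits inside the matched extent.
 */
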
static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
{
        struct device *dev = &dax_dev->dev;
        struct dax_region *dax_region;
        int rc = VM_FAULT_SIGBUS;
        phys_addr_t phys;
        pfn_t pfn;

        if (check_vma(dax_dev, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dax_dev->region;
        if (dax_region->align > PAGE_SIZE) {
                dev_dbg(dev, "%s: alignment > fault size\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
                                vmf->pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);

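        /*
         * -EBUSY means a racing fault already installed this pfn; that
         * is success from our point of view, so only other negative
         * errors translate to SIGBUS.
         */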
        if (rc == -ENOMEM)
                return VM_FAULT_OOM;
        if (rc < 0 && rc != -EBUSY)
                return VM_FAULT_SIGBUS;

        return VM_FAULT_NOPAGE;
}
static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
{
        unsigned long pmd_addr = vmf->address & PMD_MASK;
        struct device *dev = &dax_dev->dev;
        struct dax_region *dax_region;
        phys_addr_t phys;
        pgoff_t pgoff;
        pfn_t pfn;

        if (check_vma(dax_dev, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dax_dev->region;
        if (dax_region->align > PMD_SIZE) {
                dev_dbg(dev, "%s: alignment > fault size\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        /* dax pmd mappings require pfn_t_devmap() */
        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
                dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        pgoff = linear_page_index(vmf->vma, pmd_addr);
        phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
                                pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
                        vmf->flags & FAULT_FLAG_WRITE);
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
{
        unsigned long pud_addr = vmf->address & PUD_MASK;
        struct device *dev = &dax_dev->dev;
        struct dax_region *dax_region;
        phys_addr_t phys;
        pgoff_t pgoff;
        pfn_t pfn;

        if (check_vma(dax_dev, vmf->vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dax_dev->region;
        if (dax_region->align > PUD_SIZE) {
                dev_dbg(dev, "%s: alignment > fault size\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        /* dax pud mappings require pfn_t_devmap() */
        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
                dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        pgoff = linear_page_index(vmf->vma, pud_addr);
        phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
                                pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
                        vmf->flags & FAULT_FLAG_WRITE);
}
#else
static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
{
        return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
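/*
 * dax_dev_huge_fault() runs the size-specific handler under
 * rcu_read_lock(); this pairs with the synchronize_rcu() in
 * unregister_dax_dev(), guaranteeing that once teardown proceeds no
 * in-flight fault handler can still observe dax_dev->alive == true.
 */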
static int dax_dev_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int rc;
        struct file *filp = vmf->vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
                        current->comm, (vmf->flags & FAULT_FLAG_WRITE)
                        ? "write" : "read",
                        vmf->vma->vm_start, vmf->vma->vm_end);

        rcu_read_lock();
        switch (pe_size) {
        case PE_SIZE_PTE:
                rc = __dax_dev_pte_fault(dax_dev, vmf);
                break;
        case PE_SIZE_PMD:
                rc = __dax_dev_pmd_fault(dax_dev, vmf);
                break;
        case PE_SIZE_PUD:
                rc = __dax_dev_pud_fault(dax_dev, vmf);
                break;
        default:
                /* set rc rather than return, so rcu_read_unlock() runs */
                rc = VM_FAULT_FALLBACK;
        }
        rcu_read_unlock();

        return rc;
}
static int dax_dev_fault(struct vm_fault *vmf)
{
        return dax_dev_huge_fault(vmf, PE_SIZE_PTE);
}
static const struct vm_operations_struct dax_dev_vm_ops = {
        .fault = dax_dev_fault,
        .huge_fault = dax_dev_huge_fault,
};
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct dax_dev *dax_dev = filp->private_data;
        int rc;

        dev_dbg(&dax_dev->dev, "%s\n", __func__);

        rc = check_vma(dax_dev, vma, __func__);
        if (rc)
                return rc;

        vma->vm_ops = &dax_dev_vm_ops;
        vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        return 0;
}
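/*
 * Example (illustrative) userspace consumer; the device path depends on
 * the region/device ids assigned at probe time:
 *
 *      fd = open("/dev/dax0.0", O_RDWR);
 *      ptr = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * MAP_SHARED is required since check_vma() rejects private mappings, and
 * the mapping address and length must honor dax_region->align.
 */
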
/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags)
{
        unsigned long off, off_end, off_align, len_align, addr_align, align;
        struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
        struct dax_region *dax_region;

        if (!dax_dev || addr)
                goto out;

        dax_region = dax_dev->region;
        align = dax_region->align;
        off = pgoff << PAGE_SHIFT;
        off_end = off + len;
        off_align = round_up(off, align);

        if ((off_end <= off_align) || ((off_end - off_align) < align))
                goto out;

        len_align = len + align;
        if ((off + len_align) < off)
                goto out;

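        /*
         * Over-ask by one alignment unit, then nudge the returned address
         * forward so it is congruent to the file offset modulo align; a
         * PMD/PUD-sized fault can then map offset and address together.
         */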
        addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
                        pgoff, flags);
        if (!IS_ERR_VALUE(addr_align)) {
                addr_align += (off - addr_align) & (align - 1);
                return addr_align;
        }
 out:
        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
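/*
 * Every open aliases its chardev inode's mapping to the one shared dax
 * inode, so all vmas against this device land in a single address_space;
 * that is what lets unregister_dax_dev() shoot down every mapping with a
 * single unmap_mapping_range() call.
 */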
static int dax_open(struct inode *inode, struct file *filp)
{
        struct dax_dev *dax_dev;

        dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
        dev_dbg(&dax_dev->dev, "%s\n", __func__);
        inode->i_mapping = dax_dev->inode->i_mapping;
        inode->i_mapping->host = dax_dev->inode;
        filp->f_mapping = inode->i_mapping;
        filp->private_data = dax_dev;
        inode->i_flags = S_DAX;

        return 0;
}
static int dax_release(struct inode *inode, struct file *filp)
{
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(&dax_dev->dev, "%s\n", __func__);
        return 0;
}
static const struct file_operations dax_fops = {
        .llseek = noop_llseek,
        .owner = THIS_MODULE,
        .open = dax_open,
        .release = dax_release,
        .get_unmapped_area = dax_get_unmapped_area,
        .mmap = dax_mmap,
};
static void dax_dev_release(struct device *dev)
{
        struct dax_dev *dax_dev = to_dax_dev(dev);
        struct dax_region *dax_region = dax_dev->region;

        ida_simple_remove(&dax_region->ida, dax_dev->id);
        ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
        dax_region_put(dax_region);
        iput(dax_dev->inode);
        kfree(dax_dev);
}
static void unregister_dax_dev(void *dev)
{
        struct dax_dev *dax_dev = to_dax_dev(dev);
        struct cdev *cdev = &dax_dev->cdev;

        dev_dbg(dev, "%s\n", __func__);

        /*
         * Note, rcu is not protecting the liveness of dax_dev, rcu is
         * ensuring that any fault handlers that might have seen
         * dax_dev->alive == true, have completed. Any fault handlers
         * that start after synchronize_rcu() has started will abort
         * upon seeing dax_dev->alive == false.
         */
        dax_dev->alive = false;
        synchronize_rcu();
        unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
        cdev_del(cdev);
        device_unregister(dev);
}
struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
                struct resource *res, int count)
{
        struct device *parent = dax_region->dev;
        struct dax_dev *dax_dev;
        int rc = 0, minor, i;
        dev_t dev_t;
        struct device *dev;
        struct cdev *cdev;

        dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
        if (!dax_dev)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < count; i++) {
                if (!IS_ALIGNED(res[i].start, dax_region->align)
                                || !IS_ALIGNED(resource_size(&res[i]),
                                        dax_region->align)) {
                        rc = -EINVAL;
                        break;
                }
                dax_dev->res[i].start = res[i].start;
                dax_dev->res[i].end = res[i].end;
        }

        if (i < count)
                goto err_id;

        dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
        if (dax_dev->id < 0) {
                rc = dax_dev->id;
                goto err_id;
        }

        minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
        if (minor < 0) {
                rc = minor;
                goto err_minor;
        }

        dev_t = MKDEV(MAJOR(dax_devt), minor);
        dev = &dax_dev->dev;
        dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
        if (!dax_dev->inode) {
                rc = -ENOMEM;
                goto err_inode;
        }

        /* device_initialize() so cdev can reference kobj parent */
        device_initialize(dev);

        cdev = &dax_dev->cdev;
        cdev_init(cdev, &dax_fops);
        cdev->owner = parent->driver->owner;
        cdev->kobj.parent = &dev->kobj;
        rc = cdev_add(&dax_dev->cdev, dev_t, 1);
        if (rc)
                goto err_cdev;

        /* from here on we're committed to teardown via dax_dev_release() */
        dax_dev->num_resources = count;
        dax_dev->alive = true;
        dax_dev->region = dax_region;
        kref_get(&dax_region->kref);

        dev->devt = dev_t;
        dev->class = dax_class;
        dev->parent = parent;
        dev->groups = dax_attribute_groups;
        dev->release = dax_dev_release;
        dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
        rc = device_add(dev);
        if (rc) {
                put_device(dev);
                return ERR_PTR(rc);
        }

        rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
        if (rc)
                return ERR_PTR(rc);

        return dax_dev;

 err_cdev:
        iput(dax_dev->inode);
 err_inode:
        ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
        ida_simple_remove(&dax_region->ida, dax_dev->id);
 err_id:
        kfree(dax_dev);

        return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);
static int __init dax_init(void)
{
        int rc;

        rc = dax_inode_init();
        if (rc)
                return rc;

        nr_dax = max(nr_dax, 256);
        rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
        if (rc)
                goto err_chrdev;

        dax_class = class_create(THIS_MODULE, "dax");
        if (IS_ERR(dax_class)) {
                rc = PTR_ERR(dax_class);
                goto err_class;
        }

        return 0;

 err_class:
        unregister_chrdev_region(dax_devt, nr_dax);
 err_chrdev:
        dax_inode_exit();
        return rc;
}
static void __exit dax_exit(void)
{
        class_destroy(dax_class);
        unregister_chrdev_region(dax_devt, nr_dax);
        ida_destroy(&dax_minor_ida);
        dax_inode_exit();
}
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);