// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "bus.h"

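/*
 * Validate that a VMA is suitable for mapping device-dax capacity:
 * the backing dax_device must still be alive, the mapping must be
 * shared (private/COW mappings are rejected), and the VMA must be
 * aligned to the device alignment and DAX-capable.
 */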
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dev_dax->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

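/*
 * Translate a device-relative page offset to the physical address
 * backing it, walking the device's ranges; returns -1 if the offset
 * is unmapped or @size bytes would run past the containing range.
 */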
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	int i;

	for (i = 0; i < dev_dax->nr_range; i++) {
		struct dev_dax_range *dax_range = &dev_dax->ranges[i];
		struct range *range = &dax_range->range;
		unsigned long long pgoff_end;
		phys_addr_t phys;

		pgoff_end = dax_range->pgoff + PHYS_PFN(range_len(range)) - 1;
		if (pgoff < dax_range->pgoff || pgoff > pgoff_end)
			continue;
		phys = PFN_PHYS(pgoff - dax_range->pgoff) + range->start;
		if (phys + size - 1 <= range->end)
			return phys;
		break;
	}
	return -1;
}

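/*
 * Map a single PAGE_SIZE page at the faulting address; the device
 * alignment must be exactly PAGE_SIZE for PTE faults to succeed.
 */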
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	struct device *dev = &dev_dax->dev;
	phys_addr_t phys;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	if (dev_dax->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dev_dax->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dev_dax->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}

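/*
 * Map a PMD-sized huge page: SIGBUS if the device alignment exceeds
 * PMD_SIZE or the PMD would cross the VMA bounds, fall back to PTEs
 * if the alignment is smaller than PMD_SIZE.
 */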
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	if (dev_dax->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dev_dax->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dev_dax->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dev_dax->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}

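/*
 * PUD-sized faults mirror the PMD path, but only when the
 * architecture supports transparent huge PUD pages; otherwise report
 * VM_FAULT_FALLBACK so smaller mappings are used.
 */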
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	if (dev_dax->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dev_dax->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dev_dax->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dev_dax->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);

	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

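/*
 * Common fault handler: dispatch on the requested mapping size and,
 * on success, stamp each backing page with the file mapping and index
 * so the pages can be associated back to this inode later.
 */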
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct file *filp = vmf->vma->vm_file;
	unsigned long fault_size;
	vm_fault_t rc = VM_FAULT_SIGBUS;
	int id;
	pfn_t pfn;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		fault_size = PAGE_SIZE;
		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PMD:
		fault_size = PMD_SIZE;
		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PUD:
		fault_size = PUD_SIZE;
		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}

	if (rc == VM_FAULT_NOPAGE) {
		unsigned long i;
		pgoff_t pgoff;

		/*
		 * In the device-dax case the only possibility for a
		 * VM_FAULT_NOPAGE result is when device-dax capacity is
		 * mapped. No need to consider the zero page, or racing
		 * conflicting mappings.
		 */
		pgoff = linear_page_index(vmf->vma, vmf->address
				& ~(fault_size - 1));
		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
			struct page *page;

			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
			if (page->mapping)
				continue;
			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}
	dax_read_unlock(id);

	return rc;
}

static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

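/* disallow splitting a VMA at an address not aligned to the device */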
static int dev_dax_may_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;

	if (!IS_ALIGNED(addr, dev_dax->align))
		return -EINVAL;
	return 0;
}

static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;

	return dev_dax->align;
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.may_split = dev_dax_may_split,
	.pagesize = dev_dax_pagesize,
};

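/*
 * Validate the VMA under the dax read lock at mmap time (liveness is
 * re-checked on every fault) and opt the mapping in to huge pages.
 */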
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;

	if (!dev_dax || addr)
		goto out;

	align = dev_dax->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static const struct address_space_operations dev_dax_aops = {
	.set_page_dirty = noop_set_page_dirty,
	.invalidatepage = noop_invalidatepage,
};

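/*
 * Redirect the opener's inode mapping to the dax_device inode so all
 * openers of this device share a single address_space.
 */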
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	inode->i_mapping->a_ops = &dev_dax_aops;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->f_sb_err = file_sample_sb_err(filp);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
	.mmap_supported_flags = MAP_SYNC,
};

static void dev_dax_cdev_del(void *cdev)
{
	cdev_del(cdev);
}

static void dev_dax_kill(void *dev_dax)
{
	kill_dev_dax(dev_dax);
}

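/*
 * Set up the dev_pagemap covering all device ranges (allocating one
 * if the device did not supply a static pgmap), map the ranges via
 * devm_memremap_pages(), and register the character device; teardown
 * is entirely driven by devm actions.
 */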
int dev_dax_probe(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct device *dev = &dev_dax->dev;
	struct dev_pagemap *pgmap;
	struct inode *inode;
	struct cdev *cdev;
	void *addr;
	int rc, i;

	pgmap = dev_dax->pgmap;
	if (dev_WARN_ONCE(dev, pgmap && dev_dax->nr_range > 1,
			"static pgmap / multi-range device conflict\n"))
		return -EINVAL;

	if (!pgmap) {
		pgmap = devm_kzalloc(dev, sizeof(*pgmap) + sizeof(struct range)
				* (dev_dax->nr_range - 1), GFP_KERNEL);
		if (!pgmap)
			return -ENOMEM;
		pgmap->nr_range = dev_dax->nr_range;
	}

	for (i = 0; i < dev_dax->nr_range; i++) {
		struct range *range = &dev_dax->ranges[i].range;

		if (!devm_request_mem_region(dev, range->start,
					range_len(range), dev_name(dev))) {
			dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve range\n",
					i, range->start, range->end);
			return -EBUSY;
		}
		/* don't update the range for static pgmap */
		if (!dev_dax->pgmap)
			pgmap->ranges[i] = *range;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	addr = devm_memremap_pages(dev, pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	if (dev->class) {
		/* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
		cdev->owner = dev->parent->driver->owner;
	} else
		cdev->owner = dev->driver->owner;
	cdev_set_parent(cdev, &dev->kobj);
	rc = cdev_add(cdev, dev->devt, 1);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
	if (rc)
		return rc;

	run_dax(dax_dev);
	return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
EXPORT_SYMBOL_GPL(dev_dax_probe);

static int dev_dax_remove(struct dev_dax *dev_dax)
{
	/* all probe actions are unwound by devm */
	return 0;
}

static struct dax_device_driver device_dax_driver = {
	.probe = dev_dax_probe,
	.remove = dev_dax_remove,
	.match_always = 1,
};

static int __init dax_init(void)
{
	return dax_driver_register(&device_dax_driver);
}

static void __exit dax_exit(void)
{
	dax_driver_unregister(&device_dax_driver);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_init);
module_exit(dax_exit);
MODULE_ALIAS_DAX_DEVICE(0);