/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mount.h>
#include <linux/pfn_t.h>
#include <linux/hash.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax.h"

static dev_t dax_devt;
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
module_param(nr_dax, int, S_IRUGO);
static struct vfsmount *dax_mnt;
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");

/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region for a memory range
 * @ida: instance id allocator for child dax devices
 * @base: linear address corresponding to @res
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @res: physical address range of the region
 * @pfn_flags: identify whether the pfns are paged back or not
 */
struct dax_region {
	int id;
	struct ida ida;
	void *base;
	struct kref kref;
	struct device *dev;
	unsigned int align;
	struct resource res;
	unsigned long pfn_flags;
};

/**
 * struct dax_dev - subdivision of a dax region
 * @region: parent region
 * @inode: backing inode, used to invalidate mappings at device teardown
 * @dev: device backing the character device
 * @cdev: core chardev data
 * @alive: !alive + rcu grace period == no new mappings can be established
 * @id: child id in the region
 * @num_resources: number of physical address extents in this device
 * @res: array of physical address ranges
 */
struct dax_dev {
	struct dax_region *region;
	struct inode *inode;
	struct device dev;
	struct cdev cdev;
	bool alive;
	int id;
	int num_resources;
	struct resource res[0];
};

static struct inode *dax_alloc_inode(struct super_block *sb)
{
	return kmem_cache_alloc(dax_cache, GFP_KERNEL);
}

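/*
 * Defer freeing the inode for an RCU grace period, following the
 * standard VFS ->destroy_inode() pattern, so that concurrent lockless
 * lookups cannot observe a freed inode.
 */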
static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(dax_cache, inode);
}

static void dax_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	return inode->i_cdev == data;
}

static int dax_set(struct inode *inode, void *data)
{
	inode->i_cdev = data;
	return 0;
}

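/*
 * Each dax device gets a dedicated inode on the private "dax"
 * pseudo-filesystem, keyed by its chardev, so that teardown can call
 * unmap_mapping_range() on the device's own address_space and
 * invalidate any live userspace mappings.
 */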
static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
{
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, cdev);

	if (!inode)
		return NULL;

	if (inode->i_state & I_NEW) {
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		inode->i_rdev = devt;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}
	return inode;
}

static void init_once(void *inode)
{
	inode_init_once(inode);
}

static int dax_inode_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_inode_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_type);
	kmem_cache_destroy(dax_cache);
}

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
		unsigned long pfn_flags)
{
	struct dax_region *dax_region;

	if (!IS_ALIGNED(res->start, align)
			|| !IS_ALIGNED(resource_size(res), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	memcpy(&dax_region->res, res, sizeof(*res));
	dax_region->pfn_flags = pfn_flags;
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	ida_init(&dax_region->ida);
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->base = addr;

	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static struct dax_dev *to_dax_dev(struct device *dev)
{
	return container_of(dev, struct dax_dev, dev);
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	unsigned long long size = 0;
	int i;

	for (i = 0; i < dax_dev->num_resources; i++)
		size += resource_size(&dax_dev->res[i]);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static struct attribute *dax_device_attributes[] = {
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group dax_device_attribute_group = {
	.attrs = dax_device_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dax_device_attribute_group,
	NULL,
};

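/*
 * Validate that a vma is suitable for device-dax: the device must be
 * alive, the mapping shared, the vma bounds aligned to the region, and
 * the vma DAX-capable.  Ranges without struct pages (PFN_DEV without
 * PFN_MAP) additionally require VM_DONTCOPY (MADV_DONTFORK) so that a
 * forked child can never fault them.
 */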
static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dax_dev->region;
	struct device *dev = &dax_dev->dev;
	unsigned long mask;

	if (!dax_dev->alive)
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info(dev, "%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

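/*
 * Translate a page offset into the device to a physical address by
 * walking the device's resource array: pgoff is decremented by each
 * resource's size in pages until it lands inside one, then the result
 * is range-checked against the requested mapping size.
 */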
static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res;
	phys_addr_t phys;
	int i;

	for (i = 0; i < dax_dev->num_resources; i++) {
		res = &dax_dev->res[i];
		phys = pgoff * PAGE_SIZE + res->start;
		if (phys >= res->start && phys <= res->end)
			break;
		pgoff -= PHYS_PFN(resource_size(res));
	}

	if (i < dax_dev->num_resources) {
		res = &dax_dev->res[i];
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}

static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	int rc = VM_FAULT_SIGBUS;
	phys_addr_t phys;
	pfn_t pfn;

	if (check_vma(dax_dev, vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dax_dev->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

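	/*
	 * The mapping is VM_MIXEDMAP and the pfn may have no struct page
	 * (PFN_DEV without PFN_MAP), so insert it with vm_insert_mixed()
	 * rather than a page-based insertion helper.
	 */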
	rc = vm_insert_mixed(vma, vmf->address, pfn);

	if (rc == -ENOMEM)
		return VM_FAULT_OOM;
	if (rc < 0 && rc != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int rc;
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
			? "write" : "read", vma->vm_start, vma->vm_end);
	rcu_read_lock();
	rc = __dax_dev_fault(dax_dev, vma, vmf);
	rcu_read_unlock();

	return rc;
}

static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
		struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd,
		unsigned int flags)
{
	unsigned long pmd_addr = addr & PMD_MASK;
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;

	if (check_vma(dax_dev, vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dax_dev->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
			flags & FAULT_FLAG_WRITE);
}

static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned int flags)
{
	int rc;
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
			current->comm, (flags & FAULT_FLAG_WRITE)
			? "write" : "read", vma->vm_start, vma->vm_end);

	rcu_read_lock();
	rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
	rcu_read_unlock();

	return rc;
}

static const struct vm_operations_struct dax_dev_vm_ops = {
	.fault = dax_dev_fault,
	.pmd_fault = dax_dev_pmd_fault,
};

static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dax_dev *dax_dev = filp->private_data;
	int rc;

	dev_dbg(&dax_dev->dev, "%s\n", __func__);

	rc = check_vma(dax_dev, vma, __func__);
	if (rc)
		return rc;

	vma->vm_ops = &dax_dev_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dax_dev || addr)
		goto out;

	dax_region = dax_dev->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

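	/*
	 * Over-allocate by one alignment unit, then shift the returned
	 * address up so it is congruent with the file offset modulo the
	 * region alignment; an aligned vma keeps pmd mappings possible.
	 */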
	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev;

	dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	inode->i_mapping = dax_dev->inode->i_mapping;
	inode->i_mapping->host = dax_dev->inode;
	filp->f_mapping = inode->i_mapping;
	filp->private_data = dax_dev;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
};

static void dax_dev_release(struct device *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	struct dax_region *dax_region = dax_dev->region;

	ida_simple_remove(&dax_region->ida, dax_dev->id);
	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
	dax_region_put(dax_region);
	iput(dax_dev->inode);
	kfree(dax_dev);
}

static void unregister_dax_dev(void *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	struct cdev *cdev = &dax_dev->cdev;

	dev_dbg(dev, "%s\n", __func__);

	/*
	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
	 * ensuring that any fault handlers that might have seen
	 * dax_dev->alive == true, have completed.  Any fault handlers
	 * that start after synchronize_rcu() has started will abort
	 * upon seeing dax_dev->alive == false.
	 */
	dax_dev->alive = false;
	synchronize_rcu();
	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
	cdev_del(cdev);
	device_unregister(dev);
}

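/*
 * Create a dax device backed by @count physical address ranges.  The
 * device is torn down automatically via unregister_dax_dev() when the
 * region's parent device goes away (devm_add_action_or_reset), with
 * final resource release in dax_dev_release() once the last device
 * reference drops.
 */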
struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
		struct resource *res, int count)
{
	struct device *parent = dax_region->dev;
	struct dax_dev *dax_dev;
	int rc = 0, minor, i;
	struct device *dev;
	struct cdev *cdev;
	dev_t dev_t;

	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
	if (!dax_dev)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		if (!IS_ALIGNED(res[i].start, dax_region->align)
				|| !IS_ALIGNED(resource_size(&res[i]),
					dax_region->align)) {
			rc = -EINVAL;
			break;
		}
		dax_dev->res[i].start = res[i].start;
		dax_dev->res[i].end = res[i].end;
	}

	if (i < count)
		goto err_id;

	dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
	if (dax_dev->id < 0) {
		rc = dax_dev->id;
		goto err_id;
	}

	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_minor;
	}

	dev_t = MKDEV(MAJOR(dax_devt), minor);
	dev = &dax_dev->dev;
	dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
	if (!dax_dev->inode) {
		rc = -ENOMEM;
		goto err_inode;
	}

	/* device_initialize() so cdev can reference kobj parent */
	device_initialize(dev);

	cdev = &dax_dev->cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = parent->driver->owner;
	cdev->kobj.parent = &dev->kobj;
	rc = cdev_add(&dax_dev->cdev, dev_t, 1);
	if (rc)
		goto err_cdev;

	/* from here on we're committed to teardown via dax_dev_release() */
	dax_dev->num_resources = count;
	dax_dev->alive = true;
	dax_dev->region = dax_region;
	kref_get(&dax_region->kref);

	dev->devt = dev_t;
	dev->class = dax_class;
	dev->parent = parent;
	dev->groups = dax_attribute_groups;
	dev->release = dax_dev_release;
	dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
	rc = device_add(dev);
	if (rc) {
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
	if (rc)
		return ERR_PTR(rc);

	return dax_dev;

 err_cdev:
	iput(dax_dev->inode);
 err_inode:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	ida_simple_remove(&dax_region->ida, dax_dev->id);
 err_id:
	kfree(dax_dev);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);

static int __init dax_init(void)
{
	int rc;

	rc = dax_inode_init();
	if (rc)
		return rc;

	nr_dax = max(nr_dax, 256);
	rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
	if (rc)
		goto err_chrdev;

	dax_class = class_create(THIS_MODULE, "dax");
	if (IS_ERR(dax_class)) {
		rc = PTR_ERR(dax_class);
		goto err_class;
	}

	return 0;

 err_class:
	unregister_chrdev_region(dax_devt, nr_dax);
 err_chrdev:
	dax_inode_exit();
	return rc;
}

static void __exit dax_exit(void)
{
	class_destroy(dax_class);
	unregister_chrdev_region(dax_devt, nr_dax);
	ida_destroy(&dax_minor_ida);
	dax_inode_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);