/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
        int i;

        for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
                init_waitqueue_head(wait_table + i);
        return 0;
}
fs_initcall(init_dax_wait_table);

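/*
 * Take a reference on the block device's request queue and translate the
 * (sector, size) pair in @dax into a kernel virtual address and pfn via
 * bdev_direct_access().  On success the queue reference is held until the
 * matching dax_unmap_atomic() call; on failure dax->addr carries the error.
 */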
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
        struct request_queue *q = bdev->bd_queue;
        long rc = -EIO;

        dax->addr = ERR_PTR(-EIO);
        if (blk_queue_enter(q, true) != 0)
                return rc;

        rc = bdev_direct_access(bdev, dax);
        if (rc < 0) {
                dax->addr = ERR_PTR(rc);
                blk_queue_exit(q);
                return rc;
        }
        return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
                const struct blk_dax_ctl *dax)
{
        if (IS_ERR(dax->addr))
                return;
        blk_queue_exit(bdev->bd_queue);
}

static int dax_is_pmd_entry(void *entry)
{
        return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
        return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
        return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
        return (unsigned long)entry & RADIX_DAX_EMPTY;
}

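/*
 * Read the PAGE_SIZE-sized, page-aligned block containing sector @n from the
 * DAX-capable block device into a freshly allocated page and return it, or
 * an ERR_PTR() on failure.
 */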
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
        struct page *page = alloc_pages(GFP_KERNEL, 0);
        struct blk_dax_ctl dax = {
                .size = PAGE_SIZE,
                .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
        };
        long rc;

        if (!page)
                return ERR_PTR(-ENOMEM);

        rc = dax_map_atomic(bdev, &dax);
        if (rc < 0)
                return ERR_PTR(rc);
        memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
        dax_unmap_atomic(bdev, &dax);
        return page;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
        struct address_space *mapping;
        pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
        wait_queue_t wait;
        struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
                pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
        unsigned long hash;

        /*
         * If 'entry' is a PMD, align the 'index' that we use for the wait
         * queue to the start of that PMD.  This ensures that all offsets in
         * the range covered by the PMD map to the same bit lock.
         */
        if (dax_is_pmd_entry(entry))
                index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

        key->mapping = mapping;
        key->entry_start = index;

        hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
        return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
                int sync, void *keyp)
{
        struct exceptional_entry_key *key = keyp;
        struct wait_exceptional_entry_queue *ewait =
                container_of(wait, struct wait_exceptional_entry_queue, wait);

        if (key->mapping != ewait->key.mapping ||
            key->entry_start != ewait->key.entry_start)
                return 0;
        return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
        return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

        entry |= RADIX_DAX_ENTRY_LOCK;
        radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
        return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

        entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
        radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
        return (void *)entry;
}

/*
 * Look up an entry in the radix tree and, if it is an exceptional entry, wait
 * for it to become unlocked before returning it.  The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and later wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
                pgoff_t index, void ***slotp)
{
        void *entry, **slot;
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        for (;;) {
                entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
                                &slot);
                if (!entry || !radix_tree_exceptional_entry(entry) ||
                    !slot_locked(mapping, slot)) {
                        if (slotp)
                                *slotp = slot;
                        return entry;
                }

                wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&mapping->tree_lock);
                schedule();
                finish_wait(wq, &ewait.wait);
                spin_lock_irq(&mapping->tree_lock);
        }
}

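/*
 * Clear the lock bit on the exceptional radix tree entry at @index and wake
 * up one task waiting for that entry to become unlocked.
 */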
static void dax_unlock_mapping_entry(struct address_space *mapping,
                pgoff_t index)
{
        void *entry, **slot;

        spin_lock_irq(&mapping->tree_lock);
        entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
        if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
                         !slot_locked(mapping, slot))) {
                spin_unlock_irq(&mapping->tree_lock);
                return;
        }
        unlock_slot(mapping, slot);
        spin_unlock_irq(&mapping->tree_lock);
        dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
                pgoff_t index, void *entry)
{
        if (!radix_tree_exceptional_entry(entry)) {
                unlock_page(entry);
                put_page(entry);
        } else {
                dax_unlock_mapping_entry(mapping, index);
        }
}

/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
                pgoff_t index, void *entry)
{
        if (!radix_tree_exceptional_entry(entry))
                return;

        /* We have to wake up the next waiter for the radix tree entry lock */
        dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Find the radix tree entry at the given index.  If it points to a page,
 * return with the page locked.  If it points to an exceptional entry, return
 * with the radix tree entry locked.  If the radix tree doesn't contain the
 * given index, create an empty exceptional entry for the index and return
 * with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries.  There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags.  For
 * persistent memory the benefit is doubtful.  We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
                unsigned long size_flag)
{
        bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
        void *entry, **slot;

restart:
        spin_lock_irq(&mapping->tree_lock);
        entry = get_unlocked_mapping_entry(mapping, index, &slot);

        if (entry) {
                if (size_flag & RADIX_DAX_PMD) {
                        if (!radix_tree_exceptional_entry(entry) ||
                            dax_is_pte_entry(entry)) {
                                put_unlocked_mapping_entry(mapping, index,
                                                entry);
                                entry = ERR_PTR(-EEXIST);
                                goto out_unlock;
                        }
                } else { /* trying to grab a PTE entry */
                        if (radix_tree_exceptional_entry(entry) &&
                            dax_is_pmd_entry(entry) &&
                            (dax_is_zero_entry(entry) ||
                             dax_is_empty_entry(entry))) {
                                pmd_downgrade = true;
                        }
                }
        }

        /* No entry for given index? Make sure radix tree is big enough. */
        if (!entry || pmd_downgrade) {
                int err;

                if (pmd_downgrade) {
                        /*
                         * Make sure 'entry' remains valid while we drop
                         * mapping->tree_lock.
                         */
                        entry = lock_slot(mapping, slot);
                }

                spin_unlock_irq(&mapping->tree_lock);
                /*
                 * Besides huge zero pages the only other thing that gets
                 * downgraded are empty entries which don't need to be
                 * unmapped.
                 */
                if (pmd_downgrade && dax_is_zero_entry(entry))
                        unmap_mapping_range(mapping,
                                (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

                err = radix_tree_preload(
                                mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
                if (err) {
                        if (pmd_downgrade)
                                put_locked_mapping_entry(mapping, index, entry);
                        return ERR_PTR(err);
                }
                spin_lock_irq(&mapping->tree_lock);

                if (!entry) {
                        /*
                         * We needed to drop the page_tree lock while calling
                         * radix_tree_preload() and we didn't have an entry to
                         * lock.  See if another thread inserted an entry at
                         * our index during this time.
                         */
                        entry = __radix_tree_lookup(&mapping->page_tree, index,
                                        NULL, &slot);
                        if (entry) {
                                radix_tree_preload_end();
                                spin_unlock_irq(&mapping->tree_lock);
                                goto restart;
                        }
                }

                if (pmd_downgrade) {
                        radix_tree_delete(&mapping->page_tree, index);
                        mapping->nrexceptional--;
                        dax_wake_mapping_entry_waiter(mapping, index, entry,
                                        true);
                }

                entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

                err = __radix_tree_insert(&mapping->page_tree, index,
                                dax_radix_order(entry), entry);
                radix_tree_preload_end();
                if (err) {
                        spin_unlock_irq(&mapping->tree_lock);
                        /*
                         * Our insertion of a DAX entry failed, most likely
                         * because we were inserting a PMD entry and it
                         * collided with a PTE sized entry at a different
                         * index in the PMD range.  We haven't inserted
                         * anything into the radix tree and have no waiters to
                         * wake.
                         */
                        return ERR_PTR(err);
                }
                /* Good, we have inserted empty locked entry into the tree. */
                mapping->nrexceptional++;
                spin_unlock_irq(&mapping->tree_lock);
                return entry;
        }
        /* Normal page in radix tree? */
        if (!radix_tree_exceptional_entry(entry)) {
                struct page *page = entry;

                get_page(page);
                spin_unlock_irq(&mapping->tree_lock);
                lock_page(page);
                /* Page got truncated? Retry... */
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        put_page(page);
                        goto restart;
                }
                return page;
        }
        entry = lock_slot(mapping, slot);
out_unlock:
        spin_unlock_irq(&mapping->tree_lock);
        return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                pgoff_t index, void *entry, bool wake_all)
{
        struct exceptional_entry_key key;
        wait_queue_head_t *wq;

        wq = dax_entry_waitqueue(mapping, index, entry, &key);

        /*
         * Checking for locked entry and prepare_to_wait_exclusive() happens
         * under mapping->tree_lock, ditto for entry handling in our callers.
         * So at this point all tasks that could have seen our entry locked
         * must be in the waitqueue and the following check will see them.
         */
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

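/*
 * Common helper for removing an exceptional DAX entry.  With @trunc set the
 * entry is removed unconditionally (truncate path); otherwise dirty and
 * towrite entries are left in place so a racing fsync cannot lose them.
 * Returns 1 if an entry was removed, 0 otherwise.
 */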
static int __dax_invalidate_mapping_entry(struct address_space *mapping,
                pgoff_t index, bool trunc)
{
        int ret = 0;
        void *entry;
        struct radix_tree_root *page_tree = &mapping->page_tree;

        spin_lock_irq(&mapping->tree_lock);
        entry = get_unlocked_mapping_entry(mapping, index, NULL);
        if (!entry || !radix_tree_exceptional_entry(entry))
                goto out;
        if (!trunc &&
            (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
             radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
                goto out;
        radix_tree_delete(page_tree, index);
        mapping->nrexceptional--;
        ret = 1;
out:
        put_unlocked_mapping_entry(mapping, index, entry);
        spin_unlock_irq(&mapping->tree_lock);
        return ret;
}
/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
        int ret = __dax_invalidate_mapping_entry(mapping, index, true);

        /*
         * This gets called from the truncate / punch_hole path.  As such,
         * the caller must hold locks protecting against concurrent
         * modifications of the radix tree (usually fs-private i_mmap_sem for
         * writing).  Since the caller has seen an exceptional entry for this
         * index, we had better find it at that index as well...
         */
        WARN_ON_ONCE(!ret);
        return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
                pgoff_t index)
{
        return __dax_invalidate_mapping_entry(mapping, index, false);
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void **entry,
                struct vm_fault *vmf)
{
        struct page *page;
        int ret;

        /* Hole page already exists? Return it... */
        if (!radix_tree_exceptional_entry(*entry)) {
                page = *entry;
                goto out;
        }

        /* This will replace locked radix tree entry with a hole page */
        page = find_or_create_page(mapping, vmf->pgoff,
                        vmf->gfp_mask | __GFP_ZERO);
        if (!page)
                return VM_FAULT_OOM;
out:
        vmf->page = page;
        ret = finish_fault(vmf);
        vmf->page = NULL;
        *entry = page;
        if (!ret) {
                /* Grab reference for PTE that is now referencing the page */
                get_page(page);
                return VM_FAULT_NOPAGE;
        }
        return ret;
}

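/*
 * Copy a page of data from the DAX device at @sector into the page @to,
 * which is mapped at user address @vaddr.  Used to populate the destination
 * page of a copy-on-write fault.
 */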
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
                struct page *to, unsigned long vaddr)
{
        struct blk_dax_ctl dax = {
                .sector = sector,
                .size = size,
        };
        void *vto;

        if (dax_map_atomic(bdev, &dax) < 0)
                return PTR_ERR(dax.addr);
        vto = kmap_atomic(to);
        copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
        kunmap_atomic(vto);
        dax_unmap_atomic(bdev, &dax);
        return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
                struct vm_fault *vmf,
                void *entry, sector_t sector,
                unsigned long flags)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        int error = 0;
        bool hole_fill = false;
        void *new_entry;
        pgoff_t index = vmf->pgoff;

        if (vmf->flags & FAULT_FLAG_WRITE)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        /* Replacing hole page with block mapping? */
        if (!radix_tree_exceptional_entry(entry)) {
                hole_fill = true;
                /*
                 * Unmap the page now before we remove it from page cache
                 * below.  The page is locked so it cannot be faulted in
                 * again.
                 */
                unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
                                PAGE_SIZE, 0);
                error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
                if (error)
                        return ERR_PTR(error);
        } else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
                /* replacing huge zero page with PMD block mapping */
                unmap_mapping_range(mapping,
                        (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
        }

        spin_lock_irq(&mapping->tree_lock);
        new_entry = dax_radix_locked_entry(sector, flags);

        if (hole_fill) {
                __delete_from_page_cache(entry, NULL);
                /* Drop pagecache reference */
                put_page(entry);
                error = __radix_tree_insert(page_tree, index,
                                dax_radix_order(new_entry), new_entry);
                if (error) {
                        new_entry = ERR_PTR(error);
                        goto unlock;
                }
                mapping->nrexceptional++;
        } else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                /*
                 * Only swap our new entry into the radix tree if the current
                 * entry is a zero page or an empty entry.  If a normal PTE or
                 * PMD entry is already in the tree, we leave it alone.  This
                 * means that if we are trying to insert a PTE and the
                 * existing entry is a PMD, we will just leave the PMD in the
                 * tree and dirty it if necessary.
                 */
                struct radix_tree_node *node;
                void **slot;
                void *ret;

                ret = __radix_tree_lookup(page_tree, index, &node, &slot);
                WARN_ON_ONCE(ret != entry);
                __radix_tree_replace(page_tree, node, slot,
                                new_entry, NULL, NULL);
        }
        if (vmf->flags & FAULT_FLAG_WRITE)
                radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
unlock:
        spin_unlock_irq(&mapping->tree_lock);
        if (hole_fill) {
                radix_tree_preload_end();
                /*
                 * We don't need the hole page any more; it has been replaced
                 * with a locked radix tree entry now.
                 */
                if (mapping->a_ops->freepage)
                        mapping->a_ops->freepage(entry);
                unlock_page(entry);
                put_page(entry);
        }
        return new_entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
        return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
                pgoff_t index, unsigned long pfn)
{
        struct vm_area_struct *vma;
        pte_t pte, *ptep = NULL;
        pmd_t *pmdp = NULL;
        spinlock_t *ptl;
        bool changed;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
                unsigned long address;

                cond_resched();

                if (!(vma->vm_flags & VM_SHARED))
                        continue;

                address = pgoff_address(index, vma);
                changed = false;
                if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
                        continue;

                if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
                        pmd_t pmd;

                        if (pfn != pmd_pfn(*pmdp))
                                goto unlock_pmd;
                        if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
                                goto unlock_pmd;

                        flush_cache_page(vma, address, pfn);
                        pmd = pmdp_huge_clear_flush(vma, address, pmdp);
                        pmd = pmd_wrprotect(pmd);
                        pmd = pmd_mkclean(pmd);
                        set_pmd_at(vma->vm_mm, address, pmdp, pmd);
                        changed = true;
unlock_pmd:
                        spin_unlock(ptl);
#endif
                } else {
                        if (pfn != pte_pfn(*ptep))
                                goto unlock_pte;
                        if (!pte_dirty(*ptep) && !pte_write(*ptep))
                                goto unlock_pte;

                        flush_cache_page(vma, address, pfn);
                        pte = ptep_clear_flush(vma, address, ptep);
                        pte = pte_wrprotect(pte);
                        pte = pte_mkclean(pte);
                        set_pte_at(vma->vm_mm, address, ptep, pte);
                        changed = true;
unlock_pte:
                        pte_unmap_unlock(ptep, ptl);
                }

                if (changed)
                        mmu_notifier_invalidate_page(vma->vm_mm, address);
        }
        i_mmap_unlock_read(mapping);
}

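/*
 * Flush a single dirty DAX radix tree entry to the persistent domain: lock
 * the entry, write-protect all user mappings of its pfn(s), flush the CPU
 * caches for the whole entry and finally clear the dirty tag.
 */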
static int dax_writeback_one(struct block_device *bdev,
                struct address_space *mapping, pgoff_t index, void *entry)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        struct blk_dax_ctl dax;
        void *entry2, **slot;
        int ret = 0;

        /*
         * A page got tagged dirty in DAX mapping? Something is seriously
         * wrong.
         */
        if (WARN_ON(!radix_tree_exceptional_entry(entry)))
                return -EIO;

        spin_lock_irq(&mapping->tree_lock);
        entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
        /* Entry got punched out / reallocated? */
        if (!entry2 || !radix_tree_exceptional_entry(entry2))
                goto put_unlocked;
        /*
         * Entry got reallocated elsewhere? No need to write it back.  We
         * have to compare sectors as we must not bail out due to a
         * difference in the lock bit or the entry type.
         */
        if (dax_radix_sector(entry2) != dax_radix_sector(entry))
                goto put_unlocked;
        if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
                         dax_is_zero_entry(entry))) {
                ret = -EIO;
                goto put_unlocked;
        }

        /* Another fsync thread may have already written back this entry */
        if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
                goto put_unlocked;
        /* Lock the entry to serialize with page faults */
        entry = lock_slot(mapping, slot);
        /*
         * We can clear the tag now but we have to be careful so that
         * concurrent dax_writeback_one() calls for the same index cannot
         * finish before we actually flush the caches.  This is achieved as
         * the calls will look at the entry only under tree_lock and once
         * they do that they will see the entry locked and wait for it to
         * unlock.
         */
        radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
        spin_unlock_irq(&mapping->tree_lock);

        /*
         * Even if dax_writeback_mapping_range() was given a wbc->range_start
         * in the middle of a PMD, the 'index' we are given will be aligned to
         * the start index of the PMD, as will the sector we pull from
         * 'entry'.  This allows us to flush for PMD_SIZE and not have to
         * worry about partial PMD writebacks.
         */
        dax.sector = dax_radix_sector(entry);
        dax.size = PAGE_SIZE << dax_radix_order(entry);

        /*
         * We cannot hold tree_lock while calling dax_map_atomic() because it
         * eventually calls cond_resched().
         */
        ret = dax_map_atomic(bdev, &dax);
        if (ret < 0) {
                put_locked_mapping_entry(mapping, index, entry);
                return ret;
        }

        if (WARN_ON_ONCE(ret < dax.size)) {
                ret = -EIO;
                goto unmap;
        }

        dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
        wb_cache_pmem(dax.addr, dax.size);
        /*
         * After we have flushed the cache, we can clear the dirty tag.  There
         * cannot be new dirty data in the pfn after the flush has completed as
         * the pfn mappings are writeprotected and fault waits for mapping
         * entry lock.
         */
        spin_lock_irq(&mapping->tree_lock);
        radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
        spin_unlock_irq(&mapping->tree_lock);
unmap:
        dax_unmap_atomic(bdev, &dax);
        put_locked_mapping_entry(mapping, index, entry);
        return ret;

put_unlocked:
        put_unlocked_mapping_entry(mapping, index, entry2);
        spin_unlock_irq(&mapping->tree_lock);
        return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        pgoff_t start_index, end_index;
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        bool done = false;
        int i, ret = 0;

        if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
                return -EIO;

        if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
                return 0;

        start_index = wbc->range_start >> PAGE_SHIFT;
        end_index = wbc->range_end >> PAGE_SHIFT;

        tag_pages_for_writeback(mapping, start_index, end_index);

        pagevec_init(&pvec, 0);
        while (!done) {
                pvec.nr = find_get_entries_tag(mapping, start_index,
                                PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
                                pvec.pages, indices);

                if (pvec.nr == 0)
                        break;

                for (i = 0; i < pvec.nr; i++) {
                        if (indices[i] > end_index) {
                                done = true;
                                break;
                        }

                        ret = dax_writeback_one(bdev, mapping, indices[i],
                                        pvec.pages[i]);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

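/*
 * Install a PTE for the pfn backing @sector and record the corresponding
 * DAX entry in the radix tree.  *entryp is updated to point to the newly
 * inserted radix tree entry.
 */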
static int dax_insert_mapping(struct address_space *mapping,
                struct block_device *bdev, sector_t sector, size_t size,
                void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long vaddr = vmf->address;
        struct blk_dax_ctl dax = {
                .sector = sector,
                .size = size,
        };
        void *ret;
        void *entry = *entryp;

        if (dax_map_atomic(bdev, &dax) < 0)
                return PTR_ERR(dax.addr);
        dax_unmap_atomic(bdev, &dax);

        ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
        if (IS_ERR(ret))
                return PTR_ERR(ret);
        *entryp = ret;

        return vm_insert_mixed(vma, vaddr, dax.pfn);
}

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        void *entry, **slot;
        pgoff_t index = vmf->pgoff;

        spin_lock_irq(&mapping->tree_lock);
        entry = get_unlocked_mapping_entry(mapping, index, &slot);
        if (!entry || !radix_tree_exceptional_entry(entry)) {
                if (entry)
                        put_unlocked_mapping_entry(mapping, index, entry);
                spin_unlock_irq(&mapping->tree_lock);
                return VM_FAULT_NOPAGE;
        }
        radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
        entry = lock_slot(mapping, slot);
        spin_unlock_irq(&mapping->tree_lock);
        /*
         * If we race with somebody updating the PTE and finish_mkwrite_fault()
         * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
         * the fault in either case.
         */
        finish_mkwrite_fault(vmf);
        put_locked_mapping_entry(mapping, index, entry);
        return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

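/*
 * Return true if @offset and @length are both aligned to the device's
 * logical block size, in which case zeroing can be pushed down to the
 * block layer.
 */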
static bool dax_range_is_aligned(struct block_device *bdev,
                unsigned int offset, unsigned int length)
{
        unsigned short sector_size = bdev_logical_block_size(bdev);

        if (!IS_ALIGNED(offset, sector_size))
                return false;
        if (!IS_ALIGNED(length, sector_size))
                return false;

        return true;
}

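/*
 * Zero a sub-page range of a DAX block: use blkdev_issue_zeroout() when the
 * range is logical-block aligned, otherwise map the page and clear it
 * through the pmem API.
 */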
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
                unsigned int offset, unsigned int length)
{
        struct blk_dax_ctl dax = {
                .sector = sector,
                .size = PAGE_SIZE,
        };

        if (dax_range_is_aligned(bdev, offset, length)) {
                sector_t start_sector = dax.sector + (offset >> 9);

                return blkdev_issue_zeroout(bdev, start_sector,
                                length >> 9, GFP_NOFS, true);
        } else {
                if (dax_map_atomic(bdev, &dax) < 0)
                        return PTR_ERR(dax.addr);
                clear_pmem(dax.addr + offset, length);
                dax_unmap_atomic(bdev, &dax);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

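/* Convert a file position within an iomap extent to a 512-byte sector. */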
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
        return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

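/*
 * The iomap actor for dax_iomap_rw(): copy data between the iov_iter and
 * the DAX mapping for one extent, zero-filling reads from holes and
 * unwritten extents.
 */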
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        struct iov_iter *iter = data;
        loff_t end = pos + length, done = 0;
        ssize_t ret = 0;

        if (iov_iter_rw(iter) == READ) {
                end = min(end, i_size_read(inode));
                if (pos >= end)
                        return 0;

                if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
                        return iov_iter_zero(min(length, end - pos), iter);
        }

        if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
                return -EIO;

        /*
         * Write can allocate a block for an area which has a hole page
         * mapped into page tables.  We have to tear down these mappings so
         * that data written by write(2) is visible in mmap.
         */
        if (iomap->flags & IOMAP_F_NEW) {
                invalidate_inode_pages2_range(inode->i_mapping,
                                pos >> PAGE_SHIFT,
                                (end - 1) >> PAGE_SHIFT);
        }

        while (pos < end) {
                unsigned offset = pos & (PAGE_SIZE - 1);
                struct blk_dax_ctl dax = { 0 };
                ssize_t map_len;

                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                dax.sector = dax_iomap_sector(iomap, pos);
                dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
                map_len = dax_map_atomic(iomap->bdev, &dax);
                if (map_len < 0) {
                        ret = map_len;
                        break;
                }

                dax.addr += offset;
                map_len -= offset;
                if (map_len > end - pos)
                        map_len = end - pos;

                if (iov_iter_rw(iter) == WRITE)
                        map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
                else
                        map_len = copy_to_iter(dax.addr, map_len, iter);
                dax_unmap_atomic(iomap->bdev, &dax);
                if (map_len <= 0) {
                        ret = map_len ? map_len : -EFAULT;
                        break;
                }

                pos += map_len;
                length -= map_len;
                done += map_len;
        }

        return done ? done : ret;
}

1049
1050 /**
1051 * dax_iomap_rw - Perform I/O to a DAX file
1052 * @iocb: The control block for this I/O
1053 * @iter: The addresses to do I/O from or to
1054 * @ops: iomap ops passed from the file system
1055 *
1056 * This function performs read and write operations to directly mapped
1057 * persistent memory. The callers needs to take care of read/write exclusion
1058 * and evicting any page cache pages in the region under I/O.
1059 */
1060 ssize_t
1061 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1062 struct iomap_ops *ops)
1063 {
1064 struct address_space *mapping = iocb->ki_filp->f_mapping;
1065 struct inode *inode = mapping->host;
1066 loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1067 unsigned flags = 0;
1068
1069 if (iov_iter_rw(iter) == WRITE)
1070 flags |= IOMAP_WRITE;
1071
1072 while (iov_iter_count(iter)) {
1073 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1074 iter, dax_iomap_actor);
1075 if (ret <= 0)
1076 break;
1077 pos += ret;
1078 done += ret;
1079 }
1080
1081 iocb->ki_pos += done;
1082 return done ? done : ret;
1083 }
1084 EXPORT_SYMBOL_GPL(dax_iomap_rw);
1085
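/* Translate an errno from the fault path into a VM_FAULT_* return code. */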
static int dax_fault_return(int error)
{
        if (error == 0)
                return VM_FAULT_NOPAGE;
        if (error == -ENOMEM)
                return VM_FAULT_OOM;
        return VM_FAULT_SIGBUS;
}

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in their fault
 * or mkwrite handler for DAX files. Assumes the caller has done all the
 * necessary locking for the page fault to proceed successfully.
 */
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                struct iomap_ops *ops)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long vaddr = vmf->address;
        loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
        sector_t sector;
        struct iomap iomap = { 0 };
        unsigned flags = IOMAP_FAULT;
        int error, major = 0;
        int vmf_ret = 0;
        void *entry;

        /*
         * Check whether the offset isn't beyond the end of the file now.
         * The caller is supposed to hold locks serializing us with truncate /
         * punch hole so this is a reliable test.
         */
        if (pos >= i_size_read(inode))
                return VM_FAULT_SIGBUS;

        if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
                flags |= IOMAP_WRITE;

        /*
         * Note that we don't bother to use iomap_apply here: DAX requires
         * the filesystem block size to be equal to the page size, which means
         * that we never have to deal with more than a single extent here.
         */
        error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
        if (error)
                return dax_fault_return(error);
        if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
                vmf_ret = dax_fault_return(-EIO); /* fs corruption? */
                goto finish_iomap;
        }

        entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
        if (IS_ERR(entry)) {
                vmf_ret = dax_fault_return(PTR_ERR(entry));
                goto finish_iomap;
        }

        sector = dax_iomap_sector(&iomap, pos);

        if (vmf->cow_page) {
                switch (iomap.type) {
                case IOMAP_HOLE:
                case IOMAP_UNWRITTEN:
                        clear_user_highpage(vmf->cow_page, vaddr);
                        break;
                case IOMAP_MAPPED:
                        error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
                                        vmf->cow_page, vaddr);
                        break;
                default:
                        WARN_ON_ONCE(1);
                        error = -EIO;
                        break;
                }

                if (error)
                        goto error_unlock_entry;

                __SetPageUptodate(vmf->cow_page);
                vmf_ret = finish_fault(vmf);
                if (!vmf_ret)
                        vmf_ret = VM_FAULT_DONE_COW;
                goto unlock_entry;
        }

        switch (iomap.type) {
        case IOMAP_MAPPED:
                if (iomap.flags & IOMAP_F_NEW) {
                        count_vm_event(PGMAJFAULT);
                        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                        major = VM_FAULT_MAJOR;
                }
                error = dax_insert_mapping(mapping, iomap.bdev, sector,
                                PAGE_SIZE, &entry, vma, vmf);
                /* -EBUSY is fine, somebody else faulted on the same PTE */
                if (error == -EBUSY)
                        error = 0;
                break;
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (!(vmf->flags & FAULT_FLAG_WRITE)) {
                        vmf_ret = dax_load_hole(mapping, &entry, vmf);
                        goto unlock_entry;
                }
                /*FALLTHRU*/
        default:
                WARN_ON_ONCE(1);
                error = -EIO;
                break;
        }

error_unlock_entry:
        vmf_ret = dax_fault_return(error) | major;
unlock_entry:
        put_locked_mapping_entry(mapping, vmf->pgoff, entry);
finish_iomap:
        if (ops->iomap_end) {
                int copied = PAGE_SIZE;

                if (vmf_ret & VM_FAULT_ERROR)
                        copied = 0;
                /*
                 * The fault is done by now and there's no way back (other
                 * thread may be already happily using PTE we have installed).
                 * Just ignore error from ->iomap_end since we cannot do much
                 * with it.
                 */
                ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
        }
        return vmf_ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (i.e. low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)

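/*
 * Try to install a PMD-sized mapping for the faulting address.  Falls back
 * to PTEs if the backing extent is smaller than PMD_SIZE, is not PMD
 * aligned, or its pfn is not a devmap pfn.
 */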
static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
                struct vm_fault *vmf, unsigned long address,
                struct iomap *iomap, loff_t pos, bool write, void **entryp)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct block_device *bdev = iomap->bdev;
        struct blk_dax_ctl dax = {
                .sector = dax_iomap_sector(iomap, pos),
                .size = PMD_SIZE,
        };
        long length = dax_map_atomic(bdev, &dax);
        void *ret;

        if (length < 0) /* dax_map_atomic() failed */
                return VM_FAULT_FALLBACK;
        if (length < PMD_SIZE)
                goto unmap_fallback;
        if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
                goto unmap_fallback;
        if (!pfn_t_devmap(dax.pfn))
                goto unmap_fallback;

        dax_unmap_atomic(bdev, &dax);

        ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
                        RADIX_DAX_PMD);
        if (IS_ERR(ret))
                return VM_FAULT_FALLBACK;
        *entryp = ret;

        return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);

unmap_fallback:
        dax_unmap_atomic(bdev, &dax);
        return VM_FAULT_FALLBACK;
}

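/*
 * Handle a PMD-sized read fault from a file hole by mapping the huge zero
 * page and recording a huge zero page (HZP) entry in the radix tree.
 */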
static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
                struct vm_fault *vmf, unsigned long address,
                struct iomap *iomap, void **entryp)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        unsigned long pmd_addr = address & PMD_MASK;
        struct page *zero_page;
        spinlock_t *ptl;
        pmd_t pmd_entry;
        void *ret;

        zero_page = mm_get_huge_zero_page(vma->vm_mm);

        if (unlikely(!zero_page))
                return VM_FAULT_FALLBACK;

        ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
                        RADIX_DAX_PMD | RADIX_DAX_HZP);
        if (IS_ERR(ret))
                return VM_FAULT_FALLBACK;
        *entryp = ret;

        ptl = pmd_lock(vma->vm_mm, pmd);
        if (!pmd_none(*pmd)) {
                spin_unlock(ptl);
                return VM_FAULT_FALLBACK;
        }

        pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
        pmd_entry = pmd_mkhuge(pmd_entry);
        set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
        spin_unlock(ptl);
        return VM_FAULT_NOPAGE;
}

int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        unsigned long pmd_addr = address & PMD_MASK;
        bool write = flags & FAULT_FLAG_WRITE;
        unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
        struct inode *inode = mapping->host;
        int result = VM_FAULT_FALLBACK;
        struct iomap iomap = { 0 };
        pgoff_t max_pgoff, pgoff;
        struct vm_fault vmf;
        void *entry;
        loff_t pos;
        int error;

        /* Fall back to PTEs if we're going to COW */
        if (write && !(vma->vm_flags & VM_SHARED))
                goto fallback;

        /* If the PMD would extend outside the VMA */
        if (pmd_addr < vma->vm_start)
                goto fallback;
        if ((pmd_addr + PMD_SIZE) > vma->vm_end)
                goto fallback;

        /*
         * Check whether the offset isn't beyond the end of the file now.
         * The caller is supposed to hold locks serializing us with truncate /
         * punch hole so this is a reliable test.
         */
        pgoff = linear_page_index(vma, pmd_addr);
        max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

        if (pgoff > max_pgoff)
                return VM_FAULT_SIGBUS;

        /* If the PMD would extend beyond the file size */
        if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
                goto fallback;

        /*
         * Note that we don't use iomap_apply here.  We aren't doing I/O, only
         * setting up a mapping, so really we're using iomap_begin() as a way
         * to look up our filesystem block.
         */
        pos = (loff_t)pgoff << PAGE_SHIFT;
        error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
        if (error)
                goto fallback;

        if (iomap.offset + iomap.length < pos + PMD_SIZE)
                goto finish_iomap;

        /*
         * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
         * PMD or a HZP entry.  If it can't (because a 4k page is already in
         * the tree, for instance), it will return -EEXIST and we just fall
         * back to 4k entries.
         */
        entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
        if (IS_ERR(entry))
                goto finish_iomap;

        vmf.pgoff = pgoff;
        vmf.flags = flags;
        vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;

        switch (iomap.type) {
        case IOMAP_MAPPED:
                result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
                                &iomap, pos, write, &entry);
                break;
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (WARN_ON_ONCE(write))
                        goto unlock_entry;
                result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
                                &entry);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }

unlock_entry:
        put_locked_mapping_entry(mapping, pgoff, entry);
finish_iomap:
        if (ops->iomap_end) {
                int copied = PMD_SIZE;

                if (result == VM_FAULT_FALLBACK)
                        copied = 0;
                /*
                 * The fault is done by now and there's no way back (other
                 * thread may be already happily using PMD we have installed).
                 * Just ignore error from ->iomap_end since we cannot do much
                 * with it.
                 */
                ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
                                &iomap);
        }
fallback:
        if (result == VM_FAULT_FALLBACK) {
                split_huge_pmd(vma, pmd, address);
                count_vm_event(THP_FAULT_FALLBACK);
        }
        return result;
}
EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
#endif /* CONFIG_FS_DAX_PMD */