dax: fix radix tree insertion race
1 /*
2 * fs/dax.c - Direct Access filesystem code
3 * Copyright (c) 2013-2014 Intel Corporation
4 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 */
16
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/pmem.h>
29 #include <linux/sched.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34 #include <linux/mmu_notifier.h>
35 #include <linux/iomap.h>
36 #include "internal.h"
37
38 /* We choose 4096 entries - same as per-zone page wait tables */
39 #define DAX_WAIT_TABLE_BITS 12
40 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
41
42 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
43
44 static int __init init_dax_wait_table(void)
45 {
46 int i;
47
48 for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
49 init_waitqueue_head(wait_table + i);
50 return 0;
51 }
52 fs_initcall(init_dax_wait_table);
53
54 static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
55 {
56 struct request_queue *q = bdev->bd_queue;
57 long rc = -EIO;
58
59 dax->addr = ERR_PTR(-EIO);
60 if (blk_queue_enter(q, true) != 0)
61 return rc;
62
63 rc = bdev_direct_access(bdev, dax);
64 if (rc < 0) {
65 dax->addr = ERR_PTR(rc);
66 blk_queue_exit(q);
67 return rc;
68 }
69 return rc;
70 }
71
72 static void dax_unmap_atomic(struct block_device *bdev,
73 const struct blk_dax_ctl *dax)
74 {
75 if (IS_ERR(dax->addr))
76 return;
77 blk_queue_exit(bdev->bd_queue);
78 }
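/*
 * Illustrative sketch, not part of the original file: the expected pairing of
 * dax_map_atomic() and dax_unmap_atomic().  The sector and size below are
 * made-up values.  Note that dax_unmap_atomic() checks IS_ERR(dax->addr)
 * itself, so it is also safe to call on the failure path.
 */
static long __maybe_unused dax_map_pairing_sketch(struct block_device *bdev)
{
	struct blk_dax_ctl dax = {
		.sector = 0,		/* hypothetical sector */
		.size = PAGE_SIZE,	/* map a single page */
	};
	long rc;

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return rc;		/* dax.addr holds ERR_PTR(rc) */
	/* ... access persistent memory through dax.addr here ... */
	dax_unmap_atomic(bdev, &dax);
	return 0;
}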
79
80 static int dax_is_pmd_entry(void *entry)
81 {
82 return (unsigned long)entry & RADIX_DAX_PMD;
83 }
84
85 static int dax_is_pte_entry(void *entry)
86 {
87 return !((unsigned long)entry & RADIX_DAX_PMD);
88 }
89
90 static int dax_is_zero_entry(void *entry)
91 {
92 return (unsigned long)entry & RADIX_DAX_HZP;
93 }
94
95 static int dax_is_empty_entry(void *entry)
96 {
97 return (unsigned long)entry & RADIX_DAX_EMPTY;
98 }
99
100 struct page *read_dax_sector(struct block_device *bdev, sector_t n)
101 {
102 struct page *page = alloc_pages(GFP_KERNEL, 0);
103 struct blk_dax_ctl dax = {
104 .size = PAGE_SIZE,
105 .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
106 };
107 long rc;
108
109 if (!page)
110 return ERR_PTR(-ENOMEM);
111
	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0) {
		__free_page(page);	/* don't leak the page allocated above */
		return ERR_PTR(rc);
	}
115 memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
116 dax_unmap_atomic(bdev, &dax);
117 return page;
118 }
119
120 /*
121 * DAX radix tree locking
122 */
123 struct exceptional_entry_key {
124 struct address_space *mapping;
125 pgoff_t entry_start;
126 };
127
128 struct wait_exceptional_entry_queue {
129 wait_queue_t wait;
130 struct exceptional_entry_key key;
131 };
132
133 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
134 pgoff_t index, void *entry, struct exceptional_entry_key *key)
135 {
136 unsigned long hash;
137
138 /*
139 * If 'entry' is a PMD, align the 'index' that we use for the wait
140 * queue to the start of that PMD. This ensures that all offsets in
141 * the range covered by the PMD map to the same bit lock.
142 */
143 if (dax_is_pmd_entry(entry))
144 index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
145
146 key->mapping = mapping;
147 key->entry_start = index;
148
149 hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
150 return wait_table + hash;
151 }
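/*
 * Illustrative sketch, not part of the original file: because
 * dax_entry_waitqueue() rounds the index of a PMD entry down to a PMD
 * boundary, any two page offsets within the same 2MiB range yield the same
 * key and the same wait queue.  With 4k pages PMD_SHIFT - PAGE_SHIFT is 9,
 * so indices 512..1023 all collapse to entry_start == 512, for example.
 * 'pmd_entry' is assumed to be an exceptional entry with RADIX_DAX_PMD set.
 */
static bool __maybe_unused dax_same_waitqueue_sketch(
		struct address_space *mapping, pgoff_t a, pgoff_t b,
		void *pmd_entry)
{
	struct exceptional_entry_key ka, kb;
	wait_queue_head_t *wqa = dax_entry_waitqueue(mapping, a, pmd_entry, &ka);
	wait_queue_head_t *wqb = dax_entry_waitqueue(mapping, b, pmd_entry, &kb);

	return wqa == wqb && ka.entry_start == kb.entry_start;
}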
152
153 static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
154 int sync, void *keyp)
155 {
156 struct exceptional_entry_key *key = keyp;
157 struct wait_exceptional_entry_queue *ewait =
158 container_of(wait, struct wait_exceptional_entry_queue, wait);
159
160 if (key->mapping != ewait->key.mapping ||
161 key->entry_start != ewait->key.entry_start)
162 return 0;
163 return autoremove_wake_function(wait, mode, sync, NULL);
164 }
165
166 /*
167 * Check whether the given slot is locked. The function must be called with
168  * mapping->tree_lock held.
169 */
170 static inline int slot_locked(struct address_space *mapping, void **slot)
171 {
172 unsigned long entry = (unsigned long)
173 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
174 return entry & RADIX_DAX_ENTRY_LOCK;
175 }
176
177 /*
178  * Mark the given slot as locked. The function must be called with
179  * mapping->tree_lock held.
180 */
181 static inline void *lock_slot(struct address_space *mapping, void **slot)
182 {
183 unsigned long entry = (unsigned long)
184 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
185
186 entry |= RADIX_DAX_ENTRY_LOCK;
187 radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
188 return (void *)entry;
189 }
190
191 /*
192  * Mark the given slot as unlocked. The function must be called with
193  * mapping->tree_lock held.
194 */
195 static inline void *unlock_slot(struct address_space *mapping, void **slot)
196 {
197 unsigned long entry = (unsigned long)
198 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
199
200 entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
201 radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
202 return (void *)entry;
203 }
204
205 /*
206  * Look up the entry in the radix tree, wait for it to become unlocked if
207  * it is an exceptional entry, and return it. The caller must call
208  * put_unlocked_mapping_entry() when it decides not to lock the entry, or
209  * put_locked_mapping_entry() once it has locked the entry and wants to
210  * unlock it again.
211 *
212 * The function must be called with mapping->tree_lock held.
213 */
214 static void *get_unlocked_mapping_entry(struct address_space *mapping,
215 pgoff_t index, void ***slotp)
216 {
217 void *entry, **slot;
218 struct wait_exceptional_entry_queue ewait;
219 wait_queue_head_t *wq;
220
221 init_wait(&ewait.wait);
222 ewait.wait.func = wake_exceptional_entry_func;
223
224 for (;;) {
225 entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
226 &slot);
227 if (!entry || !radix_tree_exceptional_entry(entry) ||
228 !slot_locked(mapping, slot)) {
229 if (slotp)
230 *slotp = slot;
231 return entry;
232 }
233
234 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
235 prepare_to_wait_exclusive(wq, &ewait.wait,
236 TASK_UNINTERRUPTIBLE);
237 spin_unlock_irq(&mapping->tree_lock);
238 schedule();
239 finish_wait(wq, &ewait.wait);
240 spin_lock_irq(&mapping->tree_lock);
241 }
242 }
243
244 static void dax_unlock_mapping_entry(struct address_space *mapping,
245 pgoff_t index)
246 {
247 void *entry, **slot;
248
249 spin_lock_irq(&mapping->tree_lock);
250 entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
251 if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
252 !slot_locked(mapping, slot))) {
253 spin_unlock_irq(&mapping->tree_lock);
254 return;
255 }
256 unlock_slot(mapping, slot);
257 spin_unlock_irq(&mapping->tree_lock);
258 dax_wake_mapping_entry_waiter(mapping, index, entry, false);
259 }
260
261 static void put_locked_mapping_entry(struct address_space *mapping,
262 pgoff_t index, void *entry)
263 {
264 if (!radix_tree_exceptional_entry(entry)) {
265 unlock_page(entry);
266 put_page(entry);
267 } else {
268 dax_unlock_mapping_entry(mapping, index);
269 }
270 }
271
272 /*
273  * Called when we are done with a radix tree entry we looked up via
274 * get_unlocked_mapping_entry() and which we didn't lock in the end.
275 */
276 static void put_unlocked_mapping_entry(struct address_space *mapping,
277 pgoff_t index, void *entry)
278 {
279 if (!radix_tree_exceptional_entry(entry))
280 return;
281
282 /* We have to wake up next waiter for the radix tree entry lock */
283 dax_wake_mapping_entry_waiter(mapping, index, entry, false);
284 }
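/*
 * Illustrative sketch, not part of the original file, of the locking
 * protocol documented above get_unlocked_mapping_entry(): look the entry up
 * under tree_lock, then either lock it and later drop it with
 * put_locked_mapping_entry(), or back off with put_unlocked_mapping_entry()
 * so the next waiter is woken.
 */
static void __maybe_unused dax_entry_protocol_sketch(
		struct address_space *mapping, pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	if (entry && radix_tree_exceptional_entry(entry)) {
		entry = lock_slot(mapping, slot);
		spin_unlock_irq(&mapping->tree_lock);
		/* ... work on the locked entry ... */
		put_locked_mapping_entry(mapping, index, entry);
	} else {
		/* decided not to lock the entry after all */
		put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
	}
}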
285
286 /*
287  * Find the radix tree entry at the given index. If it points to a page,
288  * return with the page locked. If it points to an exceptional entry, return
289  * with the radix tree entry locked. If the radix tree doesn't contain the
290  * given index, create an empty exceptional entry and return it locked.
291 *
292 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
293 * either return that locked entry or will return an error. This error will
294 * happen if there are any 4k entries (either zero pages or DAX entries)
295 * within the 2MiB range that we are requesting.
296 *
297 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
298 * evict 4k entries in order to 'upgrade' them to a 2MiB entry. A 2MiB
299 * insertion will fail if it finds any 4k entries already in the tree, and a
300 * 4k insertion will cause an existing 2MiB entry to be unmapped and
301 * downgraded to 4k entries. This happens for both 2MiB huge zero pages as
302 * well as 2MiB empty entries.
303 *
304 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
305 * real storage backing them. We will leave these real 2MiB DAX entries in
306 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
307 *
308 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
309 * persistent memory the benefit is doubtful. We can add that later if we can
310 * show it helps.
311 */
312 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
313 unsigned long size_flag)
314 {
315 bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
316 void *entry, **slot;
317
318 restart:
319 spin_lock_irq(&mapping->tree_lock);
320 entry = get_unlocked_mapping_entry(mapping, index, &slot);
321
322 if (entry) {
323 if (size_flag & RADIX_DAX_PMD) {
324 if (!radix_tree_exceptional_entry(entry) ||
325 dax_is_pte_entry(entry)) {
326 put_unlocked_mapping_entry(mapping, index,
327 entry);
328 entry = ERR_PTR(-EEXIST);
329 goto out_unlock;
330 }
331 } else { /* trying to grab a PTE entry */
332 if (radix_tree_exceptional_entry(entry) &&
333 dax_is_pmd_entry(entry) &&
334 (dax_is_zero_entry(entry) ||
335 dax_is_empty_entry(entry))) {
336 pmd_downgrade = true;
337 }
338 }
339 }
340
341 	/* No entry for the given index? Make sure the radix tree is big enough. */
342 if (!entry || pmd_downgrade) {
343 int err;
344
345 if (pmd_downgrade) {
346 /*
347 * Make sure 'entry' remains valid while we drop
348 * mapping->tree_lock.
349 */
350 entry = lock_slot(mapping, slot);
351 }
352
353 spin_unlock_irq(&mapping->tree_lock);
354 /*
355 * Besides huge zero pages the only other thing that gets
356 * downgraded are empty entries which don't need to be
357 * unmapped.
358 */
359 if (pmd_downgrade && dax_is_zero_entry(entry))
360 unmap_mapping_range(mapping,
361 (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
362
363 err = radix_tree_preload(
364 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
365 if (err) {
366 if (pmd_downgrade)
367 put_locked_mapping_entry(mapping, index, entry);
368 return ERR_PTR(err);
369 }
370 spin_lock_irq(&mapping->tree_lock);
371
372 if (!entry) {
373 /*
374 * We needed to drop the page_tree lock while calling
375 * radix_tree_preload() and we didn't have an entry to
376 * lock. See if another thread inserted an entry at
377 * our index during this time.
378 */
379 entry = __radix_tree_lookup(&mapping->page_tree, index,
380 NULL, &slot);
381 if (entry) {
382 radix_tree_preload_end();
383 spin_unlock_irq(&mapping->tree_lock);
384 goto restart;
385 }
386 }
387
388 if (pmd_downgrade) {
389 radix_tree_delete(&mapping->page_tree, index);
390 mapping->nrexceptional--;
391 dax_wake_mapping_entry_waiter(mapping, index, entry,
392 true);
393 }
394
395 entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
396
397 err = __radix_tree_insert(&mapping->page_tree, index,
398 dax_radix_order(entry), entry);
399 radix_tree_preload_end();
400 if (err) {
401 spin_unlock_irq(&mapping->tree_lock);
402 /*
403 * Our insertion of a DAX entry failed, most likely
404 * because we were inserting a PMD entry and it
405 * collided with a PTE sized entry at a different
406 * index in the PMD range. We haven't inserted
407 * anything into the radix tree and have no waiters to
408 * wake.
409 */
410 return ERR_PTR(err);
411 }
412 /* Good, we have inserted empty locked entry into the tree. */
413 mapping->nrexceptional++;
414 spin_unlock_irq(&mapping->tree_lock);
415 return entry;
416 }
417 /* Normal page in radix tree? */
418 if (!radix_tree_exceptional_entry(entry)) {
419 struct page *page = entry;
420
421 get_page(page);
422 spin_unlock_irq(&mapping->tree_lock);
423 lock_page(page);
424 /* Page got truncated? Retry... */
425 if (unlikely(page->mapping != mapping)) {
426 unlock_page(page);
427 put_page(page);
428 goto restart;
429 }
430 return page;
431 }
432 entry = lock_slot(mapping, slot);
433 out_unlock:
434 spin_unlock_irq(&mapping->tree_lock);
435 return entry;
436 }
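/*
 * Illustrative caller sketch, not part of the original file: the usual
 * pattern around grab_mapping_entry(), as used by the fault handlers below.
 * A size_flag of 0 requests a PTE-sized entry.
 */
static int __maybe_unused dax_grab_entry_sketch(struct address_space *mapping,
		pgoff_t index)
{
	void *entry = grab_mapping_entry(mapping, index, 0);

	if (IS_ERR(entry))
		return PTR_ERR(entry);
	/* ... 'entry' is locked here; the actual fault work goes in between ... */
	put_locked_mapping_entry(mapping, index, entry);
	return 0;
}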
437
438 /*
439 * We do not necessarily hold the mapping->tree_lock when we call this
440 * function so it is possible that 'entry' is no longer a valid item in the
441 * radix tree. This is okay because all we really need to do is to find the
442 * correct waitqueue where tasks might be waiting for that old 'entry' and
443 * wake them.
444 */
445 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
446 pgoff_t index, void *entry, bool wake_all)
447 {
448 struct exceptional_entry_key key;
449 wait_queue_head_t *wq;
450
451 wq = dax_entry_waitqueue(mapping, index, entry, &key);
452
453 /*
454 * Checking for locked entry and prepare_to_wait_exclusive() happens
455 * under mapping->tree_lock, ditto for entry handling in our callers.
456 * So at this point all tasks that could have seen our entry locked
457 * must be in the waitqueue and the following check will see them.
458 */
459 if (waitqueue_active(wq))
460 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
461 }
462
463 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
464 pgoff_t index, bool trunc)
465 {
466 int ret = 0;
467 void *entry;
468 struct radix_tree_root *page_tree = &mapping->page_tree;
469
470 spin_lock_irq(&mapping->tree_lock);
471 entry = get_unlocked_mapping_entry(mapping, index, NULL);
472 if (!entry || !radix_tree_exceptional_entry(entry))
473 goto out;
474 if (!trunc &&
475 (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
476 radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
477 goto out;
478 radix_tree_delete(page_tree, index);
479 mapping->nrexceptional--;
480 ret = 1;
481 out:
482 put_unlocked_mapping_entry(mapping, index, entry);
483 spin_unlock_irq(&mapping->tree_lock);
484 return ret;
485 }
486 /*
487 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
488 * entry to get unlocked before deleting it.
489 */
490 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
491 {
492 int ret = __dax_invalidate_mapping_entry(mapping, index, true);
493
494 /*
495  * This gets called from the truncate / punch_hole path. As such, the
496  * caller must hold locks protecting against concurrent modifications of
497  * the radix tree (usually fs-private i_mmap_sem held for writing). Since
498  * the caller has seen an exceptional entry for this index, we had better
499  * find it at that index as well...
500 */
501 WARN_ON_ONCE(!ret);
502 return ret;
503 }
504
505 /*
506 * Invalidate exceptional DAX entry if easily possible. This handles DAX
507 * entries for invalidate_inode_pages() so we evict the entry only if we can
508 * do so without blocking.
509 */
510 int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
511 {
512 int ret = 0;
513 void *entry, **slot;
514 struct radix_tree_root *page_tree = &mapping->page_tree;
515
516 spin_lock_irq(&mapping->tree_lock);
517 entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
518 if (!entry || !radix_tree_exceptional_entry(entry) ||
519 slot_locked(mapping, slot))
520 goto out;
521 if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
522 radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
523 goto out;
524 radix_tree_delete(page_tree, index);
525 mapping->nrexceptional--;
526 ret = 1;
527 out:
528 spin_unlock_irq(&mapping->tree_lock);
529 if (ret)
530 dax_wake_mapping_entry_waiter(mapping, index, entry, true);
531 return ret;
532 }
533
534 /*
535 * Invalidate exceptional DAX entry if it is clean.
536 */
537 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
538 pgoff_t index)
539 {
540 return __dax_invalidate_mapping_entry(mapping, index, false);
541 }
542
543 /*
544 * The user has performed a load from a hole in the file. Allocating
545 * a new page in the file would cause excessive storage usage for
546 * workloads with sparse files. We allocate a page cache page instead.
547 * We'll kick it out of the page cache if it's ever written to,
548 * otherwise it will simply fall out of the page cache under memory
549 * pressure without ever having been dirtied.
550 */
551 static int dax_load_hole(struct address_space *mapping, void **entry,
552 struct vm_fault *vmf)
553 {
554 struct page *page;
555 int ret;
556
557 /* Hole page already exists? Return it... */
558 if (!radix_tree_exceptional_entry(*entry)) {
559 page = *entry;
560 goto out;
561 }
562
563 /* This will replace locked radix tree entry with a hole page */
564 page = find_or_create_page(mapping, vmf->pgoff,
565 vmf->gfp_mask | __GFP_ZERO);
566 if (!page)
567 return VM_FAULT_OOM;
568 out:
569 vmf->page = page;
570 ret = finish_fault(vmf);
571 vmf->page = NULL;
572 *entry = page;
573 if (!ret) {
574 /* Grab reference for PTE that is now referencing the page */
575 get_page(page);
576 return VM_FAULT_NOPAGE;
577 }
578 return ret;
579 }
580
581 static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
582 struct page *to, unsigned long vaddr)
583 {
584 struct blk_dax_ctl dax = {
585 .sector = sector,
586 .size = size,
587 };
588 void *vto;
589
590 if (dax_map_atomic(bdev, &dax) < 0)
591 return PTR_ERR(dax.addr);
592 vto = kmap_atomic(to);
593 copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
594 kunmap_atomic(vto);
595 dax_unmap_atomic(bdev, &dax);
596 return 0;
597 }
598
599 /*
600 * By this point grab_mapping_entry() has ensured that we have a locked entry
601 * of the appropriate size so we don't have to worry about downgrading PMDs to
602 * PTEs. If we happen to be trying to insert a PTE and there is a PMD
603 * already in the tree, we will skip the insertion and just dirty the PMD as
604 * appropriate.
605 */
606 static void *dax_insert_mapping_entry(struct address_space *mapping,
607 struct vm_fault *vmf,
608 void *entry, sector_t sector,
609 unsigned long flags)
610 {
611 struct radix_tree_root *page_tree = &mapping->page_tree;
612 int error = 0;
613 bool hole_fill = false;
614 void *new_entry;
615 pgoff_t index = vmf->pgoff;
616
617 if (vmf->flags & FAULT_FLAG_WRITE)
618 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
619
620 /* Replacing hole page with block mapping? */
621 if (!radix_tree_exceptional_entry(entry)) {
622 hole_fill = true;
623 /*
624 * Unmap the page now before we remove it from page cache below.
625 * The page is locked so it cannot be faulted in again.
626 */
627 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
628 PAGE_SIZE, 0);
629 error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
630 if (error)
631 return ERR_PTR(error);
632 } else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
633 /* replacing huge zero page with PMD block mapping */
634 unmap_mapping_range(mapping,
635 (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
636 }
637
638 spin_lock_irq(&mapping->tree_lock);
639 new_entry = dax_radix_locked_entry(sector, flags);
640
641 if (hole_fill) {
642 __delete_from_page_cache(entry, NULL);
643 /* Drop pagecache reference */
644 put_page(entry);
645 error = __radix_tree_insert(page_tree, index,
646 dax_radix_order(new_entry), new_entry);
647 if (error) {
648 new_entry = ERR_PTR(error);
649 goto unlock;
650 }
651 mapping->nrexceptional++;
652 } else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
653 /*
654 * Only swap our new entry into the radix tree if the current
655 * entry is a zero page or an empty entry. If a normal PTE or
656 * PMD entry is already in the tree, we leave it alone. This
657 * means that if we are trying to insert a PTE and the
658 * existing entry is a PMD, we will just leave the PMD in the
659 * tree and dirty it if necessary.
660 */
661 struct radix_tree_node *node;
662 void **slot;
663 void *ret;
664
665 ret = __radix_tree_lookup(page_tree, index, &node, &slot);
666 WARN_ON_ONCE(ret != entry);
667 __radix_tree_replace(page_tree, node, slot,
668 new_entry, NULL, NULL);
669 }
670 if (vmf->flags & FAULT_FLAG_WRITE)
671 radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
672 unlock:
673 spin_unlock_irq(&mapping->tree_lock);
674 if (hole_fill) {
675 radix_tree_preload_end();
676 /*
677 		 * We don't need the hole page anymore; it has been replaced
678 		 * with a locked radix tree entry now.
679 */
680 if (mapping->a_ops->freepage)
681 mapping->a_ops->freepage(entry);
682 unlock_page(entry);
683 put_page(entry);
684 }
685 return new_entry;
686 }
687
688 static inline unsigned long
689 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
690 {
691 unsigned long address;
692
693 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
694 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
695 return address;
696 }
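/*
 * Worked example, illustrative only: for a VMA with vm_start == 0x600000000000
 * and vm_pgoff == 8, pgoff 10 maps to 0x600000000000 + (2 << PAGE_SHIFT),
 * i.e. two pages past vm_start with 4k pages.
 */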
697
698 /* Walk all mappings of a given index of a file and writeprotect them */
699 static void dax_mapping_entry_mkclean(struct address_space *mapping,
700 pgoff_t index, unsigned long pfn)
701 {
702 struct vm_area_struct *vma;
703 pte_t pte, *ptep = NULL;
704 pmd_t *pmdp = NULL;
705 spinlock_t *ptl;
706 bool changed;
707
708 i_mmap_lock_read(mapping);
709 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
710 unsigned long address;
711
712 cond_resched();
713
714 if (!(vma->vm_flags & VM_SHARED))
715 continue;
716
717 address = pgoff_address(index, vma);
718 changed = false;
719 if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
720 continue;
721
722 if (pmdp) {
723 #ifdef CONFIG_FS_DAX_PMD
724 pmd_t pmd;
725
726 if (pfn != pmd_pfn(*pmdp))
727 goto unlock_pmd;
728 if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
729 goto unlock_pmd;
730
731 flush_cache_page(vma, address, pfn);
732 pmd = pmdp_huge_clear_flush(vma, address, pmdp);
733 pmd = pmd_wrprotect(pmd);
734 pmd = pmd_mkclean(pmd);
735 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
736 changed = true;
737 unlock_pmd:
738 spin_unlock(ptl);
739 #endif
740 } else {
741 if (pfn != pte_pfn(*ptep))
742 goto unlock_pte;
743 if (!pte_dirty(*ptep) && !pte_write(*ptep))
744 goto unlock_pte;
745
746 flush_cache_page(vma, address, pfn);
747 pte = ptep_clear_flush(vma, address, ptep);
748 pte = pte_wrprotect(pte);
749 pte = pte_mkclean(pte);
750 set_pte_at(vma->vm_mm, address, ptep, pte);
751 changed = true;
752 unlock_pte:
753 pte_unmap_unlock(ptep, ptl);
754 }
755
756 if (changed)
757 mmu_notifier_invalidate_page(vma->vm_mm, address);
758 }
759 i_mmap_unlock_read(mapping);
760 }
761
762 static int dax_writeback_one(struct block_device *bdev,
763 struct address_space *mapping, pgoff_t index, void *entry)
764 {
765 struct radix_tree_root *page_tree = &mapping->page_tree;
766 struct blk_dax_ctl dax;
767 void *entry2, **slot;
768 int ret = 0;
769
770 /*
771 * A page got tagged dirty in DAX mapping? Something is seriously
772 * wrong.
773 */
774 if (WARN_ON(!radix_tree_exceptional_entry(entry)))
775 return -EIO;
776
777 spin_lock_irq(&mapping->tree_lock);
778 entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
779 /* Entry got punched out / reallocated? */
780 if (!entry2 || !radix_tree_exceptional_entry(entry2))
781 goto put_unlocked;
782 /*
783 	 * Entry got reallocated elsewhere? No need to write it back. We have to
784 	 * compare sectors, as we must not bail out due to a difference in the
785 	 * lock bit or entry type.
786 */
787 if (dax_radix_sector(entry2) != dax_radix_sector(entry))
788 goto put_unlocked;
789 if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
790 dax_is_zero_entry(entry))) {
791 ret = -EIO;
792 goto put_unlocked;
793 }
794
795 /* Another fsync thread may have already written back this entry */
796 if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
797 goto put_unlocked;
798 /* Lock the entry to serialize with page faults */
799 entry = lock_slot(mapping, slot);
800 /*
801 * We can clear the tag now but we have to be careful so that concurrent
802 * dax_writeback_one() calls for the same index cannot finish before we
803 * actually flush the caches. This is achieved as the calls will look
804 * at the entry only under tree_lock and once they do that they will
805 * see the entry locked and wait for it to unlock.
806 */
807 radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
808 spin_unlock_irq(&mapping->tree_lock);
809
810 /*
811 * Even if dax_writeback_mapping_range() was given a wbc->range_start
812 * in the middle of a PMD, the 'index' we are given will be aligned to
813 * the start index of the PMD, as will the sector we pull from
814 * 'entry'. This allows us to flush for PMD_SIZE and not have to
815 * worry about partial PMD writebacks.
816 */
817 dax.sector = dax_radix_sector(entry);
818 dax.size = PAGE_SIZE << dax_radix_order(entry);
819
820 /*
821 * We cannot hold tree_lock while calling dax_map_atomic() because it
822 * eventually calls cond_resched().
823 */
824 ret = dax_map_atomic(bdev, &dax);
825 if (ret < 0) {
826 put_locked_mapping_entry(mapping, index, entry);
827 return ret;
828 }
829
830 if (WARN_ON_ONCE(ret < dax.size)) {
831 ret = -EIO;
832 goto unmap;
833 }
834
835 dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
836 wb_cache_pmem(dax.addr, dax.size);
837 /*
838 * After we have flushed the cache, we can clear the dirty tag. There
839 * cannot be new dirty data in the pfn after the flush has completed as
840 * the pfn mappings are writeprotected and fault waits for mapping
841 * entry lock.
842 */
843 spin_lock_irq(&mapping->tree_lock);
844 radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
845 spin_unlock_irq(&mapping->tree_lock);
846 unmap:
847 dax_unmap_atomic(bdev, &dax);
848 put_locked_mapping_entry(mapping, index, entry);
849 return ret;
850
851 put_unlocked:
852 put_unlocked_mapping_entry(mapping, index, entry2);
853 spin_unlock_irq(&mapping->tree_lock);
854 return ret;
855 }
856
857 /*
858 * Flush the mapping to the persistent domain within the byte range of [start,
859 * end]. This is required by data integrity operations to ensure file data is
860 * on persistent storage prior to completion of the operation.
861 */
862 int dax_writeback_mapping_range(struct address_space *mapping,
863 struct block_device *bdev, struct writeback_control *wbc)
864 {
865 struct inode *inode = mapping->host;
866 pgoff_t start_index, end_index;
867 pgoff_t indices[PAGEVEC_SIZE];
868 struct pagevec pvec;
869 bool done = false;
870 int i, ret = 0;
871
872 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
873 return -EIO;
874
875 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
876 return 0;
877
878 start_index = wbc->range_start >> PAGE_SHIFT;
879 end_index = wbc->range_end >> PAGE_SHIFT;
880
881 tag_pages_for_writeback(mapping, start_index, end_index);
882
883 pagevec_init(&pvec, 0);
884 while (!done) {
885 pvec.nr = find_get_entries_tag(mapping, start_index,
886 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
887 pvec.pages, indices);
888
889 if (pvec.nr == 0)
890 break;
891
892 for (i = 0; i < pvec.nr; i++) {
893 if (indices[i] > end_index) {
894 done = true;
895 break;
896 }
897
898 ret = dax_writeback_one(bdev, mapping, indices[i],
899 pvec.pages[i]);
900 if (ret < 0)
901 return ret;
902 }
903 }
904 return 0;
905 }
906 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
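/*
 * Hypothetical filesystem glue, illustrative only and not from this file: a
 * DAX-aware ->writepages() implementation typically just forwards to
 * dax_writeback_mapping_range() with the superblock's block device.
 */
#if 0
static int example_dax_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return dax_writeback_mapping_range(mapping,
			mapping->host->i_sb->s_bdev, wbc);
}
#endif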
907
908 static int dax_insert_mapping(struct address_space *mapping,
909 struct block_device *bdev, sector_t sector, size_t size,
910 void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
911 {
912 unsigned long vaddr = vmf->address;
913 struct blk_dax_ctl dax = {
914 .sector = sector,
915 .size = size,
916 };
917 void *ret;
918 void *entry = *entryp;
919
920 if (dax_map_atomic(bdev, &dax) < 0)
921 return PTR_ERR(dax.addr);
922 dax_unmap_atomic(bdev, &dax);
923
924 ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
925 if (IS_ERR(ret))
926 return PTR_ERR(ret);
927 *entryp = ret;
928
929 return vm_insert_mixed(vma, vaddr, dax.pfn);
930 }
931
932 /**
933 * dax_pfn_mkwrite - handle first write to DAX page
934 * @vma: The virtual memory area where the fault occurred
935 * @vmf: The description of the fault
936 */
937 int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
938 {
939 struct file *file = vma->vm_file;
940 struct address_space *mapping = file->f_mapping;
941 void *entry, **slot;
942 pgoff_t index = vmf->pgoff;
943
944 spin_lock_irq(&mapping->tree_lock);
945 entry = get_unlocked_mapping_entry(mapping, index, &slot);
946 if (!entry || !radix_tree_exceptional_entry(entry)) {
947 if (entry)
948 put_unlocked_mapping_entry(mapping, index, entry);
949 spin_unlock_irq(&mapping->tree_lock);
950 return VM_FAULT_NOPAGE;
951 }
952 radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
953 entry = lock_slot(mapping, slot);
954 spin_unlock_irq(&mapping->tree_lock);
955 /*
956 * If we race with somebody updating the PTE and finish_mkwrite_fault()
957 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
958 * the fault in either case.
959 */
960 finish_mkwrite_fault(vmf);
961 put_locked_mapping_entry(mapping, index, entry);
962 return VM_FAULT_NOPAGE;
963 }
964 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
965
966 static bool dax_range_is_aligned(struct block_device *bdev,
967 unsigned int offset, unsigned int length)
968 {
969 unsigned short sector_size = bdev_logical_block_size(bdev);
970
971 if (!IS_ALIGNED(offset, sector_size))
972 return false;
973 if (!IS_ALIGNED(length, sector_size))
974 return false;
975
976 return true;
977 }
978
979 int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
980 unsigned int offset, unsigned int length)
981 {
982 struct blk_dax_ctl dax = {
983 .sector = sector,
984 .size = PAGE_SIZE,
985 };
986
987 if (dax_range_is_aligned(bdev, offset, length)) {
988 sector_t start_sector = dax.sector + (offset >> 9);
989
990 return blkdev_issue_zeroout(bdev, start_sector,
991 length >> 9, GFP_NOFS, true);
992 } else {
993 if (dax_map_atomic(bdev, &dax) < 0)
994 return PTR_ERR(dax.addr);
995 clear_pmem(dax.addr + offset, length);
996 dax_unmap_atomic(bdev, &dax);
997 }
998 return 0;
999 }
1000 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
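/*
 * Worked example, illustrative only: with a 512-byte logical block size,
 * offset == 1024 and length == 2048 are both aligned, so the range is zeroed
 * via blkdev_issue_zeroout() starting at sector + 2 and spanning 4 sectors.
 * An unaligned request, say offset == 100, instead takes the map-and-
 * clear_pmem() path.
 */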
1001
1002 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
1003 {
1004 return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
1005 }
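/*
 * Worked example, illustrative only: iomap->blkno is in 512-byte units here
 * (hence the shift by 9), so with blkno == 8, iomap->offset == 0 and
 * pos == 12288 (three 4k pages into the extent), the sector is
 * 8 + (12288 >> 9) == 32.
 */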
1006
1007 static loff_t
1008 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1009 struct iomap *iomap)
1010 {
1011 struct iov_iter *iter = data;
1012 loff_t end = pos + length, done = 0;
1013 ssize_t ret = 0;
1014
1015 if (iov_iter_rw(iter) == READ) {
1016 end = min(end, i_size_read(inode));
1017 if (pos >= end)
1018 return 0;
1019
1020 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1021 return iov_iter_zero(min(length, end - pos), iter);
1022 }
1023
1024 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1025 return -EIO;
1026
1027 /*
1028 	 * A write can allocate a block for an area which has a hole page mapped
1029 	 * into the page tables. We have to tear down those mappings so that data
1030 	 * written by write(2) is visible in mmap.
1031 */
1032 if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
1033 invalidate_inode_pages2_range(inode->i_mapping,
1034 pos >> PAGE_SHIFT,
1035 (end - 1) >> PAGE_SHIFT);
1036 }
1037
1038 while (pos < end) {
1039 unsigned offset = pos & (PAGE_SIZE - 1);
1040 struct blk_dax_ctl dax = { 0 };
1041 ssize_t map_len;
1042
1043 if (fatal_signal_pending(current)) {
1044 ret = -EINTR;
1045 break;
1046 }
1047
1048 dax.sector = dax_iomap_sector(iomap, pos);
1049 dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
1050 map_len = dax_map_atomic(iomap->bdev, &dax);
1051 if (map_len < 0) {
1052 ret = map_len;
1053 break;
1054 }
1055
1056 dax.addr += offset;
1057 map_len -= offset;
1058 if (map_len > end - pos)
1059 map_len = end - pos;
1060
1061 if (iov_iter_rw(iter) == WRITE)
1062 map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
1063 else
1064 map_len = copy_to_iter(dax.addr, map_len, iter);
1065 dax_unmap_atomic(iomap->bdev, &dax);
1066 if (map_len <= 0) {
1067 ret = map_len ? map_len : -EFAULT;
1068 break;
1069 }
1070
1071 pos += map_len;
1072 length -= map_len;
1073 done += map_len;
1074 }
1075
1076 return done ? done : ret;
1077 }
1078
1079 /**
1080 * dax_iomap_rw - Perform I/O to a DAX file
1081 * @iocb: The control block for this I/O
1082 * @iter: The addresses to do I/O from or to
1083 * @ops: iomap ops passed from the file system
1084 *
1085 * This function performs read and write operations to directly mapped
1086  * persistent memory. The caller needs to take care of read/write exclusion
1087 * and evicting any page cache pages in the region under I/O.
1088 */
1089 ssize_t
1090 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1091 struct iomap_ops *ops)
1092 {
1093 struct address_space *mapping = iocb->ki_filp->f_mapping;
1094 struct inode *inode = mapping->host;
1095 loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1096 unsigned flags = 0;
1097
1098 if (iov_iter_rw(iter) == WRITE)
1099 flags |= IOMAP_WRITE;
1100
1101 while (iov_iter_count(iter)) {
1102 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1103 iter, dax_iomap_actor);
1104 if (ret <= 0)
1105 break;
1106 pos += ret;
1107 done += ret;
1108 }
1109
1110 iocb->ki_pos += done;
1111 return done ? done : ret;
1112 }
1113 EXPORT_SYMBOL_GPL(dax_iomap_rw);
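/*
 * Hypothetical caller sketch, illustrative only and not from this file: a
 * filesystem's ->read_iter() for a DAX inode takes the inode lock for
 * exclusion and hands off to dax_iomap_rw() with its iomap_ops;
 * 'example_iomap_ops' is a placeholder name.
 */
#if 0
static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
	inode_unlock_shared(inode);
	return ret;
}
#endif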
1114
1115 static int dax_fault_return(int error)
1116 {
1117 if (error == 0)
1118 return VM_FAULT_NOPAGE;
1119 if (error == -ENOMEM)
1120 return VM_FAULT_OOM;
1121 return VM_FAULT_SIGBUS;
1122 }
1123
1124 /**
1125 * dax_iomap_fault - handle a page fault on a DAX file
1126 * @vma: The virtual memory area where the fault occurred
1127 * @vmf: The description of the fault
1128 * @ops: iomap ops passed from the file system
1129 *
1130 * When a page fault occurs, filesystems may call this helper in their fault
1131 * or mkwrite handler for DAX files. Assumes the caller has done all the
1132 * necessary locking for the page fault to proceed successfully.
1133 */
1134 int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
1135 struct iomap_ops *ops)
1136 {
1137 struct address_space *mapping = vma->vm_file->f_mapping;
1138 struct inode *inode = mapping->host;
1139 unsigned long vaddr = vmf->address;
1140 loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1141 sector_t sector;
1142 struct iomap iomap = { 0 };
1143 unsigned flags = IOMAP_FAULT;
1144 int error, major = 0;
1145 int vmf_ret = 0;
1146 void *entry;
1147
1148 /*
1149 	 * Check that the offset isn't beyond the end of the file. The caller is
1150 	 * supposed to hold locks serializing us with truncate / punch hole, so
1151 	 * this is a reliable test.
1152 */
1153 if (pos >= i_size_read(inode))
1154 return VM_FAULT_SIGBUS;
1155
1156 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1157 flags |= IOMAP_WRITE;
1158
1159 /*
1160 	 * Note that we don't bother to use iomap_apply here: DAX requires
1161 	 * the file system block size to be equal to the page size, which means
1162 * that we never have to deal with more than a single extent here.
1163 */
1164 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1165 if (error)
1166 return dax_fault_return(error);
1167 if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1168 vmf_ret = dax_fault_return(-EIO); /* fs corruption? */
1169 goto finish_iomap;
1170 }
1171
1172 entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1173 if (IS_ERR(entry)) {
1174 vmf_ret = dax_fault_return(PTR_ERR(entry));
1175 goto finish_iomap;
1176 }
1177
1178 sector = dax_iomap_sector(&iomap, pos);
1179
1180 if (vmf->cow_page) {
1181 switch (iomap.type) {
1182 case IOMAP_HOLE:
1183 case IOMAP_UNWRITTEN:
1184 clear_user_highpage(vmf->cow_page, vaddr);
1185 break;
1186 case IOMAP_MAPPED:
1187 error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
1188 vmf->cow_page, vaddr);
1189 break;
1190 default:
1191 WARN_ON_ONCE(1);
1192 error = -EIO;
1193 break;
1194 }
1195
1196 if (error)
1197 goto error_unlock_entry;
1198
1199 __SetPageUptodate(vmf->cow_page);
1200 vmf_ret = finish_fault(vmf);
1201 if (!vmf_ret)
1202 vmf_ret = VM_FAULT_DONE_COW;
1203 goto unlock_entry;
1204 }
1205
1206 switch (iomap.type) {
1207 case IOMAP_MAPPED:
1208 if (iomap.flags & IOMAP_F_NEW) {
1209 count_vm_event(PGMAJFAULT);
1210 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1211 major = VM_FAULT_MAJOR;
1212 }
1213 error = dax_insert_mapping(mapping, iomap.bdev, sector,
1214 PAGE_SIZE, &entry, vma, vmf);
1215 /* -EBUSY is fine, somebody else faulted on the same PTE */
1216 if (error == -EBUSY)
1217 error = 0;
1218 break;
1219 case IOMAP_UNWRITTEN:
1220 case IOMAP_HOLE:
1221 if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1222 vmf_ret = dax_load_hole(mapping, &entry, vmf);
1223 goto unlock_entry;
1224 }
1225 /*FALLTHRU*/
1226 default:
1227 WARN_ON_ONCE(1);
1228 error = -EIO;
1229 break;
1230 }
1231
1232 error_unlock_entry:
1233 vmf_ret = dax_fault_return(error) | major;
1234 unlock_entry:
1235 put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1236 finish_iomap:
1237 if (ops->iomap_end) {
1238 int copied = PAGE_SIZE;
1239
1240 if (vmf_ret & VM_FAULT_ERROR)
1241 copied = 0;
1242 /*
1243 * The fault is done by now and there's no way back (other
1244 * thread may be already happily using PTE we have installed).
1245 * Just ignore error from ->iomap_end since we cannot do much
1246 * with it.
1247 */
1248 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1249 }
1250 return vmf_ret;
1251 }
1252 EXPORT_SYMBOL_GPL(dax_iomap_fault);
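/*
 * Hypothetical vm_operations glue, illustrative only and not from this file:
 * a filesystem wires dax_iomap_fault() into its ->fault handler, again with
 * a placeholder 'example_iomap_ops'.
 */
#if 0
static int example_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_iomap_fault(vma, vmf, &example_iomap_ops);
}
#endif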
1253
1254 #ifdef CONFIG_FS_DAX_PMD
1255 /*
1256  * The 'colour' (i.e. the low bits) within a PMD of a page offset. This
1257  * comes up more often than one might expect in the below functions.
1258 */
1259 #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
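/*
 * Worked example, illustrative only: with 4k pages and 2MiB PMDs,
 * PG_PMD_COLOUR is 511, so 'pgoff & ~PG_PMD_COLOUR' aligns a page offset
 * down to a PMD boundary and '(pgoff | PG_PMD_COLOUR) > max_pgoff' below
 * checks whether the last page of the PMD would fall beyond end of file.
 */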
1260
1261 static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
1262 struct vm_fault *vmf, unsigned long address,
1263 struct iomap *iomap, loff_t pos, bool write, void **entryp)
1264 {
1265 struct address_space *mapping = vma->vm_file->f_mapping;
1266 struct block_device *bdev = iomap->bdev;
1267 struct blk_dax_ctl dax = {
1268 .sector = dax_iomap_sector(iomap, pos),
1269 .size = PMD_SIZE,
1270 };
1271 long length = dax_map_atomic(bdev, &dax);
1272 void *ret;
1273
1274 if (length < 0) /* dax_map_atomic() failed */
1275 return VM_FAULT_FALLBACK;
1276 if (length < PMD_SIZE)
1277 goto unmap_fallback;
1278 if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
1279 goto unmap_fallback;
1280 if (!pfn_t_devmap(dax.pfn))
1281 goto unmap_fallback;
1282
1283 dax_unmap_atomic(bdev, &dax);
1284
1285 ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
1286 RADIX_DAX_PMD);
1287 if (IS_ERR(ret))
1288 return VM_FAULT_FALLBACK;
1289 *entryp = ret;
1290
1291 return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);
1292
1293 unmap_fallback:
1294 dax_unmap_atomic(bdev, &dax);
1295 return VM_FAULT_FALLBACK;
1296 }
1297
1298 static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
1299 struct vm_fault *vmf, unsigned long address,
1300 struct iomap *iomap, void **entryp)
1301 {
1302 struct address_space *mapping = vma->vm_file->f_mapping;
1303 unsigned long pmd_addr = address & PMD_MASK;
1304 struct page *zero_page;
1305 spinlock_t *ptl;
1306 pmd_t pmd_entry;
1307 void *ret;
1308
1309 zero_page = mm_get_huge_zero_page(vma->vm_mm);
1310
1311 if (unlikely(!zero_page))
1312 return VM_FAULT_FALLBACK;
1313
1314 ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
1315 RADIX_DAX_PMD | RADIX_DAX_HZP);
1316 if (IS_ERR(ret))
1317 return VM_FAULT_FALLBACK;
1318 *entryp = ret;
1319
1320 ptl = pmd_lock(vma->vm_mm, pmd);
1321 if (!pmd_none(*pmd)) {
1322 spin_unlock(ptl);
1323 return VM_FAULT_FALLBACK;
1324 }
1325
1326 pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
1327 pmd_entry = pmd_mkhuge(pmd_entry);
1328 set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
1329 spin_unlock(ptl);
1330 return VM_FAULT_NOPAGE;
1331 }
1332
1333 int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
1334 pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
1335 {
1336 struct address_space *mapping = vma->vm_file->f_mapping;
1337 unsigned long pmd_addr = address & PMD_MASK;
1338 bool write = flags & FAULT_FLAG_WRITE;
1339 unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1340 struct inode *inode = mapping->host;
1341 int result = VM_FAULT_FALLBACK;
1342 struct iomap iomap = { 0 };
1343 pgoff_t max_pgoff, pgoff;
1344 struct vm_fault vmf;
1345 void *entry;
1346 loff_t pos;
1347 int error;
1348
1349 /* Fall back to PTEs if we're going to COW */
1350 if (write && !(vma->vm_flags & VM_SHARED))
1351 goto fallback;
1352
1353 /* If the PMD would extend outside the VMA */
1354 if (pmd_addr < vma->vm_start)
1355 goto fallback;
1356 if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1357 goto fallback;
1358
1359 /*
1360 	 * Check that the offset isn't beyond the end of the file. The caller is
1361 	 * supposed to hold locks serializing us with truncate / punch hole, so
1362 	 * this is a reliable test.
1363 */
1364 pgoff = linear_page_index(vma, pmd_addr);
1365 max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
1366
1367 if (pgoff > max_pgoff)
1368 return VM_FAULT_SIGBUS;
1369
1370 /* If the PMD would extend beyond the file size */
1371 if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
1372 goto fallback;
1373
1374 /*
1375 * Note that we don't use iomap_apply here. We aren't doing I/O, only
1376 * setting up a mapping, so really we're using iomap_begin() as a way
1377 * to look up our filesystem block.
1378 */
1379 pos = (loff_t)pgoff << PAGE_SHIFT;
1380 error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1381 if (error)
1382 goto fallback;
1383
1384 if (iomap.offset + iomap.length < pos + PMD_SIZE)
1385 goto finish_iomap;
1386
1387 /*
1388 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
1389 * PMD or a HZP entry. If it can't (because a 4k page is already in
1390 * the tree, for instance), it will return -EEXIST and we just fall
1391 * back to 4k entries.
1392 */
1393 entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1394 if (IS_ERR(entry))
1395 goto finish_iomap;
1396
1397 vmf.pgoff = pgoff;
1398 vmf.flags = flags;
1399 vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
1400
1401 switch (iomap.type) {
1402 case IOMAP_MAPPED:
1403 result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
1404 &iomap, pos, write, &entry);
1405 break;
1406 case IOMAP_UNWRITTEN:
1407 case IOMAP_HOLE:
1408 if (WARN_ON_ONCE(write))
1409 goto unlock_entry;
1410 result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
1411 &entry);
1412 break;
1413 default:
1414 WARN_ON_ONCE(1);
1415 break;
1416 }
1417
1418 unlock_entry:
1419 put_locked_mapping_entry(mapping, pgoff, entry);
1420 finish_iomap:
1421 if (ops->iomap_end) {
1422 int copied = PMD_SIZE;
1423
1424 if (result == VM_FAULT_FALLBACK)
1425 copied = 0;
1426 /*
1427 * The fault is done by now and there's no way back (other
1428 * thread may be already happily using PMD we have installed).
1429 * Just ignore error from ->iomap_end since we cannot do much
1430 * with it.
1431 */
1432 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1433 &iomap);
1434 }
1435 fallback:
1436 if (result == VM_FAULT_FALLBACK) {
1437 split_huge_pmd(vma, pmd, address);
1438 count_vm_event(THP_FAULT_FALLBACK);
1439 }
1440 return result;
1441 }
1442 EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
1443 #endif /* CONFIG_FS_DAX_PMD */