/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

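/*
 * Illustrative sketch (not part of the original file): dax_map_atomic() and
 * dax_unmap_atomic() are meant to be used as a bracketing pair.  A caller
 * fills in a struct blk_dax_ctl with the sector and size it is interested
 * in, maps it, uses dax.addr while the request queue is pinned, and then
 * unmaps, roughly:
 *
 *	struct blk_dax_ctl dax = { .sector = sector, .size = PAGE_SIZE };
 *	long len = dax_map_atomic(bdev, &dax);
 *
 *	if (len < 0)
 *		return len;		// dax.addr holds ERR_PTR(len)
 *	memcpy(buf, dax.addr, min_t(long, len, PAGE_SIZE));
 *	dax_unmap_atomic(bdev, &dax);	// drops the blk_queue_enter() reference
 *
 * "bdev", "sector" and "buf" above are placeholder names, not identifiers
 * defined in this file; read_dax_sector() below follows this same pattern.
 */
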
static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it.  The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and wants to
 * unlock it again.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					    &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up the next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

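/*
 * Illustrative sketch (not part of the original file): the helpers above
 * implement a per-entry lock bit in the radix tree.  A typical user follows
 * this shape, doing all tree manipulation under mapping->tree_lock and the
 * real work with only the entry lock bit held:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	if (!should_touch(entry)) {		// hypothetical predicate
 *		put_unlocked_mapping_entry(mapping, index, entry);
 *		spin_unlock_irq(&mapping->tree_lock);
 *		return;
 *	}
 *	entry = lock_slot(mapping, slot);	// take the entry lock bit
 *	spin_unlock_irq(&mapping->tree_lock);
 *	... operate on the entry without tree_lock held ...
 *	put_locked_mapping_entry(mapping, index, entry);
 *
 * dax_writeback_one() below follows exactly this pattern.
 */
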
/*
 * Find radix tree entry at given index. If it points to a page, return with
 * the page locked. If it points to the exceptional entry, return with the
 * radix tree entry locked. If the radix tree doesn't contain given index,
 * create empty exceptional entry for the index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
			    dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
			    dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Someone already created the entry?  This is a
			 * normal failure when inserting PMDs in a range
			 * that already contains PTEs.  In that case we want
			 * to return -EEXIST immediately.
			 */
			if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
				goto restart;
			/*
			 * Our insertion of a DAX PMD entry failed, most
			 * likely because it collided with a PTE sized entry
			 * at a different index in the PMD range.  We haven't
			 * inserted anything into the radix tree and have no
			 * waiters to wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if easily possible. This handles DAX
 * entries for invalidate_inode_pages() so we evict the entry only if we can
 * do so without blocking.
 */
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = 0;
	void *entry, **slot;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry) ||
	    slot_locked(mapping, slot))
		goto out;
	if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	    radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	spin_unlock_irq(&mapping->tree_lock);
	if (ret)
		dax_wake_mapping_entry_waiter(mapping, index, entry, true);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void **entry,
			 struct vm_fault *vmf)
{
	struct page *page;
	int ret;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(*entry)) {
		page = *entry;
		goto out;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
 out:
	vmf->page = page;
	ret = finish_fault(vmf);
	vmf->page = NULL;
	*entry = page;
	if (!ret) {
		/* Grab reference for PTE that is now referencing the page */
		get_page(page);
		return VM_FAULT_NOPAGE;
	}
	return ret;
}

static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
		struct page *to, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need hole page anymore, it has been replaced with
		 * locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;
	bool changed;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);
		changed = false;
		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			changed = true;
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			changed = true;
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		if (changed)
			mmu_notifier_invalidate_page(vma->vm_mm, address);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	struct blk_dax_ctl dax;
	void *entry2, **slot;
	int ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || !radix_tree_exceptional_entry(entry2))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	dax.sector = dax_radix_sector(entry);
	dax.size = PAGE_SIZE << dax_radix_order(entry);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0) {
		put_locked_mapping_entry(mapping, index, entry);
		return ret;
	}

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
	wb_cache_pmem(dax.addr, dax.size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	put_locked_mapping_entry(mapping, index, entry);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

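/*
 * Illustrative sketch (not part of the original file): a filesystem normally
 * calls dax_writeback_mapping_range() from its ->writepages() method when
 * the mapping is in DAX mode, roughly:
 *
 *	static int example_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		if (dax_mapping(mapping))
 *			return dax_writeback_mapping_range(mapping,
 *					mapping->host->i_sb->s_bdev, wbc);
 *		return generic_writepages(mapping, wbc);   // or fs-specific path
 *	}
 *
 * "example_writepages" is a placeholder name; how the block device is looked
 * up and what the non-DAX fallback is are filesystem specific.
 */
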
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, sector_t sector, size_t size,
		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry)) {
		if (entry)
			put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	/*
	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
	 * the fault in either case.
	 */
	finish_mkwrite_fault(vmf);
	put_locked_mapping_entry(mapping, index, entry);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector		= sector,
		.size		= PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
					    length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		struct blk_dax_ctl dax = { 0 };
		ssize_t map_len;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		dax.sector = dax_iomap_sector(iomap, pos);
		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
		map_len = dax_map_atomic(iomap->bdev, &dax);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		dax.addr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
		else
			map_len = copy_to_iter(dax.addr, map_len, iter);
		dax_unmap_atomic(iomap->bdev, &dax);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and of evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

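/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * ->read_iter() for a DAX inode takes the inode lock itself and then
 * delegates to dax_iomap_rw() with its own iomap_ops, roughly:
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *					     struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 *
 * "example_dax_read_iter" and "example_iomap_ops" are placeholder names.
 * The write side is analogous, but must hold the inode lock exclusively, as
 * the lockdep assertions above require.
 */
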
static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

static int dax_iomap_pte_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		return dax_fault_return(error);
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		vmf_ret = dax_fault_return(-EIO);	/* fs corruption? */
		goto finish_iomap;
	}

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
					vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_unlock_entry;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto unlock_entry;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, sector,
				PAGE_SIZE, &entry, vmf->vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, &entry, vmf);
			goto unlock_entry;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_unlock_entry:
	vmf_ret = dax_fault_return(error) | major;
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
	return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	struct blk_dax_ctl dax = {
		.sector = dax_iomap_sector(iomap, pos),
		.size = PMD_SIZE,
	};
	long length = dax_map_atomic(bdev, &dax);
	void *ret = NULL;

	if (length < 0) /* dax_map_atomic() failed */
		goto fallback;
	if (length < PMD_SIZE)
		goto unmap_fallback;
	if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
		goto unmap_fallback;
	if (!pfn_t_devmap(dax.pfn))
		goto unmap_fallback;

	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			dax.pfn, vmf->flags & FAULT_FLAG_WRITE);

 unmap_fallback:
	dax_unmap_atomic(bdev, &dax);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
			dax.pfn, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
			RADIX_DAX_PMD | RADIX_DAX_HZP);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff > max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto fallback;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	/*
	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
	 * the tree, for instance), it will return -EEXIST and we just fall
	 * back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto finish_iomap;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			goto unlock_entry;
		result = dax_pmd_load_hole(vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for the page fault to proceed
 * successfully.
 */
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
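
/*
 * Illustrative sketch (not part of the original file): a filesystem wires
 * these entry points up through its vm_operations_struct for DAX mappings,
 * taking whatever lock it uses to serialize faults against truncate before
 * calling in.  Roughly (all "example_*" names are placeholders):
 *
 *	static int example_dax_huge_fault(struct vm_fault *vmf,
 *					  enum page_entry_size pe_size)
 *	{
 *		int ret;
 *
 *		// fs-specific lock serializing against truncate goes here
 *		ret = dax_iomap_fault(vmf, pe_size, &example_iomap_ops);
 *		// ... and is dropped here
 *		return ret;
 *	}
 *
 *	static int example_dax_fault(struct vm_fault *vmf)
 *	{
 *		return example_dax_huge_fault(vmf, PE_SIZE_PTE);
 *	}
 *
 *	static const struct vm_operations_struct example_dax_vm_ops = {
 *		.fault		= example_dax_fault,
 *		.huge_fault	= example_dax_huge_fault,
 *		.page_mkwrite	= example_dax_fault,
 *		.pfn_mkwrite	= example_dax_pfn_mkwrite,
 *	};
 *
 * where example_dax_pfn_mkwrite is typically a thin wrapper that takes the
 * same fs-specific lock and then calls dax_pfn_mkwrite() above.
 */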