/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
        int i;

        for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
                init_waitqueue_head(wait_table + i);
        return 0;
}
fs_initcall(init_dax_wait_table);

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
        struct request_queue *q = bdev->bd_queue;
        long rc = -EIO;

        dax->addr = ERR_PTR(-EIO);
        if (blk_queue_enter(q, true) != 0)
                return rc;

        rc = bdev_direct_access(bdev, dax);
        if (rc < 0) {
                dax->addr = ERR_PTR(rc);
                blk_queue_exit(q);
                return rc;
        }
        return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
                const struct blk_dax_ctl *dax)
{
        if (IS_ERR(dax->addr))
                return;
        blk_queue_exit(bdev->bd_queue);
}

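/*
 * Pairing note (a summary of the code above, not new behavior): a successful
 * dax_map_atomic() leaves a reference on the request queue held via
 * blk_queue_enter() so that dax->addr remains usable, and must be balanced
 * by dax_unmap_atomic().  On failure the queue reference is dropped right
 * here and dax->addr is set to an ERR_PTR, which is why error paths can call
 * dax_unmap_atomic() unconditionally: it simply returns when
 * IS_ERR(dax->addr).
 */
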
static int dax_is_pmd_entry(void *entry)
{
        return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
        return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
        return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
        return (unsigned long)entry & RADIX_DAX_EMPTY;
}

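/*
 * For reference: the flags tested above live in the low bits of the radix
 * tree entry, roughly laid out as follows in this era (see the RADIX_DAX_*
 * definitions in include/linux/dax.h for the authoritative values):
 *
 *      bit 1: RADIX_TREE_EXCEPTIONAL_ENTRY - a DAX entry, not a page pointer
 *      bit 2: RADIX_DAX_ENTRY_LOCK - the per-entry lock bit
 *      bit 3: RADIX_DAX_PMD - 2MiB entry rather than 4k
 *      bit 4: RADIX_DAX_HZP - huge zero page
 *      bit 5: RADIX_DAX_EMPTY - entry with no storage decided yet
 *
 * with the sector number stored in the remaining high bits.
 */
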
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
        struct page *page = alloc_pages(GFP_KERNEL, 0);
        struct blk_dax_ctl dax = {
                .size = PAGE_SIZE,
                .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
        };
        long rc;

        if (!page)
                return ERR_PTR(-ENOMEM);

        rc = dax_map_atomic(bdev, &dax);
        if (rc < 0)
                return ERR_PTR(rc);
        memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
        dax_unmap_atomic(bdev, &dax);
        return page;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
        struct address_space *mapping;
        pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
        wait_queue_t wait;
        struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
                pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
        unsigned long hash;

        /*
         * If 'entry' is a PMD, align the 'index' that we use for the wait
         * queue to the start of that PMD.  This ensures that all offsets in
         * the range covered by the PMD map to the same bit lock.
         */
        if (dax_is_pmd_entry(entry))
                index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

        key->mapping = mapping;
        key->entry_start = index;

        hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
        return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
                int sync, void *keyp)
{
        struct exceptional_entry_key *key = keyp;
        struct wait_exceptional_entry_queue *ewait =
                container_of(wait, struct wait_exceptional_entry_queue, wait);

        if (key->mapping != ewait->key.mapping ||
            key->entry_start != ewait->key.entry_start)
                return 0;
        return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
        return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

        entry |= RADIX_DAX_ENTRY_LOCK;
        radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
        return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

        entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
        radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
        return (void *)entry;
}

/*
 * Look up an entry in the radix tree and, if it is a locked exceptional
 * entry, wait for it to become unlocked before returning it.  The caller
 * must call put_unlocked_mapping_entry() if it decides not to lock the
 * entry, or put_locked_mapping_entry() once it has locked the entry and
 * later wants to unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
                pgoff_t index, void ***slotp)
{
        void *entry, **slot;
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        for (;;) {
                entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
                                          &slot);
                if (!entry || !radix_tree_exceptional_entry(entry) ||
                    !slot_locked(mapping, slot)) {
                        if (slotp)
                                *slotp = slot;
                        return entry;
                }

                wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                          TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&mapping->tree_lock);
                schedule();
                finish_wait(wq, &ewait.wait);
                spin_lock_irq(&mapping->tree_lock);
        }
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
                pgoff_t index)
{
        void *entry, **slot;

        spin_lock_irq(&mapping->tree_lock);
        entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
        if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
                         !slot_locked(mapping, slot))) {
                spin_unlock_irq(&mapping->tree_lock);
                return;
        }
        unlock_slot(mapping, slot);
        spin_unlock_irq(&mapping->tree_lock);
        dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
                pgoff_t index, void *entry)
{
        if (!radix_tree_exceptional_entry(entry)) {
                unlock_page(entry);
                put_page(entry);
        } else {
                dax_unlock_mapping_entry(mapping, index);
        }
}

/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
                pgoff_t index, void *entry)
{
        if (!radix_tree_exceptional_entry(entry))
                return;

        /* We have to wake up next waiter for the radix tree entry lock */
        dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

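/*
 * Taken together, the helpers above form a small locking protocol for
 * exceptional entries.  The usage pattern, under mapping->tree_lock, is:
 *
 *      entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *      if (!keeping_entry)
 *              put_unlocked_mapping_entry(mapping, index, entry);
 *      else {
 *              entry = lock_slot(mapping, slot);
 *              ... drop tree_lock and do work ...
 *              put_locked_mapping_entry(mapping, index, entry);
 *      }
 *
 * put_locked_mapping_entry() also accepts plain pages (the hole pages
 * created by dax_load_hole() below), unlocking and releasing those instead.
 */
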
/*
 * Find radix tree entry at given index. If it points to a page, return with
 * the page locked. If it points to the exceptional entry, return with the
 * radix tree entry locked. If the radix tree doesn't contain given index,
 * create empty exceptional entry for the index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
                unsigned long size_flag)
{
        bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
        void *entry, **slot;

restart:
        spin_lock_irq(&mapping->tree_lock);
        entry = get_unlocked_mapping_entry(mapping, index, &slot);

        if (entry) {
                if (size_flag & RADIX_DAX_PMD) {
                        if (!radix_tree_exceptional_entry(entry) ||
                            dax_is_pte_entry(entry)) {
                                put_unlocked_mapping_entry(mapping, index,
                                                entry);
                                entry = ERR_PTR(-EEXIST);
                                goto out_unlock;
                        }
                } else { /* trying to grab a PTE entry */
                        if (radix_tree_exceptional_entry(entry) &&
                            dax_is_pmd_entry(entry) &&
                            (dax_is_zero_entry(entry) ||
                             dax_is_empty_entry(entry))) {
                                pmd_downgrade = true;
                        }
                }
        }

        /* No entry for given index? Make sure radix tree is big enough. */
        if (!entry || pmd_downgrade) {
                int err;

                if (pmd_downgrade) {
                        /*
                         * Make sure 'entry' remains valid while we drop
                         * mapping->tree_lock.
                         */
                        entry = lock_slot(mapping, slot);
                }

                spin_unlock_irq(&mapping->tree_lock);
                /*
                 * Besides huge zero pages the only other thing that gets
                 * downgraded are empty entries which don't need to be
                 * unmapped.
                 */
                if (pmd_downgrade && dax_is_zero_entry(entry))
                        unmap_mapping_range(mapping,
                                (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

                err = radix_tree_preload(
                                mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
                if (err) {
                        if (pmd_downgrade)
                                put_locked_mapping_entry(mapping, index, entry);
                        return ERR_PTR(err);
                }
                spin_lock_irq(&mapping->tree_lock);

                if (pmd_downgrade) {
                        radix_tree_delete(&mapping->page_tree, index);
                        mapping->nrexceptional--;
                        dax_wake_mapping_entry_waiter(mapping, index, entry,
                                        true);
                }

                entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

                err = __radix_tree_insert(&mapping->page_tree, index,
                                dax_radix_order(entry), entry);
                radix_tree_preload_end();
                if (err) {
                        spin_unlock_irq(&mapping->tree_lock);
                        /*
                         * Someone already created the entry?  This is a
                         * normal failure when inserting PMDs in a range
                         * that already contains PTEs.  In that case we want
                         * to return -EEXIST immediately.
                         */
                        if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
                                goto restart;
                        /*
                         * Our insertion of a DAX PMD entry failed, most
                         * likely because it collided with a PTE sized entry
                         * at a different index in the PMD range.  We haven't
                         * inserted anything into the radix tree and have no
                         * waiters to wake.
                         */
                        return ERR_PTR(err);
                }
                /* Good, we have inserted empty locked entry into the tree. */
                mapping->nrexceptional++;
                spin_unlock_irq(&mapping->tree_lock);
                return entry;
        }
        /* Normal page in radix tree? */
        if (!radix_tree_exceptional_entry(entry)) {
                struct page *page = entry;

                get_page(page);
                spin_unlock_irq(&mapping->tree_lock);
                lock_page(page);
                /* Page got truncated? Retry... */
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        put_page(page);
                        goto restart;
                }
                return page;
        }
        entry = lock_slot(mapping, slot);
 out_unlock:
        spin_unlock_irq(&mapping->tree_lock);
        return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                pgoff_t index, void *entry, bool wake_all)
{
        struct exceptional_entry_key key;
        wait_queue_head_t *wq;

        wq = dax_entry_waitqueue(mapping, index, entry, &key);

        /*
         * Checking for locked entry and prepare_to_wait_exclusive() happens
         * under mapping->tree_lock, ditto for entry handling in our callers.
         * So at this point all tasks that could have seen our entry locked
         * must be in the waitqueue and the following check will see them.
         */
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
        void *entry;

        spin_lock_irq(&mapping->tree_lock);
        entry = get_unlocked_mapping_entry(mapping, index, NULL);
        /*
         * This gets called from truncate / punch_hole path. As such, the caller
         * must hold locks protecting against concurrent modifications of the
         * radix tree (usually fs-private i_mmap_sem for writing). Since the
         * caller has seen an exceptional entry for this index, we better find
         * it at that index as well...
         */
        if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
                spin_unlock_irq(&mapping->tree_lock);
                return 0;
        }
        radix_tree_delete(&mapping->page_tree, index);
        mapping->nrexceptional--;
        spin_unlock_irq(&mapping->tree_lock);
        dax_wake_mapping_entry_waiter(mapping, index, entry, true);

        return 1;
}

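/*
 * (An assumption about callers outside this file: in this era the expected
 * caller is the truncate / invalidate path in mm/truncate.c, which hands
 * exceptional entries of a DAX mapping to this helper.  The contract is
 * simply "delete one exceptional entry, waiting for its lock first".)
 */
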
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
                struct vm_fault *vmf)
{
        struct page *page;

        /* Hole page already exists? Return it...  */
        if (!radix_tree_exceptional_entry(entry)) {
                vmf->page = entry;
                return VM_FAULT_LOCKED;
        }

        /* This will replace locked radix tree entry with a hole page */
        page = find_or_create_page(mapping, vmf->pgoff,
                                   vmf->gfp_mask | __GFP_ZERO);
        if (!page)
                return VM_FAULT_OOM;
        vmf->page = page;
        return VM_FAULT_LOCKED;
}

static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
                struct page *to, unsigned long vaddr)
{
        struct blk_dax_ctl dax = {
                .sector = sector,
                .size = size,
        };
        void *vto;

        if (dax_map_atomic(bdev, &dax) < 0)
                return PTR_ERR(dax.addr);
        vto = kmap_atomic(to);
        copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
        kunmap_atomic(vto);
        dax_unmap_atomic(bdev, &dax);
        return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
                                      struct vm_fault *vmf,
                                      void *entry, sector_t sector,
                                      unsigned long flags)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        int error = 0;
        bool hole_fill = false;
        void *new_entry;
        pgoff_t index = vmf->pgoff;

        if (vmf->flags & FAULT_FLAG_WRITE)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        /* Replacing hole page with block mapping? */
        if (!radix_tree_exceptional_entry(entry)) {
                hole_fill = true;
                /*
                 * Unmap the page now before we remove it from page cache below.
                 * The page is locked so it cannot be faulted in again.
                 */
                unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
                                    PAGE_SIZE, 0);
                error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
                if (error)
                        return ERR_PTR(error);
        } else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
                /* replacing huge zero page with PMD block mapping */
                unmap_mapping_range(mapping,
                        (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
        }

        spin_lock_irq(&mapping->tree_lock);
        new_entry = dax_radix_locked_entry(sector, flags);

        if (hole_fill) {
                __delete_from_page_cache(entry, NULL);
                /* Drop pagecache reference */
                put_page(entry);
                error = __radix_tree_insert(page_tree, index,
                                dax_radix_order(new_entry), new_entry);
                if (error) {
                        new_entry = ERR_PTR(error);
                        goto unlock;
                }
                mapping->nrexceptional++;
        } else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                /*
                 * Only swap our new entry into the radix tree if the current
                 * entry is a zero page or an empty entry.  If a normal PTE or
                 * PMD entry is already in the tree, we leave it alone.  This
                 * means that if we are trying to insert a PTE and the
                 * existing entry is a PMD, we will just leave the PMD in the
                 * tree and dirty it if necessary.
                 */
                struct radix_tree_node *node;
                void **slot;
                void *ret;

                ret = __radix_tree_lookup(page_tree, index, &node, &slot);
                WARN_ON_ONCE(ret != entry);
                __radix_tree_replace(page_tree, node, slot,
                                     new_entry, NULL, NULL);
        }
        if (vmf->flags & FAULT_FLAG_WRITE)
                radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
        spin_unlock_irq(&mapping->tree_lock);
        if (hole_fill) {
                radix_tree_preload_end();
                /*
                 * We don't need the hole page anymore, it has been replaced
                 * with a locked radix tree entry now.
                 */
                if (mapping->a_ops->freepage)
                        mapping->a_ops->freepage(entry);
                unlock_page(entry);
                put_page(entry);
        }
        return new_entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
        return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
                                      pgoff_t index, unsigned long pfn)
{
        struct vm_area_struct *vma;
        pte_t *ptep;
        pte_t pte;
        spinlock_t *ptl;
        bool changed;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
                unsigned long address;

                cond_resched();

                if (!(vma->vm_flags & VM_SHARED))
                        continue;

                address = pgoff_address(index, vma);
                changed = false;
                if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
                        continue;
                if (pfn != pte_pfn(*ptep))
                        goto unlock;
                if (!pte_dirty(*ptep) && !pte_write(*ptep))
                        goto unlock;

                flush_cache_page(vma, address, pfn);
                pte = ptep_clear_flush(vma, address, ptep);
                pte = pte_wrprotect(pte);
                pte = pte_mkclean(pte);
                set_pte_at(vma->vm_mm, address, ptep, pte);
                changed = true;
unlock:
                pte_unmap_unlock(ptep, ptl);

                if (changed)
                        mmu_notifier_invalidate_page(vma->vm_mm, address);
        }
        i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct block_device *bdev,
                struct address_space *mapping, pgoff_t index, void *entry)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        struct blk_dax_ctl dax;
        void *entry2, **slot;
        int ret = 0;

        /*
         * A page got tagged dirty in DAX mapping? Something is seriously
         * wrong.
         */
        if (WARN_ON(!radix_tree_exceptional_entry(entry)))
                return -EIO;

        spin_lock_irq(&mapping->tree_lock);
        entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
        /* Entry got punched out / reallocated? */
        if (!entry2 || !radix_tree_exceptional_entry(entry2))
                goto put_unlocked;
        /*
         * Entry got reallocated elsewhere? No need to writeback. We have to
         * compare sectors as we must not bail out due to difference in lockbit
         * or entry type.
         */
        if (dax_radix_sector(entry2) != dax_radix_sector(entry))
                goto put_unlocked;
        if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
                                dax_is_zero_entry(entry))) {
                ret = -EIO;
                goto put_unlocked;
        }

        /* Another fsync thread may have already written back this entry */
        if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
                goto put_unlocked;
        /* Lock the entry to serialize with page faults */
        entry = lock_slot(mapping, slot);
        /*
         * We can clear the tag now but we have to be careful so that concurrent
         * dax_writeback_one() calls for the same index cannot finish before we
         * actually flush the caches. This is achieved as the calls will look
         * at the entry only under tree_lock and once they do that they will
         * see the entry locked and wait for it to unlock.
         */
        radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
        spin_unlock_irq(&mapping->tree_lock);

        /*
         * Even if dax_writeback_mapping_range() was given a wbc->range_start
         * in the middle of a PMD, the 'index' we are given will be aligned to
         * the start index of the PMD, as will the sector we pull from
         * 'entry'.  This allows us to flush for PMD_SIZE and not have to
         * worry about partial PMD writebacks.
         */
        dax.sector = dax_radix_sector(entry);
        dax.size = PAGE_SIZE << dax_radix_order(entry);

        /*
         * We cannot hold tree_lock while calling dax_map_atomic() because it
         * eventually calls cond_resched().
         */
        ret = dax_map_atomic(bdev, &dax);
        if (ret < 0) {
                put_locked_mapping_entry(mapping, index, entry);
                return ret;
        }

        if (WARN_ON_ONCE(ret < dax.size)) {
                ret = -EIO;
                goto unmap;
        }

        dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
        wb_cache_pmem(dax.addr, dax.size);
        /*
         * After we have flushed the cache, we can clear the dirty tag. There
         * cannot be new dirty data in the pfn after the flush has completed as
         * the pfn mappings are writeprotected and fault waits for mapping
         * entry lock.
         */
        spin_lock_irq(&mapping->tree_lock);
        radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
        spin_unlock_irq(&mapping->tree_lock);
 unmap:
        dax_unmap_atomic(bdev, &dax);
        put_locked_mapping_entry(mapping, index, entry);
        return ret;

 put_unlocked:
        put_unlocked_mapping_entry(mapping, index, entry2);
        spin_unlock_irq(&mapping->tree_lock);
        return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        pgoff_t start_index, end_index;
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        bool done = false;
        int i, ret = 0;

        if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
                return -EIO;

        if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
                return 0;

        start_index = wbc->range_start >> PAGE_SHIFT;
        end_index = wbc->range_end >> PAGE_SHIFT;

        tag_pages_for_writeback(mapping, start_index, end_index);

        pagevec_init(&pvec, 0);
        while (!done) {
                pvec.nr = find_get_entries_tag(mapping, start_index,
                                PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
                                pvec.pages, indices);

                if (pvec.nr == 0)
                        break;

                for (i = 0; i < pvec.nr; i++) {
                        if (indices[i] > end_index) {
                                done = true;
                                break;
                        }

                        ret = dax_writeback_one(bdev, mapping, indices[i],
                                        pvec.pages[i]);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

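/*
 * Example (a sketch, not taken from this tree): a filesystem typically calls
 * the helper above from its ->writepages method when the mapping is DAX,
 * along the lines of
 *
 *      static int fs_writepages(struct address_space *mapping,
 *                               struct writeback_control *wbc)
 *      {
 *              if (dax_mapping(mapping))
 *                      return dax_writeback_mapping_range(mapping,
 *                                      mapping->host->i_sb->s_bdev, wbc);
 *              ... normal page cache writeback path ...
 *      }
 *
 * so that fsync()/msync() end up flushing CPU caches for all dirty DAX
 * entries.  fs_writepages here is hypothetical; dax_mapping() is the real
 * test from include/linux/dax.h.
 */
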
static int dax_insert_mapping(struct address_space *mapping,
                struct block_device *bdev, sector_t sector, size_t size,
                void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long vaddr = vmf->address;
        struct blk_dax_ctl dax = {
                .sector = sector,
                .size = size,
        };
        void *ret;
        void *entry = *entryp;

        if (dax_map_atomic(bdev, &dax) < 0)
                return PTR_ERR(dax.addr);
        dax_unmap_atomic(bdev, &dax);

        ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
        if (IS_ERR(ret))
                return PTR_ERR(ret);
        *entryp = ret;

        return vm_insert_mixed(vma, vaddr, dax.pfn);
}

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        void *entry, **slot;
        pgoff_t index = vmf->pgoff;

        spin_lock_irq(&mapping->tree_lock);
        entry = get_unlocked_mapping_entry(mapping, index, &slot);
        if (!entry || !radix_tree_exceptional_entry(entry)) {
                if (entry)
                        put_unlocked_mapping_entry(mapping, index, entry);
                spin_unlock_irq(&mapping->tree_lock);
                return VM_FAULT_NOPAGE;
        }
        radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
        entry = lock_slot(mapping, slot);
        spin_unlock_irq(&mapping->tree_lock);
        /*
         * If we race with somebody updating the PTE and finish_mkwrite_fault()
         * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
         * the fault in either case.
         */
        finish_mkwrite_fault(vmf);
        put_locked_mapping_entry(mapping, index, entry);
        return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

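/*
 * A sketch of typical usage: a filesystem wires this up as the
 * ->pfn_mkwrite handler in its DAX vm_operations_struct, e.g.
 *
 *      static const struct vm_operations_struct fs_dax_vm_ops = {
 *              .fault          = fs_dax_fault,
 *              .pfn_mkwrite    = dax_pfn_mkwrite,
 *      };
 *
 * (fs_dax_fault is a hypothetical wrapper around dax_iomap_fault() below.)
 * This runs on the first write to an already-mapped read-only pfn, so the
 * radix tree entry can be tagged dirty for later writeback.
 */
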
static bool dax_range_is_aligned(struct block_device *bdev,
                                 unsigned int offset, unsigned int length)
{
        unsigned short sector_size = bdev_logical_block_size(bdev);

        if (!IS_ALIGNED(offset, sector_size))
                return false;
        if (!IS_ALIGNED(length, sector_size))
                return false;

        return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
                unsigned int offset, unsigned int length)
{
        struct blk_dax_ctl dax = {
                .sector         = sector,
                .size           = PAGE_SIZE,
        };

        if (dax_range_is_aligned(bdev, offset, length)) {
                sector_t start_sector = dax.sector + (offset >> 9);

                return blkdev_issue_zeroout(bdev, start_sector,
                                length >> 9, GFP_NOFS, true);
        } else {
                if (dax_map_atomic(bdev, &dax) < 0)
                        return PTR_ERR(dax.addr);
                clear_pmem(dax.addr + offset, length);
                dax_unmap_atomic(bdev, &dax);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

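/*
 * Note the two zeroing strategies above: when both offset and length are
 * aligned to the device's logical block size, the block layer zeroes the
 * range for us (and blkdev_issue_zeroout() may be able to use a fast device
 * command); otherwise we must map the page and zero the sub-block range
 * with CPU stores via clear_pmem().
 */
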
#ifdef CONFIG_FS_IOMAP
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
        return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        struct iov_iter *iter = data;
        loff_t end = pos + length, done = 0;
        ssize_t ret = 0;

        if (iov_iter_rw(iter) == READ) {
                end = min(end, i_size_read(inode));
                if (pos >= end)
                        return 0;

                if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
                        return iov_iter_zero(min(length, end - pos), iter);
        }

        if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
                return -EIO;

        while (pos < end) {
                unsigned offset = pos & (PAGE_SIZE - 1);
                struct blk_dax_ctl dax = { 0 };
                ssize_t map_len;

                dax.sector = dax_iomap_sector(iomap, pos);
                dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
                map_len = dax_map_atomic(iomap->bdev, &dax);
                if (map_len < 0) {
                        ret = map_len;
                        break;
                }

                dax.addr += offset;
                map_len -= offset;
                if (map_len > end - pos)
                        map_len = end - pos;

                if (iov_iter_rw(iter) == WRITE)
                        map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
                else
                        map_len = copy_to_iter(dax.addr, map_len, iter);
                dax_unmap_atomic(iomap->bdev, &dax);
                if (map_len <= 0) {
                        ret = map_len ? map_len : -EFAULT;
                        break;
                }

                pos += map_len;
                length -= map_len;
                done += map_len;
        }

        return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                struct iomap_ops *ops)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = mapping->host;
        loff_t pos = iocb->ki_pos, ret = 0, done = 0;
        unsigned flags = 0;

        if (iov_iter_rw(iter) == WRITE)
                flags |= IOMAP_WRITE;

        /*
         * Yes, even DAX files can have page cache attached to them:  A zeroed
         * page is inserted into the pagecache when we have to serve a write
         * fault on a hole.  It should never be dirtied and can simply be
         * dropped from the pagecache once we get real data for the page.
         *
         * XXX: This is racy against mmap, and there's nothing we can do about
         * it. We'll eventually need to shift this down even further so that
         * we can check if we allocated blocks over a hole first.
         */
        if (mapping->nrpages) {
                ret = invalidate_inode_pages2_range(mapping,
                                pos >> PAGE_SHIFT,
                                (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
                WARN_ON_ONCE(ret);
        }

        while (iov_iter_count(iter)) {
                ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
                                iter, dax_iomap_actor);
                if (ret <= 0)
                        break;
                pos += ret;
                done += ret;
        }

        iocb->ki_pos += done;
        return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

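/*
 * Example (a sketch, not from this tree): a filesystem read path might wrap
 * the helper above as
 *
 *      static ssize_t fs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *      {
 *              struct inode *inode = file_inode(iocb->ki_filp);
 *              ssize_t ret;
 *
 *              inode_lock_shared(inode);
 *              ret = dax_iomap_rw(iocb, to, &fs_iomap_ops);
 *              inode_unlock_shared(inode);
 *              return ret;
 *      }
 *
 * where fs_iomap_ops is the filesystem's own struct iomap_ops.  The inode
 * lock provides the read/write exclusion the kernel-doc comment asks for.
 */
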
/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in their fault
 * or mkwrite handler for DAX files. Assumes the caller has done all the
 * necessary locking for the page fault to proceed successfully.
 */
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        struct iomap_ops *ops)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long vaddr = vmf->address;
        loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
        sector_t sector;
        struct iomap iomap = { 0 };
        unsigned flags = IOMAP_FAULT;
        int error, major = 0;
        int vmf_ret = 0;
        void *entry;

        /*
         * Check whether the offset is beyond the end of file now. The caller
         * is supposed to hold locks serializing us with truncate / punch hole
         * so this is a reliable test.
         */
        if (pos >= i_size_read(inode))
                return VM_FAULT_SIGBUS;

        entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
        if (IS_ERR(entry)) {
                error = PTR_ERR(entry);
                goto out;
        }

        if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
                flags |= IOMAP_WRITE;

        /*
         * Note that we don't bother to use iomap_apply here: DAX requires
         * the file system block size to be equal to the page size, which
         * means that we never have to deal with more than a single extent
         * here.
         */
        error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
        if (error)
                goto unlock_entry;
        if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
                error = -EIO;           /* fs corruption? */
                goto finish_iomap;
        }

        sector = dax_iomap_sector(&iomap, pos);

        if (vmf->cow_page) {
                switch (iomap.type) {
                case IOMAP_HOLE:
                case IOMAP_UNWRITTEN:
                        clear_user_highpage(vmf->cow_page, vaddr);
                        break;
                case IOMAP_MAPPED:
                        error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
                                        vmf->cow_page, vaddr);
                        break;
                default:
                        WARN_ON_ONCE(1);
                        error = -EIO;
                        break;
                }

                if (error)
                        goto finish_iomap;

                __SetPageUptodate(vmf->cow_page);
                vmf_ret = finish_fault(vmf);
                if (!vmf_ret)
                        vmf_ret = VM_FAULT_DONE_COW;
                goto finish_iomap;
        }

        switch (iomap.type) {
        case IOMAP_MAPPED:
                if (iomap.flags & IOMAP_F_NEW) {
                        count_vm_event(PGMAJFAULT);
                        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                        major = VM_FAULT_MAJOR;
                }
                error = dax_insert_mapping(mapping, iomap.bdev, sector,
                                PAGE_SIZE, &entry, vma, vmf);
                break;
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (!(vmf->flags & FAULT_FLAG_WRITE)) {
                        vmf_ret = dax_load_hole(mapping, entry, vmf);
                        break;
                }
                /*FALLTHRU*/
        default:
                WARN_ON_ONCE(1);
                error = -EIO;
                break;
        }

 finish_iomap:
        if (ops->iomap_end) {
                if (error || (vmf_ret & VM_FAULT_ERROR)) {
                        /* keep previous error */
                        ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
                                        &iomap);
                } else {
                        error = ops->iomap_end(inode, pos, PAGE_SIZE,
                                        PAGE_SIZE, flags, &iomap);
                }
        }
 unlock_entry:
        if (vmf_ret != VM_FAULT_LOCKED || error)
                put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
        if (error == -ENOMEM)
                return VM_FAULT_OOM | major;
        /* -EBUSY is fine, somebody else faulted on the same PTE */
        if (error < 0 && error != -EBUSY)
                return VM_FAULT_SIGBUS | major;
        if (vmf_ret) {
                WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
                return vmf_ret;
        }
        return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

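/*
 * Example (a sketch, not from this tree): a filesystem's ->fault handler
 * usually just adds its own locking around the helper above, e.g.
 *
 *      static int fs_dax_fault(struct vm_area_struct *vma,
 *                              struct vm_fault *vmf)
 *      {
 *              struct inode *inode = file_inode(vma->vm_file);
 *              int ret;
 *
 *              down_read(&fs_dax_sem(inode));  // hypothetical fs lock
 *              ret = dax_iomap_fault(vma, vmf, &fs_iomap_ops);
 *              up_read(&fs_dax_sem(inode));
 *              return ret;
 *      }
 *
 * The lock is whatever the filesystem uses to serialize faults against
 * truncate and hole punch (ext2 has dax_sem, XFS uses the MMAPLOCK).
 */
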
#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

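/*
 * For example, on x86_64 with 4k pages PMD_SIZE is 2MiB, so
 * PG_PMD_COLOUR == (2MiB >> 12) - 1 == 511: the low 9 bits of a page
 * offset select a 4k page within its PMD, and a pgoff or pfn is
 * PMD-aligned exactly when (value & PG_PMD_COLOUR) == 0.
 */
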
static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
                struct vm_fault *vmf, unsigned long address,
                struct iomap *iomap, loff_t pos, bool write, void **entryp)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct block_device *bdev = iomap->bdev;
        struct blk_dax_ctl dax = {
                .sector = dax_iomap_sector(iomap, pos),
                .size = PMD_SIZE,
        };
        long length = dax_map_atomic(bdev, &dax);
        void *ret;

        if (length < 0) /* dax_map_atomic() failed */
                return VM_FAULT_FALLBACK;
        if (length < PMD_SIZE)
                goto unmap_fallback;
        if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
                goto unmap_fallback;
        if (!pfn_t_devmap(dax.pfn))
                goto unmap_fallback;

        dax_unmap_atomic(bdev, &dax);

        ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
                        RADIX_DAX_PMD);
        if (IS_ERR(ret))
                return VM_FAULT_FALLBACK;
        *entryp = ret;

        return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);

 unmap_fallback:
        dax_unmap_atomic(bdev, &dax);
        return VM_FAULT_FALLBACK;
}

static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
                struct vm_fault *vmf, unsigned long address,
                struct iomap *iomap, void **entryp)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        unsigned long pmd_addr = address & PMD_MASK;
        struct page *zero_page;
        spinlock_t *ptl;
        pmd_t pmd_entry;
        void *ret;

        zero_page = mm_get_huge_zero_page(vma->vm_mm);

        if (unlikely(!zero_page))
                return VM_FAULT_FALLBACK;

        ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
                        RADIX_DAX_PMD | RADIX_DAX_HZP);
        if (IS_ERR(ret))
                return VM_FAULT_FALLBACK;
        *entryp = ret;

        ptl = pmd_lock(vma->vm_mm, pmd);
        if (!pmd_none(*pmd)) {
                spin_unlock(ptl);
                return VM_FAULT_FALLBACK;
        }

        pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
        pmd_entry = pmd_mkhuge(pmd_entry);
        set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
        spin_unlock(ptl);
        return VM_FAULT_NOPAGE;
}

int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        unsigned long pmd_addr = address & PMD_MASK;
        bool write = flags & FAULT_FLAG_WRITE;
        unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
        struct inode *inode = mapping->host;
        int result = VM_FAULT_FALLBACK;
        struct iomap iomap = { 0 };
        pgoff_t max_pgoff, pgoff;
        struct vm_fault vmf;
        void *entry;
        loff_t pos;
        int error;

        /* Fall back to PTEs if we're going to COW */
        if (write && !(vma->vm_flags & VM_SHARED))
                goto fallback;

        /* If the PMD would extend outside the VMA */
        if (pmd_addr < vma->vm_start)
                goto fallback;
        if ((pmd_addr + PMD_SIZE) > vma->vm_end)
                goto fallback;

        /*
         * Check whether the offset is beyond the end of file now. The caller
         * is supposed to hold locks serializing us with truncate / punch
         * hole so this is a reliable test.
         */
        pgoff = linear_page_index(vma, pmd_addr);
        max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

        if (pgoff > max_pgoff)
                return VM_FAULT_SIGBUS;

        /* If the PMD would extend beyond the file size */
        if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
                goto fallback;

        /*
         * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
         * PMD or a HZP entry.  If it can't (because a 4k page is already in
         * the tree, for instance), it will return -EEXIST and we just fall
         * back to 4k entries.
         */
        entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
        if (IS_ERR(entry))
                goto fallback;

        /*
         * Note that we don't use iomap_apply here.  We aren't doing I/O, only
         * setting up a mapping, so really we're using iomap_begin() as a way
         * to look up our filesystem block.
         */
        pos = (loff_t)pgoff << PAGE_SHIFT;
        error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
        if (error)
                goto unlock_entry;
        if (iomap.offset + iomap.length < pos + PMD_SIZE)
                goto finish_iomap;

        vmf.pgoff = pgoff;
        vmf.flags = flags;
        vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;

        switch (iomap.type) {
        case IOMAP_MAPPED:
                result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
                                &iomap, pos, write, &entry);
                break;
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (WARN_ON_ONCE(write))
                        goto finish_iomap;
                result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
                                &entry);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }

 finish_iomap:
        if (ops->iomap_end) {
                if (result == VM_FAULT_FALLBACK) {
                        ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
                                        &iomap);
                } else {
                        error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
                                        iomap_flags, &iomap);
                        if (error)
                                result = VM_FAULT_FALLBACK;
                }
        }
 unlock_entry:
        put_locked_mapping_entry(mapping, pgoff, entry);
 fallback:
        if (result == VM_FAULT_FALLBACK) {
                split_huge_pmd(vma, pmd, address);
                count_vm_event(THP_FAULT_FALLBACK);
        }
        return result;
}
EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
#endif /* CONFIG_FS_DAX_PMD */
#endif /* CONFIG_FS_IOMAP */