/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)

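/*
 * Translate an xfs extent mapping into the generic struct iomap form:
 * holes and delalloc extents get a null block number, real extents are
 * converted to a disk address, and the unwritten state is carried over.
 */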
void
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
}

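/*
 * Work out the block alignment to apply to an allocation beyond EOF:
 * the stripe unit (or the stripe width with "-o swalloc") for
 * sufficiently large files, rounded up to the extent size hint if one
 * is passed in.
 */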
xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}

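/*
 * Round *last_fsb up to the EOF alignment, but only keep the aligned
 * value if it still points past the last allocated block.
 */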
STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize,
	xfs_fileoff_t		*last_fsb)
{
	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		int		eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

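/*
 * Complain about a mapping that points at block zero, which indicates
 * on-disk corruption, and return -EFSCORRUPTED.
 */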
STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

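/*
 * Allocate real blocks to back a direct I/O (or DAX) write.  Extending
 * allocations are rounded up to the EOF alignment; on success *imap is
 * overwritten with the extent that now covers the start of the range.
 */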
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t *imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	struct xfs_defer_ops dfops;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this can
		 * call xfs_iread_extents() and we only have the ilock shared.
		 * This should be safe because the lock was held around a bmapi
		 * call in the caller and we only need it to access the in-core
		 * list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);
	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_defer_init(&dfops, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, &firstfsb, resblks, imap,
				&nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_defer_finish(&tp, &dfops);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

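/*
 * Decide whether a quota of the given type is close enough to its
 * preallocation watermarks that speculative preallocation should be
 * throttled for this inode.
 */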
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

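/*
 * Work out how hard this quota forces us to throttle preallocation.
 * *qblocks/*qshift are only tightened if this quota is more aggressive
 * than what a previous call computed; *qfreesp is lowered to this
 * quota's free space if that is smaller.
 */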
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file
 * that is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			count,
	xfs_extnum_t		idx)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmbt_irec	prev;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;
	xfs_fsblock_t		alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If an explicit allocsize is set, the file is small, or we
	 * are writing behind a hole, then use the minimum prealloc:
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_get_extent(ifp, idx - 1, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial size of the preallocation. We are beyond the
	 * current EOF here, but we need to take into account whether this is
	 * a sparse write or an extending write when determining the
	 * preallocation size.  Hence we need to look up the extent that ends
	 * at the current write offset and use the result to determine the
	 * preallocation size.
	 *
	 * If the extent is a hole, then preallocation is essentially disabled.
	 * Otherwise we take the size of the preceding data extent as the basis
	 * for the preallocation size. If the size of the extent is greater than
	 * half the maximum extent length, then use the current offset as the
	 * basis. This ensures that for large files the preallocation size
	 * always extends to MAXEXTLEN rather than falling short due to things
	 * like stripe unit/width alignment of real extents.
	 */
	if (prev.br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev.br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}

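/*
 * ->iomap_begin handler for the buffered write path: look up or reserve
 * delalloc blocks for the range, including any speculative
 * preallocation beyond EOF, and return the mapping in *iomap.
 */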
static int
xfs_file_iomap_begin_delay(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t		end_fsb;
	int			error = 0, eof = 0;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	xfs_fsblock_t		prealloc_blocks = 0;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
	if (!eof && got.br_startoff <= offset_fsb) {
		if (xfs_is_reflink_inode(ip)) {
			bool		shared;

			end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
					maxbytes_fsb);
			xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
			error = xfs_reflink_reserve_cow(ip, &got, &shared);
			if (error)
				goto out_unlock;
		}

		trace_xfs_iomap_found(ip, offset, count, 0, &got);
		goto done;
	}

	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		goto out_unlock;

	/*
	 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
	 * to keep the chunks of work done somewhat symmetric with the work
	 * writeback does. This is a completely arbitrary number pulled out of
	 * thin air as a best guess for initial testing.
	 *
	 * Note that the values need to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	count = min_t(loff_t, count, 1024 * PAGE_SIZE);
	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	if (eof) {
		prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, idx);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
				    prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks, &got, &idx, eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	iomap->flags = IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
	if (isnullstartblock(got.br_startblock))
		got.br_startblock = DELAYSTARTBLOCK;

	if (!got.br_startblock) {
		error = xfs_alert_fsblock_zero(ip, &got);
		if (error)
			goto out_unlock;
	}

	xfs_bmbt_to_iomap(ip, iomap, &got);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	int		whichfork,
	xfs_off_t	offset,
	xfs_bmbt_irec_t *imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	struct xfs_defer_ops	dfops;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps;
	int		error = 0;
	int		flags = XFS_BMAPI_DELALLOC;
	int		nres;

	if (whichfork == XFS_COW_FORK)
		flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			/*
			 * We have already reserved space for the extent and any
			 * indirect blocks when creating the delalloc extent,
			 * there is no need to reserve space in this transaction
			 * again.
			 */
			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
					0, XFS_TRANS_RESERVE, &tp);
			if (error)
				return error;

			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_defer_init(&dfops, &first_block);

			/*
			 * it is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, flags, &first_block,
						nres, imap, &nimaps,
						&dfops);
			if (error)
				goto trans_cancel;

			error = xfs_defer_finish(&tp, &dfops);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

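/*
 * Convert the unwritten extents covering the given range to written,
 * looping until the whole range has been converted and logging any
 * inode size update along the way.
 */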
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t imap;
	struct xfs_defer_ops dfops;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_defer_init(&dfops, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb, resblks,
					&imap, &nimaps, &dfops);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

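/*
 * Decide if a write needs a real allocation before I/O can proceed:
 * missing, hole or delalloc mappings always do, and so do unwritten
 * extents for DAX, which does not use an unwritten extent conversion
 * callback at I/O completion.
 */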
static inline bool imap_needs_alloc(struct inode *inode,
		struct xfs_bmbt_irec *imap, int nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}

static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
{
	/*
	 * COW writes will allocate delalloc space, so we need to make sure
	 * to take the lock exclusively here.
	 */
	if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
		return true;
	if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE))
		return true;
	return false;
}

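/*
 * Main ->iomap_begin handler for file data: hand buffered writes off to
 * the delalloc path, reserve or allocate CoW blocks for reflinked
 * inodes, allocate real blocks where the write needs them, and
 * otherwise report the existing mapping.
 */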
static int
xfs_file_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;
	bool			shared = false, trimmed = false;
	unsigned		lockmode;
	struct block_device	*bdev;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
			!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
		/* Reserve delalloc blocks for regular writeback. */
		return xfs_file_iomap_begin_delay(inode, offset, length, iomap);
	}

	if (need_excl_ilock(ip, flags)) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, XFS_ILOCK_EXCL);
	} else {
		lockmode = xfs_ilock_data_map_shared(ip);
	}

	if ((flags & IOMAP_NOWAIT) && !(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = -EAGAIN;
		goto out_unlock;
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (flags & IOMAP_REPORT) {
		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
				&trimmed);
		if (error)
			goto out_unlock;
	}

	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
		if (flags & IOMAP_DIRECT) {
			/*
			 * A reflinked inode will result in CoW alloc.
			 * FIXME: It could still overwrite on unshared extents
			 * and not need allocation.
			 */
			if (flags & IOMAP_NOWAIT) {
				error = -EAGAIN;
				goto out_unlock;
			}
			/* may drop and re-acquire the ilock */
			error = xfs_reflink_allocate_cow(ip, &imap, &shared,
					&lockmode);
			if (error)
				goto out_unlock;
		} else {
			error = xfs_reflink_reserve_cow(ip, &imap, &shared);
			if (error)
				goto out_unlock;
		}

		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
		/*
		 * If nowait is set bail since we are going to make
		 * allocations.
		 */
		if (flags & IOMAP_NOWAIT) {
			error = -EAGAIN;
			goto out_unlock;
		}
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done somewhat symmetric
		 * with the work writeback does. This is a completely arbitrary
		 * number pulled out of thin air as a best guess for initial
		 * testing.
		 *
		 * Note that the values need to be less than 32-bits wide until
		 * the lower level functions are updated.
		 */
		length = min_t(loff_t, length, 1024 * PAGE_SIZE);
		/*
		 * xfs_iomap_write_direct() expects the shared lock. It
		 * is unlocked on return.
		 */
		if (lockmode == XFS_ILOCK_EXCL)
			xfs_ilock_demote(ip, lockmode);
		error = xfs_iomap_write_direct(ip, offset, length, &imap,
				nimaps);
		if (error)
			return error;

		iomap->flags = IOMAP_F_NEW;
		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
	} else {
		ASSERT(nimaps);

		xfs_iunlock(ip, lockmode);
		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
	}

	xfs_bmbt_to_iomap(ip, iomap, &imap);

	/* optionally associate a dax device with the iomap bdev */
	bdev = iomap->bdev;
	if (blk_queue_dax(bdev->bd_queue))
		iomap->dax_dev = fs_dax_get_by_host(bdev->bd_disk->disk_name);
	else
		iomap->dax_dev = NULL;

	if (shared)
		iomap->flags |= IOMAP_F_SHARED;
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

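/*
 * Punch out any delalloc blocks this write reserved but then failed to
 * use, so a short or failed write does not leave stale delalloc
 * extents behind.
 */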
static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	struct iomap		*iomap)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	/*
	 * Behave as if the write failed if drop writes is enabled. Set the NEW
	 * flag to force delalloc cleanup.
	 */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block
	 * in the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls. If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

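/*
 * ->iomap_end handler: release the dax device reference and clean up
 * after short buffered writes into delalloc regions.
 */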
static int
xfs_file_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	fs_put_dax(iomap->dax_dev);
	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written, iomap);
	return 0;
}

const struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};

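/*
 * ->iomap_begin handler for the attribute fork, used for read-only
 * reporting of xattr extents (e.g. FIEMAP); no allocation happens here.
 */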
static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there are no attribute fork or extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (!error) {
		ASSERT(nimaps);
		xfs_bmbt_to_iomap(ip, iomap, &imap);
	}

	return error;
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};