/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
					 << mp->m_writeio_log)

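/*
 * Translate an XFS extent record into the generic struct iomap consumed by
 * the iomap infrastructure: holes and delalloc reservations get a null block
 * number, everything else is converted to a daddr-based mapping.
 */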
void
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
}

static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}

STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize,
	xfs_fileoff_t		*last_fsb)
{
	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		int		eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

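/*
 * Allocate on-disk blocks for a direct I/O or DAX write.  Called with the
 * ilock held shared; the lock is dropped and retaken exclusively for the
 * allocation transaction and is released again before returning.
 */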
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	struct xfs_defer_ops dfops;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this can
		 * call xfs_iread_extents() and we only have the ilock shared.
		 * This should be safe because the lock was held around a bmapi
		 * call in the caller and we only need it to access the in-core
		 * list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (ISUNWRITTEN(imap)) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_defer_init(&dfops, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, &firstfsb, resblks, imap,
				&nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

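/*
 * Decide whether speculative preallocation needs to be throttled against this
 * quota type: only if the quota is active for the inode, a high watermark is
 * configured, and the projected usage reaches the low watermark.
 */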
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

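/*
 * Scale back the preallocation for this quota type.  The prealloc size is
 * clamped to the space left below the quota's high watermark and the throttle
 * shift grows as that space shrinks; the values passed in are only ever
 * tightened, never relaxed.
 */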
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file
 * that is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			count,
	xfs_extnum_t		idx,
	struct xfs_bmbt_irec	*prev)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;
	xfs_fsblock_t		alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If an explicit allocsize is set, the file is small, or we
	 * are writing behind a hole, then use the minimum prealloc:
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    idx == 0 ||
	    prev->br_startoff + prev->br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial size of the preallocation. We are beyond the
	 * current EOF here, but we need to take into account whether this is
	 * a sparse write or an extending write when determining the
	 * preallocation size.  Hence we need to look up the extent that ends
	 * at the current write offset and use the result to determine the
	 * preallocation size.
	 *
	 * If the extent is a hole, then preallocation is essentially disabled.
	 * Otherwise we take the size of the preceding data extent as the basis
	 * for the preallocation size. If the size of the extent is greater than
	 * half the maximum extent length, then use the current offset as the
	 * basis. This ensures that for large files the preallocation size
	 * always extends to MAXEXTLEN rather than falling short due to things
	 * like stripe unit/width alignment of real extents.
	 */
	if (prev->br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev->br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}

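/*
 * Find or create a delalloc reservation covering a buffered write.  Writes
 * that extend the file also reserve speculative preallocation beyond EOF,
 * sized by xfs_iomap_prealloc_size() above; on ENOSPC or EDQUOT we retry
 * without the preallocation.
 */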
static int
xfs_file_iomap_begin_delay(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t		end_fsb, orig_end_fsb;
	int			error = 0, eof = 0;
	struct xfs_bmbt_irec	got;
	struct xfs_bmbt_irec	prev;
	xfs_extnum_t		idx;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx,
			&got, &prev);
	if (!eof && got.br_startoff <= offset_fsb) {
		trace_xfs_iomap_found(ip, offset, count, 0, &got);
		goto done;
	}

	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		goto out_unlock;

	/*
	 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
	 * to keep the chunks of work done here somewhat symmetric with the
	 * work writeback does. This is a completely arbitrary number pulled
	 * out of thin air as a best guess for initial testing.
	 *
	 * Note that the value needs to be less than 32 bits wide until
	 * the lower level functions are updated.
	 */
	count = min_t(loff_t, count, 1024 * PAGE_SIZE);
	end_fsb = orig_end_fsb =
		min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	if (eof) {
		xfs_fsblock_t	prealloc_blocks;

		prealloc_blocks =
			xfs_iomap_prealloc_size(ip, offset, count, idx, &prev);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
				prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				end_fsb = roundup_64(end_fsb, align);

			end_fsb = min(end_fsb, maxbytes_fsb);
			ASSERT(end_fsb > offset_fsb);
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
			end_fsb - offset_fsb, &got,
			&prev, &idx, eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (end_fsb != orig_end_fsb) {
			end_fsb = orig_end_fsb;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	/*
	 * Tag the inode as speculatively preallocated so we can reclaim this
	 * space on demand, if necessary.
	 */
	if (end_fsb != orig_end_fsb)
		xfs_inode_set_eofblocks_tag(ip);

	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
	if (isnullstartblock(got.br_startblock))
		got.br_startblock = DELAYSTARTBLOCK;

	if (!got.br_startblock) {
		error = xfs_alert_fsblock_zero(ip, &got);
		if (error)
			goto out_unlock;
	}

	xfs_bmbt_to_iomap(ip, iomap, &got);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_bmbt_irec_t	*imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	struct xfs_defer_ops	dfops;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			/*
			 * We have already reserved space for the extent and any
			 * indirect blocks when creating the delalloc extent,
			 * there is no need to reserve space in this transaction
			 * again.
			 */
			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
					0, XFS_TRANS_RESERVE, &tp);
			if (error)
				return error;

			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_defer_init(&dfops, &first_block);

			/*
			 * it is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, 0, &first_block,
						nres, imap, &nimaps,
						&dfops);
			if (error)
				goto trans_cancel;

			error = xfs_defer_finish(&tp, &dfops, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

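/*
 * Convert unwritten extents in the given range to written, typically once
 * data has landed in them at I/O completion.  The on-disk inode size is
 * logged as we go, capped at the end of the written range.
 */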
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct xfs_defer_ops dfops;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_defer_init(&dfops, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb, resblks,
					&imap, &nimaps, &dfops);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

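/*
 * Decide whether a mapping found by xfs_bmapi_read() is good enough for a
 * write as-is, or whether we have to allocate blocks first: holes and
 * delalloc reservations always need allocation, and for DAX an unwritten
 * extent must be converted as well.
 */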
static inline bool imap_needs_alloc(struct inode *inode,
		struct xfs_bmbt_irec *imap, int nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && ISUNWRITTEN(imap));
}

static int
xfs_file_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
		error = xfs_reflink_reserve_cow_range(ip, offset, length);
		if (error < 0)
			return error;
	}

	if ((flags & IOMAP_WRITE) && !IS_DAX(inode) &&
	    !xfs_get_extsz_hint(ip)) {
		/* Reserve delalloc blocks for regular writeback. */
		return xfs_file_iomap_begin_delay(inode, offset, length, flags,
				iomap);
	}

	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ENTIRE);
	if (error) {
		xfs_iunlock(ip, lockmode);
		return error;
	}

	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done here somewhat symmetric
		 * with the work writeback does. This is a completely arbitrary
		 * number pulled out of thin air as a best guess for initial
		 * testing.
		 *
		 * Note that the value needs to be less than 32 bits wide until
		 * the lower level functions are updated.
		 */
		length = min_t(loff_t, length, 1024 * PAGE_SIZE);
		/*
		 * xfs_iomap_write_direct() expects the shared lock. It
		 * is unlocked on return.
		 */
		if (lockmode == XFS_ILOCK_EXCL)
			xfs_ilock_demote(ip, lockmode);
		error = xfs_iomap_write_direct(ip, offset, length, &imap,
					       nimaps);
		if (error)
			return error;

		iomap->flags = IOMAP_F_NEW;
		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
	} else {
		ASSERT(nimaps);

		xfs_iunlock(ip, lockmode);
		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
	}

	xfs_bmbt_to_iomap(ip, iomap, &imap);
	return 0;
}

static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			length,
	ssize_t			written)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim back delalloc blocks if we didn't manage to write the whole
	 * range reserved.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls. If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if (start_fsb < end_fsb) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

static int
xfs_file_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written);
	return 0;
}

struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};

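/*
 * Map extents of the attribute fork, used by the fiemap path to report
 * attribute fork extents.  Read-only: no allocation is ever done here.
 */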
static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);

	/* if there are no attribute fork or extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (!error) {
		ASSERT(nimaps);
		xfs_bmbt_to_iomap(ip, iomap, &imap);
	}

	return error;
}

struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};