fs/xfs/xfs_iomap.c
xfs: clear delalloc and cache on buffered write failure
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"


#define XFS_WRITEIO_ALIGN(mp,off)       (((off) >> mp->m_writeio_log) \
                                          << mp->m_writeio_log)

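/*
 * Convert an xfs_bmbt_irec extent mapping to the generic struct iomap:
 * holes and delalloc extents map to a null block number, everything else
 * to a real disk block plus a mapped or unwritten state.
 */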
void
xfs_bmbt_to_iomap(
        struct xfs_inode        *ip,
        struct iomap            *iomap,
        struct xfs_bmbt_irec    *imap)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (imap->br_startblock == HOLESTARTBLOCK) {
                iomap->blkno = IOMAP_NULL_BLOCK;
                iomap->type = IOMAP_HOLE;
        } else if (imap->br_startblock == DELAYSTARTBLOCK) {
                iomap->blkno = IOMAP_NULL_BLOCK;
                iomap->type = IOMAP_DELALLOC;
        } else {
                iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
                if (imap->br_state == XFS_EXT_UNWRITTEN)
                        iomap->type = IOMAP_UNWRITTEN;
                else
                        iomap->type = IOMAP_MAPPED;
        }
        iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
        iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
        iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
}

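/*
 * Return the alignment (in filesystem blocks) to apply to allocations at
 * EOF, derived from the stripe geometry and the extent size hint passed
 * in by the caller.  Returns 0 if no alignment is needed.
 */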
xfs_extlen_t
xfs_eof_alignment(
        struct xfs_inode        *ip,
        xfs_extlen_t            extsize)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_extlen_t            align = 0;

        if (!XFS_IS_REALTIME_INODE(ip)) {
                /*
                 * Round up the allocation request to a stripe unit
                 * (m_dalign) boundary if the file size is >= stripe unit
                 * size, and we are allocating past the allocation eof.
                 *
                 * If mounted with the "-o swalloc" option the alignment is
                 * increased from the stripe unit size to the stripe width.
                 */
                if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
                        align = mp->m_swidth;
                else if (mp->m_dalign)
                        align = mp->m_dalign;

                if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
                        align = 0;
        }

        /*
         * Always round up the allocation request to an extent boundary
         * (when the file is on a real-time subvolume or has a di_extsize
         * hint).
         */
        if (extsize) {
                if (align)
                        align = roundup_64(align, extsize);
                else
                        align = extsize;
        }

        return align;
}

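/*
 * Round *last_fsb up to the EOF alignment, but only if the aligned offset
 * still lies beyond all existing allocations in the data fork.
 */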
STATIC int
xfs_iomap_eof_align_last_fsb(
        struct xfs_inode        *ip,
        xfs_extlen_t            extsize,
        xfs_fileoff_t           *last_fsb)
{
        xfs_extlen_t            align = xfs_eof_alignment(ip, extsize);

        if (align) {
                xfs_fileoff_t   new_last_fsb = roundup_64(*last_fsb, align);
                int             eof, error;

                error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
                if (error)
                        return error;
                if (eof)
                        *last_fsb = new_last_fsb;
        }
        return 0;
}

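/*
 * A mapping that claims to start at block zero of the data device is a
 * sign of on-disk corruption; report it and return -EFSCORRUPTED.
 */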
STATIC int
xfs_alert_fsblock_zero(
        xfs_inode_t     *ip,
        xfs_bmbt_irec_t *imap)
{
        xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
                        "Access to block zero in inode %llu "
                        "start_block: %llx start_off: %llx "
                        "blkcnt: %llx extent-state: %x",
                (unsigned long long)ip->i_ino,
                (unsigned long long)imap->br_startblock,
                (unsigned long long)imap->br_startoff,
                (unsigned long long)imap->br_blockcount,
                imap->br_state);
        return -EFSCORRUPTED;
}

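/*
 * Allocate real blocks for a direct I/O or DAX write.  Called with the
 * ilock held shared by the caller; the lock is dropped for quota and
 * transaction setup, retaken exclusively for the allocation, and released
 * on return.  On success *imap is overwritten with the allocated extent.
 */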
int
xfs_iomap_write_direct(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
        size_t          count,
        xfs_bmbt_irec_t *imap,
        int             nmaps)
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   offset_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_filblks_t   count_fsb, resaligned;
        xfs_fsblock_t   firstfsb;
        xfs_extlen_t    extsz;
        int             nimaps;
        int             quota_flag;
        int             rt;
        xfs_trans_t     *tp;
        struct xfs_defer_ops dfops;
        uint            qblocks, resblks, resrtextents;
        int             error;
        int             lockmode;
        int             bmapi_flags = XFS_BMAPI_PREALLOC;
        uint            tflags = 0;

        rt = XFS_IS_REALTIME_INODE(ip);
        extsz = xfs_get_extsz_hint(ip);
        lockmode = XFS_ILOCK_SHARED;    /* locked by caller */

        ASSERT(xfs_isilocked(ip, lockmode));

        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
        if ((offset + count) > XFS_ISIZE(ip)) {
                /*
                 * Assert that the in-core extent list is present since this
                 * can call xfs_iread_extents() and we only have the ilock
                 * shared.  This should be safe because the lock was held
                 * around a bmapi call in the caller and we only need it to
                 * access the in-core list.
                 */
                ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
                                                        XFS_IFEXTENTS);
                error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
                if (error)
                        goto out_unlock;
        } else {
                if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
                        last_fsb = MIN(last_fsb, (xfs_fileoff_t)
                                        imap->br_blockcount +
                                        imap->br_startoff);
        }
        count_fsb = last_fsb - offset_fsb;
        ASSERT(count_fsb > 0);
        resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);

        if (unlikely(rt)) {
                resrtextents = qblocks = resaligned;
                resrtextents /= mp->m_sb.sb_rextsize;
                resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                quota_flag = XFS_QMOPT_RES_RTBLKS;
        } else {
                resrtextents = 0;
                resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
                quota_flag = XFS_QMOPT_RES_REGBLKS;
        }

        /*
         * Drop the shared lock acquired by the caller, attach the dquot if
         * necessary and move on to transaction setup.
         */
        xfs_iunlock(ip, lockmode);
        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        /*
         * For DAX, we do not allocate unwritten extents, but instead we zero
         * the block before we commit the transaction.  Ideally we'd like to
         * do this outside the transaction context, but if we commit and then
         * crash we may not have zeroed the blocks and this will be exposed on
         * recovery of the allocation. Hence we must zero before commit.
         *
         * Further, if we are mapping unwritten extents here, we need to zero
         * and convert them to written so that we don't need an unwritten
         * extent callback for DAX. This also means that we need to be able to
         * dip into the reserve block pool for bmbt block allocation if there
         * is no space left but we need to do unwritten extent conversion.
         */
        if (IS_DAX(VFS_I(ip))) {
                bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
                if (ISUNWRITTEN(imap)) {
                        tflags |= XFS_TRANS_RESERVE;
                        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
                }
        }
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
                        tflags, &tp);
        if (error)
                return error;

        lockmode = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lockmode);

        error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
        if (error)
                goto out_trans_cancel;

        xfs_trans_ijoin(tp, ip, 0);

        /*
         * From this point onwards we overwrite the imap pointer that the
         * caller gave to us.
         */
        xfs_defer_init(&dfops, &firstfsb);
        nimaps = 1;
        error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
                                bmapi_flags, &firstfsb, resblks, imap,
                                &nimaps, &dfops);
        if (error)
                goto out_bmap_cancel;

        /*
         * Complete the transaction
         */
        error = xfs_defer_finish(&tp, &dfops, NULL);
        if (error)
                goto out_bmap_cancel;

        error = xfs_trans_commit(tp);
        if (error)
                goto out_unlock;

        /*
         * Copy any maps to caller's array and return any error.
         */
        if (nimaps == 0) {
                error = -ENOSPC;
                goto out_unlock;
        }

        if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
                error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
        xfs_iunlock(ip, lockmode);
        return error;

out_bmap_cancel:
        xfs_defer_cancel(&dfops);
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out_unlock;
}

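/*
 * Decide whether speculative preallocation must be throttled against the
 * given quota: the quota must be active with a high watermark set, and
 * the projected usage must have reached the low watermark.
 */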
STATIC bool
xfs_quota_need_throttle(
        struct xfs_inode *ip,
        int type,
        xfs_fsblock_t alloc_blocks)
{
        struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

        if (!dq || !xfs_this_quota_on(ip->i_mount, type))
                return false;

        /* no hi watermark, no throttle */
        if (!dq->q_prealloc_hi_wmark)
                return false;

        /* under the lo watermark, no throttle */
        if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
                return false;

        return true;
}

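/*
 * Work out how hard the given quota throttles speculative preallocation:
 * over the high watermark the prealloc is squashed completely, otherwise
 * a throttle shift is derived from the distance to the watermark.  The
 * tightest resulting limits are passed back through *qblocks, *qshift
 * and *qfreesp.
 */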
STATIC void
xfs_quota_calc_throttle(
        struct xfs_inode *ip,
        int type,
        xfs_fsblock_t *qblocks,
        int *qshift,
        int64_t *qfreesp)
{
        int64_t freesp;
        int shift = 0;
        struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

        /* no dq, or over hi wmark, squash the prealloc completely */
        if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
                *qblocks = 0;
                *qfreesp = 0;
                return;
        }

        freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
        if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
                shift = 2;
                if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
                        shift += 2;
                if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
                        shift += 2;
        }

        if (freesp < *qfreesp)
                *qfreesp = freesp;

        /* only overwrite the throttle values if we are more aggressive */
        if ((freesp >> shift) < (*qblocks >> *qshift)) {
                *qblocks = freesp;
                *qshift = shift;
        }
}

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file
 * that is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  count,
        xfs_extnum_t            idx)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        struct xfs_bmbt_irec    prev;
        int                     shift = 0;
        int64_t                 freesp;
        xfs_fsblock_t           qblocks;
        int                     qshift = 0;
        xfs_fsblock_t           alloc_blocks = 0;

        if (offset + count <= XFS_ISIZE(ip))
                return 0;

        if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
            (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
                return 0;

        /*
         * If an explicit allocsize is set, the file is small, or we
         * are writing behind a hole, then use the minimum prealloc:
         */
        if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
            XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
            !xfs_iext_get_extent(ifp, idx - 1, &prev) ||
            prev.br_startoff + prev.br_blockcount < offset_fsb)
                return mp->m_writeio_blocks;

        /*
         * Determine the initial size of the preallocation.  We are beyond the
         * current EOF here, but we need to take into account whether this is
         * a sparse write or an extending write when determining the
         * preallocation size.  Hence we need to look up the extent that ends
         * at the current write offset and use the result to determine the
         * preallocation size.
         *
         * If the extent is a hole, then preallocation is essentially disabled.
         * Otherwise we take the size of the preceding data extent as the basis
         * for the preallocation size.  If the size of the extent is greater
         * than half the maximum extent length, then use the current offset as
         * the basis.  This ensures that for large files the preallocation size
         * always extends to MAXEXTLEN rather than falling short due to things
         * like stripe unit/width alignment of real extents.
         */
        if (prev.br_blockcount <= (MAXEXTLEN >> 1))
                alloc_blocks = prev.br_blockcount << 1;
        else
                alloc_blocks = XFS_B_TO_FSB(mp, offset);
        if (!alloc_blocks)
                goto check_writeio;
        qblocks = alloc_blocks;

        /*
         * MAXEXTLEN is not a power of two value but we round the prealloc down
         * to the nearest power of two value after throttling.  To prevent the
         * round down from unconditionally reducing the maximum supported
         * prealloc size, we round up first, apply appropriate throttling,
         * round down and cap the value to MAXEXTLEN.
         */
        alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
                                       alloc_blocks);

        freesp = percpu_counter_read_positive(&mp->m_fdblocks);
        if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
                shift = 2;
                if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
                        shift++;
                if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
                        shift++;
                if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
                        shift++;
                if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
                        shift++;
        }

        /*
         * Check each quota to cap the prealloc size, provide a shift value to
         * throttle with and adjust amount of available space.
         */
        if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
                                        &freesp);
        if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
                                        &freesp);
        if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
                xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
                                        &freesp);

        /*
         * The final prealloc size is set to the minimum of free space
         * available in each of the quotas and the overall filesystem.
         *
         * The shift throttle value is set to the maximum value as determined
         * by the global low free space values and per-quota low free space
         * values.
         */
        alloc_blocks = MIN(alloc_blocks, qblocks);
        shift = MAX(shift, qshift);

        if (shift)
                alloc_blocks >>= shift;
        /*
         * rounddown_pow_of_two() returns an undefined result if we pass in
         * alloc_blocks = 0.
         */
        if (alloc_blocks)
                alloc_blocks = rounddown_pow_of_two(alloc_blocks);
        if (alloc_blocks > MAXEXTLEN)
                alloc_blocks = MAXEXTLEN;

        /*
         * If we are still trying to allocate more space than is
         * available, squash the prealloc hard.  This can happen if we
         * have a large file on a small filesystem and the above
         * lowspace thresholds are smaller than MAXEXTLEN.
         */
        while (alloc_blocks && alloc_blocks >= freesp)
                alloc_blocks >>= 4;
check_writeio:
        if (alloc_blocks < mp->m_writeio_blocks)
                alloc_blocks = mp->m_writeio_blocks;
        trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
                                      mp->m_writeio_blocks);
        return alloc_blocks;
}

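/*
 * ->iomap_begin handler for buffered writes: reserve delalloc space for
 * the write, reusing an existing mapping where one already covers the
 * range (reserving COW blocks first on reflink inodes).  Writes that
 * extend EOF get speculative preallocation as sized above.
 */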
static int
xfs_file_iomap_begin_delay(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  count,
        unsigned                flags,
        struct iomap            *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           maxbytes_fsb =
                        XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        xfs_fileoff_t           end_fsb;
        int                     error = 0, eof = 0;
        struct xfs_bmbt_irec    got;
        xfs_extnum_t            idx;
        xfs_fsblock_t           prealloc_blocks = 0;

        ASSERT(!XFS_IS_REALTIME_INODE(ip));
        ASSERT(!xfs_get_extsz_hint(ip));

        xfs_ilock(ip, XFS_ILOCK_EXCL);

        if (unlikely(XFS_TEST_ERROR(
            (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
             XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
             mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
                error = -EFSCORRUPTED;
                goto out_unlock;
        }

        XFS_STATS_INC(mp, xs_blk_mapw);

        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
                if (error)
                        goto out_unlock;
        }

        eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
        if (!eof && got.br_startoff <= offset_fsb) {
                if (xfs_is_reflink_inode(ip)) {
                        bool            shared;

                        end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
                                        maxbytes_fsb);
                        xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
                        error = xfs_reflink_reserve_cow(ip, &got, &shared);
                        if (error)
                                goto out_unlock;
                }

                trace_xfs_iomap_found(ip, offset, count, 0, &got);
                goto done;
        }

        error = xfs_qm_dqattach_locked(ip, 0);
        if (error)
                goto out_unlock;

        /*
         * We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
         * to keep the chunks of work done here somewhat symmetric with the
         * work writeback does.  This is a completely arbitrary number pulled
         * out of thin air as a best guess for initial testing.
         *
         * Note that the value needs to be less than 32-bits wide until
         * the lower level functions are updated.
         */
        count = min_t(loff_t, count, 1024 * PAGE_SIZE);
        end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

        if (eof) {
                prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, idx);
                if (prealloc_blocks) {
                        xfs_extlen_t    align;
                        xfs_off_t       end_offset;
                        xfs_fileoff_t   p_end_fsb;

                        end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
                        p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
                                prealloc_blocks;

                        align = xfs_eof_alignment(ip, 0);
                        if (align)
                                p_end_fsb = roundup_64(p_end_fsb, align);

                        p_end_fsb = min(p_end_fsb, maxbytes_fsb);
                        ASSERT(p_end_fsb > offset_fsb);
                        prealloc_blocks = p_end_fsb - end_fsb;
                }
        }

retry:
        error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
                        end_fsb - offset_fsb, prealloc_blocks, &got, &idx, eof);
        switch (error) {
        case 0:
                break;
        case -ENOSPC:
        case -EDQUOT:
                /* retry without any preallocation */
                trace_xfs_delalloc_enospc(ip, offset, count);
                if (prealloc_blocks) {
                        prealloc_blocks = 0;
                        goto retry;
                }
                /*FALLTHRU*/
        default:
                goto out_unlock;
        }

        trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
        if (isnullstartblock(got.br_startblock))
                got.br_startblock = DELAYSTARTBLOCK;

        if (!got.br_startblock) {
                error = xfs_alert_fsblock_zero(ip, &got);
                if (error)
                        goto out_unlock;
        }

        xfs_bmbt_to_iomap(ip, iomap, &got);

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
        xfs_inode_t     *ip,
        int             whichfork,
        xfs_off_t       offset,
        xfs_bmbt_irec_t *imap)
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   offset_fsb, last_block;
        xfs_fileoff_t   end_fsb, map_start_fsb;
        xfs_fsblock_t   first_block;
        struct xfs_defer_ops    dfops;
        xfs_filblks_t   count_fsb;
        xfs_trans_t     *tp;
        int             nimaps;
        int             error = 0;
        int             flags = XFS_BMAPI_DELALLOC;
        int             nres;

        if (whichfork == XFS_COW_FORK)
                flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;

        /*
         * Make sure that the dquots are there.
         */
        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        count_fsb = imap->br_blockcount;
        map_start_fsb = imap->br_startoff;

        XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

        while (count_fsb != 0) {
                /*
                 * Set up a transaction with which to allocate the
                 * backing store for the file.  Do allocations in a
                 * loop until we get some space in the range we are
                 * interested in.  The other space that might be allocated
                 * is in the delayed allocation extent on which we sit
                 * but before our buffer starts.
                 */
                nimaps = 0;
                while (nimaps == 0) {
                        nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
                        /*
                         * We have already reserved space for the extent and
                         * any indirect blocks when creating the delalloc
                         * extent, there is no need to reserve space in this
                         * transaction again.
                         */
                        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
                                        0, XFS_TRANS_RESERVE, &tp);
                        if (error)
                                return error;

                        xfs_ilock(ip, XFS_ILOCK_EXCL);
                        xfs_trans_ijoin(tp, ip, 0);

                        xfs_defer_init(&dfops, &first_block);

                        /*
                         * It is possible that the extents have changed since
                         * we did the read call as we dropped the ilock for a
                         * while.  We have to be careful about truncates or
                         * hole punches here - we are not allowed to allocate
                         * non-delalloc blocks here.
                         *
                         * The only protection against truncation is the pages
                         * for the range we are being asked to convert are
                         * locked and hence a truncate will block on them
                         * first.
                         *
                         * As a result, if we go beyond the range we really
                         * need and hit a delalloc extent boundary followed by
                         * a hole while we have excess blocks in the map, we
                         * will fill the hole incorrectly and overrun the
                         * transaction reservation.
                         *
                         * Using a single map prevents this as we are forced to
                         * check each map we look for overlap with the desired
                         * range and abort as soon as we find it.  Also, given
                         * that we only return a single map, having one beyond
                         * what we can return is probably a bit silly.
                         *
                         * We also need to check that we don't go beyond EOF;
                         * this is a truncate optimisation as a truncate sets
                         * the new file size before blocking on the pages we
                         * currently have locked under writeback.  Because they
                         * are about to be tossed, we don't need to write them
                         * back....
                         */
                        nimaps = 1;
                        end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
                        error = xfs_bmap_last_offset(ip, &last_block,
                                                        XFS_DATA_FORK);
                        if (error)
                                goto trans_cancel;

                        last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
                        if ((map_start_fsb + count_fsb) > last_block) {
                                count_fsb = last_block - map_start_fsb;
                                if (count_fsb == 0) {
                                        error = -EAGAIN;
                                        goto trans_cancel;
                                }
                        }

                        /*
                         * From this point onwards we overwrite the imap
                         * pointer that the caller gave to us.
                         */
                        error = xfs_bmapi_write(tp, ip, map_start_fsb,
                                                count_fsb, flags, &first_block,
                                                nres, imap, &nimaps,
                                                &dfops);
                        if (error)
                                goto trans_cancel;

                        error = xfs_defer_finish(&tp, &dfops, NULL);
                        if (error)
                                goto trans_cancel;

                        error = xfs_trans_commit(tp);
                        if (error)
                                goto error0;

                        xfs_iunlock(ip, XFS_ILOCK_EXCL);
                }

                /*
                 * See if we were able to allocate an extent that
                 * covers at least part of the caller's request.
                 */
                if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
                        return xfs_alert_fsblock_zero(ip, imap);

                if ((offset_fsb >= imap->br_startoff) &&
                    (offset_fsb < (imap->br_startoff +
                                   imap->br_blockcount))) {
                        XFS_STATS_INC(mp, xs_xstrat_quick);
                        return 0;
                }

                /*
                 * So far we have not mapped the requested part of the
                 * file, just surrounding data, try again.
                 */
                count_fsb -= imap->br_blockcount;
                map_start_fsb = imap->br_startoff + imap->br_blockcount;
        }

trans_cancel:
        xfs_defer_cancel(&dfops);
        xfs_trans_cancel(tp);
error0:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

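/*
 * Convert unwritten extents in the given range to real (written) extents
 * once data has been written into them, looping until the whole range is
 * converted and logging any on-disk file size update as we go.
 */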
int
xfs_iomap_write_unwritten(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
        xfs_off_t       count)
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_fileoff_t   offset_fsb;
        xfs_filblks_t   count_fsb;
        xfs_filblks_t   numblks_fsb;
        xfs_fsblock_t   firstfsb;
        int             nimaps;
        xfs_trans_t     *tp;
        xfs_bmbt_irec_t imap;
        struct xfs_defer_ops dfops;
        xfs_fsize_t     i_size;
        uint            resblks;
        int             error;

        trace_xfs_unwritten_convert(ip, offset, count);

        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
        count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

        /*
         * Reserve enough blocks in this transaction for two complete extent
         * btree splits.  We may be converting the middle part of an unwritten
         * extent and in this case we will insert two new extents in the btree
         * each of which could cause a full split.
         *
         * This reservation amount will be used in the first call to
         * xfs_bmbt_split() to select an AG with enough space to satisfy the
         * rest of the operation.
         */
        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

        do {
                /*
                 * Set up a transaction to convert the range of extents
                 * from unwritten to real.  Do allocations in a loop until
                 * we have covered the range passed in.
                 *
                 * Note that we can't risk recursing back into the filesystem
                 * here as we might be asked to write out the same inode that
                 * we complete here and might deadlock on the iolock.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
                                XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
                if (error)
                        return error;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);

                /*
                 * Modify the unwritten extent state of the buffer.
                 */
                xfs_defer_init(&dfops, &firstfsb);
                nimaps = 1;
                error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
                                        XFS_BMAPI_CONVERT, &firstfsb, resblks,
                                        &imap, &nimaps, &dfops);
                if (error)
                        goto error_on_bmapi_transaction;

                /*
                 * Log the updated inode size as we go.  We have to be careful
                 * to only log it up to the actual write offset if it is
                 * halfway into a block.
                 */
                i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
                if (i_size > offset + count)
                        i_size = offset + count;

                i_size = xfs_new_eof(ip, i_size);
                if (i_size) {
                        ip->i_d.di_size = i_size;
                        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
                }

                error = xfs_defer_finish(&tp, &dfops, NULL);
                if (error)
                        goto error_on_bmapi_transaction;

                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        return error;

                if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
                        return xfs_alert_fsblock_zero(ip, &imap);

                if ((numblks_fsb = imap.br_blockcount) == 0) {
                        /*
                         * The numblks_fsb value should always get
                         * smaller, otherwise the loop is stuck.
                         */
                        ASSERT(imap.br_blockcount);
                        break;
                }
                offset_fsb += numblks_fsb;
                count_fsb -= numblks_fsb;
        } while (count_fsb > 0);

        return 0;

error_on_bmapi_transaction:
        xfs_defer_cancel(&dfops);
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

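/*
 * A write needs allocation before I/O can proceed if there is no mapping,
 * the mapping is a hole or a delalloc extent, or, for DAX (which cannot
 * use unwritten extents), the extent is unwritten.
 */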
static inline bool imap_needs_alloc(struct inode *inode,
                struct xfs_bmbt_irec *imap, int nimaps)
{
        return !nimaps ||
                imap->br_startblock == HOLESTARTBLOCK ||
                imap->br_startblock == DELAYSTARTBLOCK ||
                (IS_DAX(inode) && ISUNWRITTEN(imap));
}

static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
{
        /*
         * COW writes will allocate delalloc space, so we need to make sure
         * to take the lock exclusively here.
         */
        if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
                return true;
        if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE))
                return true;
        return false;
}

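/*
 * Main ->iomap_begin handler for the data fork.  Plain buffered writes
 * (no DAX, no extent size hint) are routed to the delalloc path above;
 * otherwise the range is mapped, COW reservation or allocation is done
 * for reflink inodes, and real blocks are allocated where the write
 * needs them.
 */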
static int
xfs_file_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        unsigned                flags,
        struct iomap            *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    imap;
        xfs_fileoff_t           offset_fsb, end_fsb;
        int                     nimaps = 1, error = 0;
        bool                    shared = false, trimmed = false;
        unsigned                lockmode;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
                        !IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
                /* Reserve delalloc blocks for regular writeback. */
                return xfs_file_iomap_begin_delay(inode, offset, length, flags,
                                iomap);
        }

        if (need_excl_ilock(ip, flags)) {
                lockmode = XFS_ILOCK_EXCL;
                xfs_ilock(ip, XFS_ILOCK_EXCL);
        } else {
                lockmode = xfs_ilock_data_map_shared(ip);
        }

        ASSERT(offset <= mp->m_super->s_maxbytes);
        if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
                length = mp->m_super->s_maxbytes - offset;
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        end_fsb = XFS_B_TO_FSB(mp, offset + length);

        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
                               &nimaps, 0);
        if (error)
                goto out_unlock;

        if (flags & IOMAP_REPORT) {
                /* Trim the mapping to the nearest shared extent boundary. */
                error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
                                &trimmed);
                if (error)
                        goto out_unlock;
        }

        if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
                if (flags & IOMAP_DIRECT) {
                        /* may drop and re-acquire the ilock */
                        error = xfs_reflink_allocate_cow(ip, &imap, &shared,
                                        &lockmode);
                        if (error)
                                goto out_unlock;
                } else {
                        error = xfs_reflink_reserve_cow(ip, &imap, &shared);
                        if (error)
                                goto out_unlock;
                }

                end_fsb = imap.br_startoff + imap.br_blockcount;
                length = XFS_FSB_TO_B(mp, end_fsb) - offset;
        }

        if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
                /*
                 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
                 * pages to keep the chunks of work done here somewhat
                 * symmetric with the work writeback does.  This is a
                 * completely arbitrary number pulled out of thin air as a best
                 * guess for initial testing.
                 *
                 * Note that the value needs to be less than 32-bits wide until
                 * the lower level functions are updated.
                 */
                length = min_t(loff_t, length, 1024 * PAGE_SIZE);
                /*
                 * xfs_iomap_write_direct() expects the shared lock.  It
                 * is unlocked on return.
                 */
                if (lockmode == XFS_ILOCK_EXCL)
                        xfs_ilock_demote(ip, lockmode);
                error = xfs_iomap_write_direct(ip, offset, length, &imap,
                                nimaps);
                if (error)
                        return error;

                iomap->flags = IOMAP_F_NEW;
                trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
        } else {
                ASSERT(nimaps);

                xfs_iunlock(ip, lockmode);
                trace_xfs_iomap_found(ip, offset, length, 0, &imap);
        }

        xfs_bmbt_to_iomap(ip, iomap, &imap);
        if (shared)
                iomap->flags |= IOMAP_F_SHARED;
        return 0;
out_unlock:
        xfs_iunlock(ip, lockmode);
        return error;
}

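/*
 * Punch out any delalloc blocks (and the page cache over them) that were
 * reserved for a buffered write but not covered by written data, e.g.
 * after a short or failed write, so that stale reservations do not linger.
 */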
static int
xfs_file_iomap_end_delalloc(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  length,
        ssize_t                 written)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           start_fsb;
        xfs_fileoff_t           end_fsb;
        int                     error = 0;

        /*
         * start_fsb refers to the first unused block after a short write.  If
         * nothing was written, round offset down to point at the first block
         * in the range.
         */
        if (unlikely(!written))
                start_fsb = XFS_B_TO_FSBT(mp, offset);
        else
                start_fsb = XFS_B_TO_FSB(mp, offset + written);
        end_fsb = XFS_B_TO_FSB(mp, offset + length);

        /*
         * Trim back delalloc blocks if we didn't manage to write the whole
         * range reserved.
         *
         * We don't need to care about racing delalloc as we hold i_mutex
         * across the reserve/allocate/unreserve calls.  If there are delalloc
         * blocks in the range, they are ours.
         */
        if (start_fsb < end_fsb) {
                truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
                                         XFS_FSB_TO_B(mp, end_fsb) - 1);

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
                                               end_fsb - start_fsb);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);

                if (error && !XFS_FORCED_SHUTDOWN(mp)) {
                        xfs_alert(mp, "%s: unable to clean up ino %lld",
                                __func__, ip->i_ino);
                        return error;
                }
        }

        return 0;
}

static int
xfs_file_iomap_end(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        ssize_t                 written,
        unsigned                flags,
        struct iomap            *iomap)
{
        if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
                return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
                                length, written);
        return 0;
}

const struct iomap_ops xfs_iomap_ops = {
        .iomap_begin            = xfs_file_iomap_begin,
        .iomap_end              = xfs_file_iomap_end,
};

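/*
 * Read-only ->iomap_begin handler for the attribute fork, used to report
 * extended attribute extents (e.g. via FIEMAP).  No allocation is ever
 * performed here.
 */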
static int
xfs_xattr_iomap_begin(
        struct inode            *inode,
        loff_t                  offset,
        loff_t                  length,
        unsigned                flags,
        struct iomap            *iomap)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
        struct xfs_bmbt_irec    imap;
        int                     nimaps = 1, error = 0;
        unsigned                lockmode;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        lockmode = xfs_ilock_data_map_shared(ip);

        /* if there is no attribute fork or no extents, return ENOENT */
        if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
                error = -ENOENT;
                goto out_unlock;
        }

        ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
                               &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
out_unlock:
        xfs_iunlock(ip, lockmode);

        if (!error) {
                ASSERT(nimaps);
                xfs_bmbt_to_iomap(ip, iomap, &imap);
        }

        return error;
}

const struct iomap_ops xfs_xattr_iomap_ops = {
        .iomap_begin            = xfs_xattr_iomap_begin,
};