fs/xfs/xfs_iomap.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

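/*
 * Round a byte offset down to the nearest write I/O alignment boundary,
 * i.e. a multiple of (1 << mp->m_writeio_log) bytes.  For example, with
 * a 64k write size (m_writeio_log == 16), an offset of 0x1a800 aligns
 * down to 0x10000.
 */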
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)

void
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
}

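/*
 * Return the extent alignment (in filesystem blocks) to apply to
 * allocations at EOF: the stripe unit (or the stripe width if mounted
 * with "-o swalloc"), rounded up to any extent size hint.  Returns zero
 * if no alignment applies, e.g. when the file is still smaller than the
 * stripe alignment.
 */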
xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}

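/*
 * Round *last_fsb up to the EOF allocation alignment, but only take the
 * rounded value if it still points at or beyond the last allocated
 * block, so that we never extend the request over existing extents.
 */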
STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize,
	xfs_fileoff_t		*last_fsb)
{
	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		int		eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

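/*
 * Allocate real extents (unwritten by default, zeroed written extents
 * for DAX) backing a direct I/O write.  The caller must hold the ilock
 * shared; it is dropped for transaction setup, retaken exclusive, and
 * released again before returning.
 */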
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	struct xfs_defer_ops dfops;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this can
		 * call xfs_iread_extents() and we only have the ilock shared.
		 * This should be safe because the lock was held around a bmapi
		 * call in the caller and we only need it to access the in-core
		 * list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}
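	/*
	 * For example, with a 16 block extent size hint, a request for
	 * blocks 10-19 (offset_fsb 10, count_fsb 10) is widened above to
	 * cover the full hint-aligned range 0-31:
	 *	resaligned = 10 + 10 + (16 - 4) = 32
	 */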

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (ISUNWRITTEN(imap)) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_defer_init(&dfops, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, &firstfsb, resblks, imap,
				&nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

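/*
 * Work out how much to scale back speculative preallocation when a
 * dquot is running out of headroom below its high watermark.  Crossing
 * the 5%, 3% and 1% low-space thresholds bumps the shift by two each
 * time, cutting the prealloc to 1/4, 1/16 and 1/64 respectively.  Only
 * the most aggressive of the per-quota and previously computed throttle
 * values is kept in *qblocks/*qshift.
 */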
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file that
 * is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			count,
	xfs_extnum_t		idx)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmbt_irec	prev;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;
	xfs_fsblock_t		alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If an explicit allocsize is set, the file is small, or we
	 * are writing behind a hole, then use the minimum prealloc:
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_get_extent(ifp, idx - 1, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial size of the preallocation. We are beyond the
	 * current EOF here, but we need to take into account whether this is
	 * a sparse write or an extending write when determining the
	 * preallocation size.  Hence we need to look up the extent that ends
	 * at the current write offset and use the result to determine the
	 * preallocation size.
	 *
	 * If the extent is a hole, then preallocation is essentially disabled.
	 * Otherwise we take the size of the preceding data extent as the basis
	 * for the preallocation size. If the size of the extent is greater than
	 * half the maximum extent length, then use the current offset as the
	 * basis. This ensures that for large files the preallocation size
	 * always extends to MAXEXTLEN rather than falling short due to things
	 * like stripe unit/width alignment of real extents.
	 */
	if (prev.br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev.br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported prealloc
	 * size, we round up first, apply appropriate throttling, round down and
	 * cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}
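	/*
	 * For instance, at less than 3% (but at least 2%) free space the
	 * shift works out to 4 and the preallocation below is cut to 1/16
	 * of its initial size.
	 */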

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}

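/*
 * Look up or reserve delalloc blocks for a buffered write, including
 * any speculative EOF preallocation computed above.  The ilock is held
 * exclusive for the whole operation since delayed allocation modifies
 * the in-core extent list.
 */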
static int
xfs_file_iomap_begin_delay(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t		end_fsb;
	int			error = 0, eof = 0;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	xfs_fsblock_t		prealloc_blocks = 0;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
	if (!eof && got.br_startoff <= offset_fsb) {
		if (xfs_is_reflink_inode(ip)) {
			bool		shared;

			end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
					maxbytes_fsb);
			xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
			error = xfs_reflink_reserve_cow(ip, &got, &shared);
			if (error)
				goto out_unlock;
		}

		trace_xfs_iomap_found(ip, offset, count, 0, &got);
		goto done;
	}

	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		goto out_unlock;

	/*
	 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
	 * pages to keep the chunks of work done here somewhat symmetric
	 * with the work writeback does.  This is a completely arbitrary
	 * number pulled out of thin air as a best guess for initial
	 * testing.
	 *
	 * Note that the value needs to be less than 32 bits wide until
	 * the lower level functions are updated.
	 */
	count = min_t(loff_t, count, 1024 * PAGE_SIZE);
	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	if (eof) {
		prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, idx);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
				prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks, &got, &idx, eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
	if (isnullstartblock(got.br_startblock))
		got.br_startblock = DELAYSTARTBLOCK;

	if (!got.br_startblock) {
		error = xfs_alert_fsblock_zero(ip, &got);
		if (error)
			goto out_unlock;
	}

	xfs_bmbt_to_iomap(ip, iomap, &got);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Pass in a delayed allocate extent and convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	int		whichfork,
	xfs_off_t	offset,
	xfs_bmbt_irec_t	*imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	struct xfs_defer_ops	dfops;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps;
	int		error = 0;
	int		flags = XFS_BMAPI_DELALLOC;
	int		nres;

	if (whichfork == XFS_COW_FORK)
		flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			/*
			 * We have already reserved space for the extent and any
			 * indirect blocks when creating the delalloc extent,
			 * there is no need to reserve space in this transaction
			 * again.
			 */
			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
					0, XFS_TRANS_RESERVE, &tp);
			if (error)
				return error;

			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_defer_init(&dfops, &first_block);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we get for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, flags, &first_block,
						nres, imap, &nimaps,
						&dfops);
			if (error)
				goto trans_cancel;

			error = xfs_defer_finish(&tp, &dfops, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request.
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

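/*
 * Convert the unwritten extents backing [offset, offset + count) to
 * written, typically once direct I/O into preallocated space has
 * completed.  Work is done in a loop of small transactions so that each
 * conversion only needs to reserve space for the btree splits it can
 * cause itself.
 */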
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct xfs_defer_ops dfops;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_defer_init(&dfops, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb, resblks,
					&imap, &nimaps, &dfops);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

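/*
 * Decide if a write mapping still needs real blocks allocated before
 * I/O can proceed: a missing, hole or delalloc mapping always does, and
 * for DAX an unwritten extent must be converted up front as well.
 */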
static inline bool imap_needs_alloc(struct inode *inode,
		struct xfs_bmbt_irec *imap, int nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && ISUNWRITTEN(imap));
}

static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
{
	/*
	 * COW writes will allocate delalloc space, so we need to make sure
	 * to take the lock exclusively here.
	 */
	if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
		return true;
	if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE))
		return true;
	return false;
}

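/*
 * iomap_begin handler for the data fork: map, and for direct writes
 * allocate, a single extent covering as much of the requested range as
 * possible.  Buffered writes without DAX or an extent size hint are
 * handed off to the delalloc path above.
 */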
static int
xfs_file_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;
	bool			shared = false, trimmed = false;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
			!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
		/* Reserve delalloc blocks for regular writeback. */
		return xfs_file_iomap_begin_delay(inode, offset, length, flags,
				iomap);
	}

	if (need_excl_ilock(ip, flags)) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, XFS_ILOCK_EXCL);
	} else {
		lockmode = xfs_ilock_data_map_shared(ip);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	if (xfs_is_reflink_inode(ip) &&
	    (flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT)) {
		shared = xfs_reflink_find_cow_mapping(ip, offset, &imap);
		if (shared) {
			xfs_iunlock(ip, lockmode);
			goto alloc_done;
		}
		ASSERT(!isnullstartblock(imap.br_startblock));
	}

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if ((flags & IOMAP_REPORT) ||
	    (xfs_is_reflink_inode(ip) &&
	     (flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT))) {
		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
				&trimmed);
		if (error)
			goto out_unlock;

		/*
		 * We're here because we're trying to do a directio write to a
		 * region that isn't aligned to a filesystem block. If the
		 * extent is shared, fall back to buffered mode to handle the
		 * RMW.
		 */
		if (!(flags & IOMAP_REPORT) && shared) {
			trace_xfs_reflink_bounce_dio_write(ip, &imap);
			error = -EREMCHG;
			goto out_unlock;
		}
	}

	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
		error = xfs_reflink_reserve_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
		/*
		 * We cap the maximum length we map here to
		 * MAX_WRITEBACK_PAGES pages to keep the chunks of work done
		 * here somewhat symmetric with the work writeback does.
		 * This is a completely arbitrary number pulled out of thin
		 * air as a best guess for initial testing.
		 *
		 * Note that the value needs to be less than 32 bits wide
		 * until the lower level functions are updated.
		 */
		length = min_t(loff_t, length, 1024 * PAGE_SIZE);
		/*
		 * xfs_iomap_write_direct() expects the shared lock. It
		 * is unlocked on return.
		 */
		if (lockmode == XFS_ILOCK_EXCL)
			xfs_ilock_demote(ip, lockmode);
		error = xfs_iomap_write_direct(ip, offset, length, &imap,
				nimaps);
		if (error)
			return error;

alloc_done:
		iomap->flags = IOMAP_F_NEW;
		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
	} else {
		ASSERT(nimaps);

		xfs_iunlock(ip, lockmode);
		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
	}

	xfs_bmbt_to_iomap(ip, iomap, &imap);
	if (shared)
		iomap->flags |= IOMAP_F_SHARED;
	return 0;
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			length,
	ssize_t			written)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	/*
	 * start_fsb refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block in
	 * the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim back delalloc blocks if we didn't manage to write the whole
	 * range reserved.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls. If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if (start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

static int
xfs_file_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written);
	return 0;
}

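/*
 * These ops are handed to the generic iomap code by the callers in
 * xfs_file.c, xfs_iops.c and friends, along the lines of:
 *
 *	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
 *
 * (see e.g. xfs_file_buffered_aio_write())
 */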
struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};

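/*
 * Read-only mapping of the attribute fork, used for FIEMAP requests
 * with FIEMAP_FLAG_XATTR set.
 */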
static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (!error) {
		ASSERT(nimaps);
		xfs_bmbt_to_iomap(ip, iomap, &imap);
	}

	return error;
}

struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};