// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_refcount.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_bit.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"
#include "xfs_iomap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
/*
 * Copy on Write of Shared Blocks
 *
 * XFS must preserve "the usual" file semantics even when two files share
 * the same physical blocks.  This means that a write to one file must not
 * alter the blocks in a different file; the way that we'll do that is
 * through the use of a copy-on-write mechanism.  At a high level, that
 * means that when we want to write to a shared block, we allocate a new
 * block, write the data to the new block, and if that succeeds we map the
 * new block into the file.
 *
 * XFS provides a "delayed allocation" mechanism that defers the allocation
 * of disk blocks to dirty-but-not-yet-mapped file blocks as long as
 * possible.  This reduces fragmentation by enabling the filesystem to ask
 * for bigger chunks less often, which is exactly what we want for CoW.
 *
 * The delalloc mechanism begins when the kernel wants to make a block
 * writable (write_begin or page_mkwrite).  If the offset is not mapped, we
 * create a delalloc mapping, which is a regular in-core extent, but without
 * a real startblock.  (For delalloc mappings, the startblock encodes both
 * a flag that this is a delalloc mapping, and a worst-case estimate of how
 * many blocks might be required to put the mapping into the BMBT.)  Delalloc
 * mappings are a reservation against the free space in the filesystem;
 * adjacent mappings can also be combined into fewer larger mappings.
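 *
 * For example (illustrative only; the helpers live in xfs_format.h), a
 * mapping in that state can be recognized and decoded with:
 *
 *	if (isnullstartblock(irec->br_startblock))
 *		indlen = startblockval(irec->br_startblock);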
 *
 * As an optimization, the CoW extent size hint (cowextsz) creates
 * outsized aligned delalloc reservations in the hope of landing out of
 * order nearby CoW writes in a single extent on disk, thereby reducing
 * fragmentation and improving future performance.
 *
 * D: --RRRRRRSSSRRRRRRRR--- (data fork)
 * C: ------DDDDDDD--------- (CoW fork)
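 *
 * For instance (numbers purely illustrative), with a cowextsz hint of 32
 * blocks, a CoW write touching blocks 40-43 reserves the whole aligned
 * range of blocks 32-63 in the CoW fork, so a later CoW write to blocks
 * 50-55 lands in the existing reservation instead of creating a second
 * extent.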
 *
 * When dirty pages are being written out (typically in writepage), the
 * delalloc reservations are converted into unwritten mappings by
 * allocating blocks and replacing the delalloc mapping with real ones.
 * A delalloc mapping can be replaced by several unwritten ones if the
 * free space is fragmented.
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUUUUUU---------
 *
 * We want to adapt the delalloc mechanism for copy-on-write, since the
 * write paths are similar.  The first two steps (creating the reservation
 * and allocating the blocks) are exactly the same as delalloc except that
 * the mappings must be stored in a separate CoW fork because we do not want
 * to disturb the mapping in the data fork until we're sure that the write
 * succeeded.  IO completion in this case is the process of removing the old
 * mapping from the data fork and moving the new mapping from the CoW fork to
 * the data fork.  This will be discussed shortly.
 *
 * For now, unaligned directio writes will be bounced back to the page cache.
 * Block-aligned directio writes will use the same mechanism as buffered
 * writes.
 *
 * Just prior to submitting the actual disk write requests, we convert
 * the extents representing the range of the file actually being written
 * (as opposed to extra pieces created for the cowextsize hint) to real
 * extents.  This will become important in the next step:
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUrrUUU---------
 *
 * CoW remapping must be done after the data block write completes,
 * because we don't want to destroy the old data fork map until we're sure
 * the new block has been written.  Since the new mappings are kept in a
 * separate fork, we can simply iterate these mappings to find the ones
 * that cover the file blocks that we just CoW'd.  For each extent, simply
 * unmap the corresponding range in the data fork, map the new range into
 * the data fork, and remove the extent from the CoW fork.  Because of
 * the presence of the cowextsize hint, however, we must be careful
 * only to remap the blocks that we've actually written out -- we must
 * never remap delalloc reservations nor CoW staging blocks that have
 * yet to be written.  This corresponds exactly to the real extents in
 * the CoW fork:
 *
 * D: --RRRRRRrrSRRRRRRRR---
 * C: ------UU--UUU---------
 *
 * Since the remapping operation can be applied to an arbitrary file
 * range, we record the need for the remap step as a flag in the ioend
 * instead of declaring a new IO type.  This is required for direct io
 * because we only have ioend for the whole dio, and we have to be able to
 * remember the presence of unwritten blocks and CoW blocks with a single
 * ioend structure.  Better yet, the more ground we can cover with one
 * ioend, the better.
 */
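/*
 * That remap step runs from I/O completion: xfs_reflink_end_cow() below
 * walks the CoW fork and moves each written staging extent into the data
 * fork, one small transaction at a time.
 */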
/*
 * Given an AG extent, find the lowest-numbered run of shared blocks
 * within that range and return the range in fbno/flen.  If
 * find_end_of_shared is true, return the longest contiguous extent of
 * shared blocks.  If there are no shared extents, fbno and flen will
 * be set to NULLAGBLOCK and 0, respectively.
 */
static int
xfs_reflink_find_shared(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	xfs_agblock_t		*fbno,
	xfs_extlen_t		*flen,
	bool			find_end_of_shared)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			error;

	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
	if (error)
		return error;

	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agbp->b_pag);

	error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
			find_end_of_shared);

	xfs_btree_del_cursor(cur, error);

	xfs_trans_brelse(tp, agbp);
	return error;
}
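/*
 * The btree cursor above is torn down with the error code from the lookup
 * so that xfs_btree_del_cursor() knows whether the walk ended cleanly; the
 * AGF buffer is released separately because it was read here rather than
 * attached by the cursor.
 */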
/*
 * Trim the mapping to the next block where there's a change in the
 * shared/unshared status.  More specifically, this means that we
 * find the lowest-numbered extent of shared blocks that coincides with
 * the given block mapping.  If the shared extent overlaps the start of
 * the mapping, trim the mapping to the end of the shared extent.  If
 * the shared region intersects the mapping, trim the mapping to the
 * start of the shared extent.  If there are no shared regions that
 * overlap, just return the original extent.
 */
int
xfs_reflink_trim_around_shared(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec,
	bool			*shared)
{
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	int			error = 0;

	/* Holes, unwritten, and delalloc extents cannot be shared */
	if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_written_extent(irec)) {
		*shared = false;
		return 0;
	}

	trace_xfs_reflink_trim_around_shared(ip, irec);

	agno = XFS_FSB_TO_AGNO(ip->i_mount, irec->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(ip->i_mount, irec->br_startblock);
	aglen = irec->br_blockcount;

	error = xfs_reflink_find_shared(ip->i_mount, NULL, agno, agbno,
			aglen, &fbno, &flen, true);
	if (error)
		return error;

	*shared = false;
	if (fbno == NULLAGBLOCK) {
		/* No shared blocks at all. */
		return 0;
	} else if (fbno == agbno) {
		/*
		 * The start of this extent is shared.  Truncate the
		 * mapping at the end of the shared region so that a
		 * subsequent iteration starts at the start of the
		 * unshared region.
		 */
		irec->br_blockcount = flen;
		*shared = true;
		return 0;
	} else {
		/*
		 * There's a shared extent midway through this extent.
		 * Truncate the mapping at the start of the shared
		 * extent so that a subsequent iteration starts at the
		 * start of the shared region.
		 */
		irec->br_blockcount = fbno - agbno;
		return 0;
	}
}
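/*
 * For example, if the incoming mapping covers AG blocks 10-29 and blocks
 * 20-24 are the first shared run, the mapping is trimmed to 10-19 with
 * *shared set to false; the next iteration then starts at block 20 and is
 * trimmed to the shared run 20-24 with *shared set to true.
 */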
int
xfs_bmap_trim_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared)
{
	/* We can't update any real extents in always COW mode. */
	if (xfs_is_always_cow_inode(ip) &&
	    !isnullstartblock(imap->br_startblock)) {
		*shared = true;
		return 0;
	}

	/* Trim the mapping to the nearest shared extent boundary. */
	return xfs_reflink_trim_around_shared(ip, imap, shared);
}
static int
xfs_reflink_convert_cow_locked(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_filblks_t		count_fsb)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	struct xfs_btree_cur	*dummy_cur = NULL;
	int			dummy_logflags;
	int			error = 0;

	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
		return 0;

	do {
		if (got.br_startoff >= offset_fsb + count_fsb)
			break;
		if (got.br_state == XFS_EXT_NORM)
			continue;
		if (WARN_ON_ONCE(isnullstartblock(got.br_startblock)))
			return -EIO;

		xfs_trim_extent(&got, offset_fsb, count_fsb);
		if (!got.br_blockcount)
			continue;

		got.br_state = XFS_EXT_NORM;
		error = xfs_bmap_add_extent_unwritten_real(NULL, ip,
				XFS_COW_FORK, &icur, &dummy_cur, &got,
				&dummy_logflags);
		if (error)
			return error;
	} while (xfs_iext_next_extent(ip->i_cowfp, &icur, &got));

	return error;
}
/* Convert all of the unwritten CoW extents in a file's range to real ones. */
int
xfs_reflink_convert_cow(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_filblks_t		count_fsb = end_fsb - offset_fsb;
	int			error;

	ASSERT(count != 0);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
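/*
 * This implements the "convert just before submitting the disk write" step
 * from the comment at the top of this file: only the range actually being
 * written is flipped from unwritten to written, so any extra cowextsz
 * preallocation around it stays unwritten and is never remapped.
 */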
/*
 * Find the extent that maps the given range in the COW fork. Even if the
 * extent is not shared we might have a preallocation for it in the COW fork.
 * If so we use it rather than trigger a new allocation.
 */
static int
xfs_find_trim_cow_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	struct xfs_bmbt_irec	*cmap,
	bool			*shared,
	bool			*found)
{
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_iext_cursor	icur;

	*found = false;

	/*
	 * If we don't find an overlapping extent, trim the range we need to
	 * allocate to fit the hole we found.
	 */
	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, cmap))
		cmap->br_startoff = offset_fsb + count_fsb;
	if (cmap->br_startoff > offset_fsb) {
		xfs_trim_extent(imap, imap->br_startoff,
				cmap->br_startoff - imap->br_startoff);
		return xfs_bmap_trim_cow(ip, imap, shared);
	}

	*shared = true;
	if (isnullstartblock(cmap->br_startblock)) {
		xfs_trim_extent(imap, cmap->br_startoff, cmap->br_blockcount);
		return 0;
	}

	/* real extent found - no need to allocate */
	xfs_trim_extent(cmap, offset_fsb, count_fsb);
	*found = true;
	return 0;
}
/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	struct xfs_bmbt_irec	*cmap,
	bool			*shared,
	uint			*lockmode,
	bool			convert_now)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_trans	*tp;
	int			nimaps, error = 0;
	bool			found;
	xfs_filblks_t		resaligned;
	xfs_extlen_t		resblks = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (!ip->i_cowfp) {
		ASSERT(!xfs_is_reflink_inode(ip));
		xfs_ifork_init_cow(ip);
	}

	error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
	if (error || !*shared)
		return error;
	if (found)
		goto convert;

	resaligned = xfs_aligned_fsb_count(imap->br_startoff,
			imap->br_blockcount, xfs_get_cowextsz_hint(ip));
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

	xfs_iunlock(ip, *lockmode);
	*lockmode = 0;

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
			false, &tp);
	if (error)
		return error;

	*lockmode = XFS_ILOCK_EXCL;

	/*
	 * Check for an overlapping extent again now that we dropped the ilock.
	 */
	error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
	if (error || !*shared)
		goto out_trans_cancel;
	if (found) {
		xfs_trans_cancel(tp);
		goto convert;
	}

	/* Allocate the entire reservation as unwritten blocks. */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0, cmap,
			&nimaps);
	if (error)
		goto out_trans_cancel;

	xfs_inode_set_cowblocks_tag(ip);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/*
	 * Allocation succeeded but the requested range was not even partially
	 * satisfied?  Bail out!
	 */
	if (nimaps == 0)
		return -ENOSPC;
convert:
	xfs_trim_extent(cmap, offset_fsb, count_fsb);
	/*
	 * COW fork extents are supposed to remain unwritten until we're ready
	 * to initiate a disk write.  For direct I/O we are going to write the
	 * data and need the conversion, but for buffered writes we're done.
	 */
	if (!convert_now || cmap->br_state == XFS_EXT_NORM)
		return 0;
	trace_xfs_reflink_convert_cow(ip, cmap);
	return xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
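/*
 * Note the retry above: the transaction had to be allocated with the ILOCK
 * dropped, so another thread may have raced in and created the CoW mapping
 * for us; in that case the freshly allocated transaction is cancelled and
 * the extent that was found is used as-is.
 */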
/*
 * Cancel CoW reservations for some block range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 *
 * Caller must have already joined the inode to the current transaction. The
 * inode will be joined to the transaction returned to the caller.
 */
int
xfs_reflink_cancel_cow_blocks(
	struct xfs_inode		*ip,
	struct xfs_trans		**tpp,
	xfs_fileoff_t			offset_fsb,
	xfs_fileoff_t			end_fsb,
	bool				cancel_real)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	struct xfs_iext_cursor		icur;
	int				error = 0;

	if (!xfs_inode_has_cow_data(ip))
		return 0;
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		return 0;

	/* Walk backwards until we're out of the I/O range... */
	while (got.br_startoff + got.br_blockcount > offset_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

		/* Extent delete may have bumped ext forward */
		if (!del.br_blockcount) {
			xfs_iext_prev(ifp, &icur);
			goto next_extent;
		}

		trace_xfs_reflink_cancel_cow(ip, &del);

		if (isnullstartblock(del.br_startblock)) {
			error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
					&icur, &got, &del);
			if (error)
				break;
		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
			ASSERT((*tpp)->t_firstblock == NULLFSBLOCK);

			/* Free the CoW orphan record. */
			xfs_refcount_free_cow_extent(*tpp, del.br_startblock,
					del.br_blockcount);

			xfs_bmap_add_free(*tpp, del.br_startblock,
					del.br_blockcount, NULL);

			/* Roll the transaction */
			error = xfs_defer_finish(tpp);
			if (error)
				break;

			/* Remove the mapping from the CoW fork. */
			xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

			/* Remove the quota reservation */
			error = xfs_quota_unreserve_blkres(ip,
					del.br_blockcount);
			if (error)
				break;
		} else {
			/* Didn't do anything, push cursor back. */
			xfs_iext_prev(ifp, &icur);
		}
next_extent:
		if (!xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

	/* clear tag if cow fork is emptied */
	if (!ifp->if_bytes)
		xfs_inode_clear_cowblocks_tag(ip);
	return error;
}
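/*
 * Both xfs_reflink_cancel_cow_range() below and xfs_reflink_clear_inode_flag()
 * further down use this helper to scrub out CoW fork state.  Real staging
 * extents are only freed when cancel_real is set, because a written staging
 * extent may hold CoW data that is still waiting to be remapped.
 */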
/*
 * Cancel CoW reservations for some byte range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 */
int
xfs_reflink_cancel_cow_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	bool			cancel_real)
{
	struct xfs_trans	*tp;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	trace_xfs_reflink_cancel_cow_range(ip, offset, count);
	ASSERT(ip->i_cowfp);

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	if (count == NULLFILEOFF)
		end_fsb = NULLFILEOFF;
	else
		end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
			0, 0, 0, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* Scrape out the old CoW reservations */
	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
			cancel_real);
	if (error)
		goto out_cancel;

	error = xfs_trans_commit(tp);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_cancel_cow_range_error(ip, error, _RET_IP_);
	return error;
}
/*
 * Remap part of the CoW fork into the data fork.
 *
 * We aim to remap the range starting at @offset_fsb and ending at @end_fsb
 * into the data fork; this function will remap what it can (at the end of the
 * range) and update @end_fsb appropriately.  Each remap gets its own
 * transaction because we can end up merging and splitting bmbt blocks for
 * every remap operation and we'd like to keep the block reservation
 * requirements as low as possible.
 */
STATIC int
xfs_reflink_end_cow_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		*end_fsb)
{
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	xfs_filblks_t		rlen;
	unsigned int		resblks;
	int			error;

	/* No COW extents?  That's easy! */
	if (ifp->if_bytes == 0) {
		*end_fsb = offset_fsb;
		return 0;
	}

	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	/*
	 * Lock the inode.  We have to ijoin without automatic unlock because
	 * the lead transaction is the refcountbt record deletion; the data
	 * fork update follows as a deferred log item.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
			XFS_IEXT_REFLINK_END_COW_CNT);
	if (error)
		goto out_cancel;

	/*
	 * In case of racing, overlapping AIO writes, no COW extents might be
	 * left by the time I/O completes for the loser of the race.  In that
	 * case we are done.
	 */
	if (!xfs_iext_lookup_extent_before(ip, ifp, end_fsb, &icur, &got) ||
	    got.br_startoff + got.br_blockcount <= offset_fsb) {
		*end_fsb = offset_fsb;
		goto out_cancel;
	}

	/*
	 * Structure copy @got into @del, then trim @del to the range that we
	 * were asked to remap.  We preserve @got for the eventual CoW fork
	 * deletion; from now on @del represents the mapping that we're
	 * actually remapping.
	 */
	del = got;
	xfs_trim_extent(&del, offset_fsb, *end_fsb - offset_fsb);

	ASSERT(del.br_blockcount > 0);

	/*
	 * Only remap real extents that contain data.  With AIO, speculative
	 * preallocations can leak into the range we are called upon, and we
	 * need to skip them.
	 */
	if (!xfs_bmap_is_written_extent(&got)) {
		*end_fsb = del.br_startoff;
		goto out_cancel;
	}

	/* Unmap the old blocks in the data fork. */
	rlen = del.br_blockcount;
	error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1);
	if (error)
		goto out_cancel;

	/* Trim the extent to whatever got unmapped. */
	xfs_trim_extent(&del, del.br_startoff + rlen, del.br_blockcount - rlen);
	trace_xfs_reflink_cow_remap(ip, &del);

	/* Free the CoW orphan record. */
	xfs_refcount_free_cow_extent(tp, del.br_startblock, del.br_blockcount);

	/* Map the new blocks into the data fork. */
	xfs_bmap_map_extent(tp, ip, &del);

	/* Charge this new data fork mapping to the on-disk quota. */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
			(long)del.br_blockcount);

	/* Remove the mapping from the CoW fork. */
	xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/* Update the caller about how much progress we made. */
	*end_fsb = del.br_startoff;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * Remap parts of a file's data fork after a successful CoW.
 */
int
xfs_reflink_end_cow(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count)
{
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	trace_xfs_reflink_end_cow(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/*
	 * Walk backwards until we're out of the I/O range.  The loop function
	 * repeatedly cycles the ILOCK to allocate one transaction per remapped
	 * extent.
	 *
	 * If we're being called by writeback then the pages will still
	 * have PageWriteback set, which prevents races with reflink remapping
	 * and truncate.  Reflink remapping prevents races with writeback by
	 * taking the iolock and mmaplock before flushing the pages and
	 * remapping, which means there won't be any further writeback or page
	 * cache dirtying until the reflink completes.
	 *
	 * We should never have two threads issuing writeback for the same file
	 * region.  There are also post-eof checks in the writeback
	 * preparation code so that we don't bother writing out pages that are
	 * about to be truncated.
	 *
	 * If we're being called as part of directio write completion, the dio
	 * count is still elevated, which reflink and truncate will wait for.
	 * Reflink remapping takes the iolock and mmaplock and waits for
	 * pending dio to finish, which should prevent any directio until the
	 * remap completes.  Multiple concurrent directio writes to the same
	 * region are handled by end_cow processing only occurring for the
	 * threads which succeed; the outcome of multiple overlapping direct
	 * writes is not well defined anyway.
	 *
	 * It's possible that a buffered write and a direct write could collide
	 * here (the buffered write stumbles in after the dio flushes and
	 * invalidates the page cache and immediately queues writeback), but we
	 * have never supported this 100%.  If either disk write succeeds the
	 * blocks will be remapped.
	 */
	while (end_fsb > offset_fsb && !error)
		error = xfs_reflink_end_cow_extent(ip, offset_fsb, &end_fsb);

	if (error)
		trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
	return error;
}
/*
 * Free leftover CoW reservations that didn't get cleaned out.
 */
int
xfs_reflink_recover_cow(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	int			error = 0;

	if (!xfs_has_reflink(mp))
		return 0;

	for_each_perag(mp, agno, pag) {
		error = xfs_refcount_recover_cow_leftovers(mp, pag);
		if (error) {
			xfs_perag_put(pag);
			break;
		}
	}

	return error;
}
/*
 * Reflinking (Block) Ranges of Two Files Together
 *
 * First, ensure that the reflink flag is set on both inodes.  The flag is an
 * optimization to avoid unnecessary refcount btree lookups in the write path.
 *
 * Now we can iteratively remap the range of extents (and holes) in src to the
 * corresponding ranges in dest.  Let drange and srange denote the ranges of
 * logical blocks in dest and src touched by the reflink operation.
 *
 * While the length of drange is greater than zero,
 *    - Read src's bmbt at the start of srange ("imap")
 *    - If imap doesn't exist, make imap appear to start at the end of srange
 *      with zero length.
 *    - If imap starts before srange, advance imap to start at srange.
 *    - If imap goes beyond srange, truncate imap to end at the end of srange.
 *    - Punch (imap start - srange start + imap len) blocks from dest at
 *      offset (drange start).
 *    - If imap points to a real range of pblks,
 *         > Increase the refcount of the imap's pblks
 *         > Map imap's pblks into dest at the offset
 *           (drange start + imap start - srange start)
 *    - Advance drange and srange by (imap start - srange start + imap len)
 *
 * Finally, if the reflink made dest longer, update both the in-core and
 * on-disk file sizes.
 *
 * ASCII Art Demonstration:
 *
 * Let's say we want to reflink this source file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS (src file)
 *   <-------------------->
 *
 * into this destination file:
 *
 * --DDDDDDDDDDDDDDDDDDD--DDD (dest file)
 *        <-------------------->
 * '-' means a hole, and 'S' and 'D' are written blocks in the src and dest.
 * Observe that the range has different logical offsets in either file.
 *
 * Consider that the first extent in the source file doesn't line up with our
 * reflink range.  Unmapping and remapping are separate operations, so we can
 * unmap more blocks from the destination file than we remap.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *   <------->
 * --DDDDD---------DDDDD--DDD
 *        <------->
 *
 * Now remap the source extent into the destination file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *   <------->
 * --DDDDD--SSSSSSSDDDDD--DDD
 *        <------->
 *
 * Do likewise with the second hole and extent in our range.  Holes in the
 * unmap range don't affect our operation.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *            <---->
 * --DDDDD--SSSSSSS-SSSSS-DDD
 *                 <---->
 *
 * Finally, unmap and remap part of the third extent.  This will increase the
 * size of the destination file.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *                  <----->
 * --DDDDD--SSSSSSS-SSSSS----SSS
 *                       <----->
 *
 * Once we update the destination file's i_size, we're done.
 */
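/*
 * The loop described above is implemented by xfs_reflink_remap_blocks() and
 * xfs_reflink_remap_extent() below; setting the flag and updating the file
 * size are handled by xfs_reflink_set_inode_flag() and
 * xfs_reflink_update_dest().
 */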
/*
 * Ensure the reflink bit is set in both inodes.
 */
STATIC int
xfs_reflink_set_inode_flag(
	struct xfs_inode	*src,
	struct xfs_inode	*dest)
{
	struct xfs_mount	*mp = src->i_mount;
	int			error;
	struct xfs_trans	*tp;

	if (xfs_is_reflink_inode(src) && xfs_is_reflink_inode(dest))
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	/* Lock both files against IO */
	if (src->i_ino == dest->i_ino)
		xfs_ilock(src, XFS_ILOCK_EXCL);
	else
		xfs_lock_two_inodes(src, XFS_ILOCK_EXCL, dest, XFS_ILOCK_EXCL);

	if (!xfs_is_reflink_inode(src)) {
		trace_xfs_reflink_set_inode_flag(src);
		xfs_trans_ijoin(tp, src, XFS_ILOCK_EXCL);
		src->i_diflags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, src, XFS_ILOG_CORE);
		xfs_ifork_init_cow(src);
	} else
		xfs_iunlock(src, XFS_ILOCK_EXCL);

	if (src->i_ino == dest->i_ino)
		goto commit_flags;

	if (!xfs_is_reflink_inode(dest)) {
		trace_xfs_reflink_set_inode_flag(dest);
		xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);
		dest->i_diflags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
		xfs_ifork_init_cow(dest);
	} else
		xfs_iunlock(dest, XFS_ILOCK_EXCL);

commit_flags:
	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_set_inode_flag_error(dest, error, _RET_IP_);
	return error;
}
/*
 * Update destination inode size & cowextsize hint, if necessary.
 */
int
xfs_reflink_update_dest(
	struct xfs_inode	*dest,
	xfs_off_t		newlen,
	xfs_extlen_t		cowextsize,
	unsigned int		remap_flags)
{
	struct xfs_mount	*mp = dest->i_mount;
	struct xfs_trans	*tp;
	int			error;

	if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	xfs_ilock(dest, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);

	if (newlen > i_size_read(VFS_I(dest))) {
		trace_xfs_reflink_update_inode_size(dest, newlen);
		i_size_write(VFS_I(dest), newlen);
		dest->i_disk_size = newlen;
	}

	if (cowextsize) {
		dest->i_cowextsize = cowextsize;
		dest->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
	}

	xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_update_inode_size_error(dest, error, _RET_IP_);
	return error;
}
/*
 * Do we have enough reserve in this AG to handle a reflink?  The refcount
 * btree already reserved all the space it needs, but the rmap btree can grow
 * infinitely, so we won't allow more reflinks when the AG is down to the
 * btree reserves.
 */
static int
xfs_reflink_ag_has_free_space(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			error = 0;

	if (!xfs_has_rmapbt(mp))
		return 0;

	pag = xfs_perag_get(mp, agno);
	if (xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) ||
	    xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA))
		error = -ENOSPC;
	xfs_perag_put(pag);
	return error;
}
/*
 * Remap the given extent into the file.  The dmap blockcount will be set to
 * the number of blocks that were actually remapped.
 */
STATIC int
xfs_reflink_remap_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*dmap,
	xfs_off_t		new_isize)
{
	struct xfs_bmbt_irec	smap;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_off_t		newlen;
	int64_t			qdelta = 0;
	unsigned int		resblks;
	bool			quota_reserved = true;
	bool			smap_real;
	bool			dmap_written = xfs_bmap_is_written_extent(dmap);
	int			iext_delta = 0;
	int			nimaps;
	int			error = 0;

	/*
	 * Start a rolling transaction to switch the mappings.
	 *
	 * Adding a written extent to the extent map can cause a bmbt split,
	 * and removing a mapped extent from the extent map can cause a bmbt
	 * split.  The two operations cannot both cause a split since they
	 * operate on the same index in the bmap btree, so we only need a
	 * reservation for one bmbt split if either thing is happening.
	 * However, we haven't locked the inode yet, so we reserve assuming
	 * this is the case.
	 *
	 * The first allocation call tries to reserve enough space to handle
	 * mapping dmap into a sparse part of the file plus the bmbt split.  We
	 * haven't locked the inode or read the existing mapping yet, so we do
	 * not know for sure that we need the space.  This should succeed most
	 * of the time.
	 *
	 * If the first attempt fails, try again but reserving only enough
	 * space to handle a bmbt split.  This is the hard minimum requirement,
	 * and we revisit quota reservations later when we know more about what
	 * we're remapping.
	 */
	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
			resblks + dmap->br_blockcount, 0, false, &tp);
	if (error == -EDQUOT || error == -ENOSPC) {
		quota_reserved = false;
		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
				resblks, 0, false, &tp);
	}
	if (error)
		goto out;

	/*
	 * Read what's currently mapped in the destination file into smap.
	 * If smap isn't a hole, we will have to remove it before we can add
	 * dmap to the destination file.
	 */
	nimaps = 1;
	error = xfs_bmapi_read(ip, dmap->br_startoff, dmap->br_blockcount,
			&smap, &nimaps, 0);
	if (error)
		goto out_cancel;
	ASSERT(nimaps == 1 && smap.br_startoff == dmap->br_startoff);
	smap_real = xfs_bmap_is_real_extent(&smap);

	/*
	 * We can only remap as many blocks as the smaller of the two extent
	 * maps, because we can only remap one extent at a time.
	 */
	dmap->br_blockcount = min(dmap->br_blockcount, smap.br_blockcount);
	ASSERT(dmap->br_blockcount == smap.br_blockcount);

	trace_xfs_reflink_remap_extent_dest(ip, &smap);

	/*
	 * Two extents mapped to the same physical block must not have
	 * different states; that's filesystem corruption.  Move on to the next
	 * extent if they're both holes or both the same physical extent.
	 */
	if (dmap->br_startblock == smap.br_startblock) {
		if (dmap->br_state != smap.br_state)
			error = -EFSCORRUPTED;
		goto out_cancel;
	}

	/* If both extents are unwritten, leave them alone. */
	if (dmap->br_state == XFS_EXT_UNWRITTEN &&
	    smap.br_state == XFS_EXT_UNWRITTEN)
		goto out_cancel;

	/* No reflinking if the AG of the dest mapping is low on space. */
	if (dmap_written) {
		error = xfs_reflink_ag_has_free_space(mp,
				XFS_FSB_TO_AGNO(mp, dmap->br_startblock));
		if (error)
			goto out_cancel;
	}

	/*
	 * Increase quota reservation if we think the quota block counter for
	 * this file could increase.
	 *
	 * If we are mapping a written extent into the file, we need to have
	 * enough quota block count reservation to handle the blocks in that
	 * extent.  We log only the delta to the quota block counts, so if the
	 * extent we're unmapping also has blocks allocated to it, we don't
	 * need a quota reservation for the extent itself.
	 *
	 * Note that if we're replacing a delalloc reservation with a written
	 * extent, we have to take the full quota reservation because removing
	 * the delalloc reservation gives the block count back to the quota
	 * count.  This is suboptimal, but the VFS flushed the dest range
	 * before we started.  That should have removed all the delalloc
	 * reservations, but we code defensively.
	 *
	 * xfs_trans_alloc_inode above already tried to grab an even larger
	 * quota reservation, and kicked off a blockgc scan if it couldn't.
	 * If we can't get a potentially smaller quota reservation now, we're
	 * done.
	 */
	if (!quota_reserved && !smap_real && dmap_written) {
		error = xfs_trans_reserve_quota_nblks(tp, ip,
				dmap->br_blockcount, 0, false);
		if (error)
			goto out_cancel;
	}

	if (smap_real)
		++iext_delta;

	if (dmap_written)
		++iext_delta;

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK, iext_delta);
	if (error)
		goto out_cancel;

	if (smap_real) {
		/*
		 * If the extent we're unmapping is backed by storage (written
		 * or not), unmap the extent and drop its refcount.
		 */
		xfs_bmap_unmap_extent(tp, ip, &smap);
		xfs_refcount_decrease_extent(tp, &smap);
		qdelta -= smap.br_blockcount;
	} else if (smap.br_startblock == DELAYSTARTBLOCK) {
		xfs_filblks_t	len = smap.br_blockcount;

		/*
		 * If the extent we're unmapping is a delalloc reservation,
		 * we can use the regular bunmapi function to release the
		 * incore state.  Dropping the delalloc reservation takes care
		 * of the quota reservation for us.
		 */
		error = __xfs_bunmapi(NULL, ip, smap.br_startoff, &len, 0, 1);
		if (error)
			goto out_cancel;
		ASSERT(len == 0);
	}

	/*
	 * If the extent we're sharing is backed by written storage, increase
	 * its refcount and map it into the file.
	 */
	if (dmap_written) {
		xfs_refcount_increase_extent(tp, dmap);
		xfs_bmap_map_extent(tp, ip, dmap);
		qdelta += dmap->br_blockcount;
	}

	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, qdelta);

	/* Update dest isize if needed. */
	newlen = XFS_FSB_TO_B(mp, dmap->br_startoff + dmap->br_blockcount);
	newlen = min_t(xfs_off_t, newlen, new_isize);
	if (newlen > i_size_read(VFS_I(ip))) {
		trace_xfs_reflink_update_inode_size(ip, newlen);
		i_size_write(VFS_I(ip), newlen);
		ip->i_disk_size = newlen;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	}

	/* Commit everything and unlock. */
	error = xfs_trans_commit(tp);
	goto out_unlock;

out_cancel:
	xfs_trans_cancel(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	if (error)
		trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
	return error;
}
/* Remap a range of one file to the other. */
int
xfs_reflink_remap_blocks(
	struct xfs_inode	*src,
	loff_t			pos_in,
	struct xfs_inode	*dest,
	loff_t			pos_out,
	loff_t			remap_len,
	loff_t			*remapped)
{
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = src->i_mount;
	xfs_fileoff_t		srcoff = XFS_B_TO_FSBT(mp, pos_in);
	xfs_fileoff_t		destoff = XFS_B_TO_FSBT(mp, pos_out);
	xfs_filblks_t		len;
	xfs_filblks_t		remapped_len = 0;
	xfs_off_t		new_isize = pos_out + remap_len;
	int			nimaps;
	int			error = 0;

	len = min_t(xfs_filblks_t, XFS_B_TO_FSB(mp, remap_len),
			XFS_MAX_FILEOFF);

	trace_xfs_reflink_remap_blocks(src, srcoff, len, dest, destoff);

	while (len > 0) {
		unsigned int	lock_mode;

		/* Read extent from the source file */
		nimaps = 1;
		lock_mode = xfs_ilock_data_map_shared(src);
		error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
		xfs_iunlock(src, lock_mode);
		if (error)
			break;
		/*
		 * The caller supposedly flushed all dirty pages in the source
		 * file range, which means that writeback should have allocated
		 * or deleted all delalloc reservations in that range.  If we
		 * find one, that's a good sign that something is seriously
		 * wrong here.
		 */
		ASSERT(nimaps == 1 && imap.br_startoff == srcoff);
		if (imap.br_startblock == DELAYSTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			error = -EFSCORRUPTED;
			break;
		}

		trace_xfs_reflink_remap_extent_src(src, &imap);

		/* Remap into the destination file at the given offset. */
		imap.br_startoff = destoff;
		error = xfs_reflink_remap_extent(dest, &imap, new_isize);
		if (error)
			break;

		if (fatal_signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Advance drange/srange */
		srcoff += imap.br_blockcount;
		destoff += imap.br_blockcount;
		len -= imap.br_blockcount;
		remapped_len += imap.br_blockcount;
	}

	if (error)
		trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
	*remapped = min_t(loff_t, remap_len,
			XFS_FSB_TO_B(src->i_mount, remapped_len));
	return error;
}
/*
 * If we're reflinking to a point past the destination file's EOF, we must
 * zero any speculative post-EOF preallocations that sit between the old EOF
 * and the destination file offset.
 */
static int
xfs_reflink_zero_posteof(
	struct xfs_inode	*ip,
	loff_t			pos)
{
	loff_t			isize = i_size_read(VFS_I(ip));

	if (pos <= isize)
		return 0;

	trace_xfs_zero_eof(ip, isize, pos - isize);
	return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
			&xfs_buffered_write_iomap_ops);
}
/*
 * Prepare two files for range cloning.  Upon a successful return both inodes
 * will have the iolock and mmaplock held, the page cache of the out file will
 * be truncated, and any leases on the out file will have been broken.  This
 * function borrows heavily from xfs_file_aio_write_checks.
 *
 * The VFS allows partial EOF blocks to "match" for dedupe even though it
 * hasn't checked that the bytes beyond EOF physically match.  Hence we cannot
 * use the EOF block in the source dedupe range because it's not a complete
 * block match, and can therefore introduce a corruption into the file that
 * has its block replaced.
 *
 * In similar fashion, the VFS file cloning also allows partial EOF blocks to
 * be "block aligned" for the purposes of cloning entire files.  However, if
 * the source file range includes the EOF block and it lands within the
 * existing EOF of the destination file, then we can expose stale data from
 * beyond the source file EOF in the destination file.
 *
 * XFS doesn't support partial block sharing, so in both cases we have to
 * check these cases ourselves.  For dedupe, we can simply round the length to
 * dedupe down to the previous whole block and ignore the partial EOF block.
 * While this means we can't dedupe the last block of a file, this is an
 * acceptable tradeoff for simplicity of implementation.
 *
 * For cloning, we want to share the partial EOF block if it is also the new
 * EOF block of the destination file.  If the partial EOF block lies inside
 * the existing destination EOF, then we have to abort the clone to avoid
 * exposing stale data in the destination file.  Hence we reject these clone
 * attempts with -EINVAL in this case.
 */
int
xfs_reflink_remap_prep(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			*len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	int			ret;

	/* Lock both files against IO */
	ret = xfs_ilock2_io_mmap(src, dest);
	if (ret)
		return ret;

	/* Check file eligibility and prepare for block sharing. */
	ret = -EINVAL;
	/* Don't reflink realtime inodes */
	if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
		goto out_unlock;

	/* Don't share DAX file data for now. */
	if (IS_DAX(inode_in) || IS_DAX(inode_out))
		goto out_unlock;

	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
			len, remap_flags);
	if (ret || *len == 0)
		goto out_unlock;

	/* Attach dquots to dest inode before changing block map */
	ret = xfs_qm_dqattach(dest);
	if (ret)
		goto out_unlock;

	/*
	 * Zero existing post-eof speculative preallocations in the destination
	 * file.
	 */
	ret = xfs_reflink_zero_posteof(dest, pos_out);
	if (ret)
		goto out_unlock;

	/* Set flags and remap blocks. */
	ret = xfs_reflink_set_inode_flag(src, dest);
	if (ret)
		goto out_unlock;

	/*
	 * If pos_out > EOF, we may have dirtied blocks between EOF and
	 * pos_out.  In that case, we need to extend the flush and unmap to
	 * cover from EOF to the end of the copy length.
	 */
	if (pos_out > XFS_ISIZE(dest)) {
		loff_t	flen = *len + (pos_out - XFS_ISIZE(dest));
		ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
	} else {
		ret = xfs_flush_unmap_range(dest, pos_out, *len);
	}
	if (ret)
		goto out_unlock;

	return 0;
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	return ret;
}
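/*
 * (The VFS remap_file_range path in xfs_file.c is the expected caller of this
 * prep helper, followed by xfs_reflink_remap_blocks() and
 * xfs_reflink_update_dest() above.)
 */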
/* Does this inode need the reflink flag? */
int
xfs_reflink_inode_has_shared_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	bool			*has_shared)
{
	struct xfs_bmbt_irec	got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		rbno;
	xfs_extlen_t		rlen;
	struct xfs_iext_cursor	icur;
	bool			found;
	int			error;

	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
	if (error)
		return error;

	*has_shared = false;
	found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got);
	while (found) {
		if (isnullstartblock(got.br_startblock) ||
		    got.br_state != XFS_EXT_NORM)
			goto next;
		agno = XFS_FSB_TO_AGNO(mp, got.br_startblock);
		agbno = XFS_FSB_TO_AGBNO(mp, got.br_startblock);
		aglen = got.br_blockcount;

		error = xfs_reflink_find_shared(mp, tp, agno, agbno, aglen,
				&rbno, &rlen, false);
		if (error)
			return error;
		/* Is there still a shared block here? */
		if (rbno != NULLAGBLOCK) {
			*has_shared = true;
			return 0;
		}
next:
		found = xfs_iext_next_extent(ifp, &icur, &got);
	}

	return 0;
}
/*
 * Clear the inode reflink flag if there are no shared extents.
 *
 * The caller is responsible for joining the inode to the transaction passed
 * in.  The inode will be joined to the transaction that is returned to the
 * caller.
 */
int
xfs_reflink_clear_inode_flag(
	struct xfs_inode	*ip,
	struct xfs_trans	**tpp)
{
	bool			needs_flag;
	int			error = 0;

	ASSERT(xfs_is_reflink_inode(ip));

	error = xfs_reflink_inode_has_shared_extents(*tpp, ip, &needs_flag);
	if (error || needs_flag)
		return error;

	/*
	 * We didn't find any shared blocks so turn off the reflink flag.
	 * First, get rid of any leftover CoW mappings.
	 */
	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, XFS_MAX_FILEOFF,
			true);
	if (error)
		return error;

	/* Clear the inode flag. */
	trace_xfs_reflink_unset_inode_flag(ip);
	ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	xfs_inode_clear_cowblocks_tag(ip);
	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

	return error;
}
/*
 * Clear the inode reflink flag if there are no shared extents and the size
 * hasn't changed.
 */
STATIC int
xfs_reflink_try_clear_inode_flag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = 0;

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_reflink_clear_inode_flag(ip, &tp);
	if (error)
		goto cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
cancel:
	xfs_trans_cancel(tp);
out:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * Pre-COW all shared blocks within a given byte range of a file and turn off
 * the reflink flag if we unshare all of the file's blocks.
 */
int
xfs_reflink_unshare(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct inode		*inode = VFS_I(ip);
	int			error;

	if (!xfs_is_reflink_inode(ip))
		return 0;

	trace_xfs_reflink_unshare(ip, offset, len);

	inode_dio_wait(inode);

	error = iomap_file_unshare(inode, offset, len,
			&xfs_buffered_write_iomap_ops);
	if (error)
		goto out;

	error = filemap_write_and_wait_range(inode->i_mapping, offset,
			offset + len - 1);
	if (error)
		goto out;

	/* Turn off the reflink flag if possible. */
	error = xfs_reflink_try_clear_inode_flag(ip);
	if (error)
		goto out;
	return 0;

out:
	trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
	return error;
}
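/*
 * (Unsharing is typically driven from fallocate's FALLOC_FL_UNSHARE_RANGE
 * mode: iomap_file_unshare() dirties the shared ranges through the CoW path,
 * and the writeback above makes the private copies permanent.)
 */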