1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * Copyright (c) 2012 Red Hat, Inc.
4 * All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it would be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_shared.h"
22 #include "xfs_format.h"
23 #include "xfs_log_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_bit.h"
26 #include "xfs_sb.h"
27 #include "xfs_ag.h"
28 #include "xfs_mount.h"
29 #include "xfs_da_format.h"
30 #include "xfs_inode.h"
31 #include "xfs_btree.h"
32 #include "xfs_trans.h"
33 #include "xfs_extfree_item.h"
34 #include "xfs_alloc.h"
35 #include "xfs_bmap.h"
36 #include "xfs_bmap_util.h"
37 #include "xfs_bmap_btree.h"
38 #include "xfs_rtalloc.h"
39 #include "xfs_error.h"
40 #include "xfs_quota.h"
41 #include "xfs_trans_space.h"
42 #include "xfs_trace.h"
43 #include "xfs_icache.h"
44 #include "xfs_log.h"
45 #include "xfs_dinode.h"
46
47 /* Kernel-only BMAP-related definitions and functions */
48
49 /*
50 * Convert the given file system block to a disk block. Realtime file
51 * blocks map linearly onto the realtime device, while data device
52 * blocks are translated through XFS_FSB_TO_DADDR(), mirroring the bmap code.
53 */
54 xfs_daddr_t
55 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
56 {
57 return (XFS_IS_REALTIME_INODE(ip) ? \
58 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
59 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
60 }
61
62 /*
63 * Routine to be called at transaction end by callers of xfs_bmapi and
64 * xfs_bunmapi. Frees all the extents that need freeing, which must be
65 * done last due to locking considerations. We never free any extents
66 * in the first transaction.
67 *
68 * Returns 1 in the committed parameter if the given transaction was
69 * committed and a new one started, and 0 otherwise.
70 */
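/*
 * Typical caller pattern (a sketch; see xfs_alloc_file_space() and
 * xfs_free_file_space() later in this file for real examples):
 *
 *	xfs_bmap_init(&free_list, &firstfsb);
 *	error = xfs_bmapi_write(tp, ip, ...);   (or xfs_bunmapi())
 *	error = xfs_bmap_finish(&tp, &free_list, &committed);
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 */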
71 int /* error */
72 xfs_bmap_finish(
73 xfs_trans_t **tp, /* transaction pointer addr */
74 xfs_bmap_free_t *flist, /* i/o: list extents to free */
75 int *committed) /* xact committed or not */
76 {
77 xfs_efd_log_item_t *efd; /* extent free data */
78 xfs_efi_log_item_t *efi; /* extent free intention */
79 int error; /* error return value */
80 xfs_bmap_free_item_t *free; /* free extent item */
81 struct xfs_trans_res tres; /* new log reservation */
82 xfs_mount_t *mp; /* filesystem mount structure */
83 xfs_bmap_free_item_t *next; /* next item on free list */
84 xfs_trans_t *ntp; /* new transaction pointer */
85
86 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
87 if (flist->xbf_count == 0) {
88 *committed = 0;
89 return 0;
90 }
91 ntp = *tp;
92 efi = xfs_trans_get_efi(ntp, flist->xbf_count);
93 for (free = flist->xbf_first; free; free = free->xbfi_next)
94 xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
95 free->xbfi_blockcount);
96
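/*
 * Roll the transaction: save the current log reservation, duplicate the
 * transaction, commit the original one (which logs the EFI), and then
 * reserve log space in the new transaction so the EFD and the actual
 * extent frees can be logged below.
 */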
97 tres.tr_logres = ntp->t_log_res;
98 tres.tr_logcount = ntp->t_log_count;
99 tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
100 ntp = xfs_trans_dup(*tp);
101 error = xfs_trans_commit(*tp, 0);
102 *tp = ntp;
103 *committed = 1;
104 /*
105 * We have a new transaction, so we should return committed=1,
106 * even though we're returning an error.
107 */
108 if (error)
109 return error;
110
111 /*
112 * The transaction commit worked OK, so we can drop the extra ticket
113 * reference that we gained in xfs_trans_dup().
114 */
115 xfs_log_ticket_put(ntp->t_ticket);
116
117 error = xfs_trans_reserve(ntp, &tres, 0, 0);
118 if (error)
119 return error;
120 efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
121 for (free = flist->xbf_first; free != NULL; free = next) {
122 next = free->xbfi_next;
123 if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
124 free->xbfi_blockcount))) {
125 /*
126 * The bmap free list will be cleaned up at a
127 * higher level. The EFI will be canceled when
128 * this transaction is aborted.
129 * Need to force shutdown here to make sure it
130 * happens, since this transaction may not be
131 * dirty yet.
132 */
133 mp = ntp->t_mountp;
134 if (!XFS_FORCED_SHUTDOWN(mp))
135 xfs_force_shutdown(mp,
136 (error == EFSCORRUPTED) ?
137 SHUTDOWN_CORRUPT_INCORE :
138 SHUTDOWN_META_IO_ERROR);
139 return error;
140 }
141 xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
142 free->xbfi_blockcount);
143 xfs_bmap_del_free(flist, NULL, free);
144 }
145 return 0;
146 }
147
148 int
149 xfs_bmap_rtalloc(
150 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
151 {
152 xfs_alloctype_t atype = 0; /* type for allocation routines */
153 int error; /* error return value */
154 xfs_mount_t *mp; /* mount point structure */
155 xfs_extlen_t prod = 0; /* product factor for allocators */
156 xfs_extlen_t ralen = 0; /* realtime allocation length */
157 xfs_extlen_t align; /* minimum allocation alignment */
158 xfs_rtblock_t rtb;
159
160 mp = ap->ip->i_mount;
161 align = xfs_get_extsz_hint(ap->ip);
162 prod = align / mp->m_sb.sb_rextsize;
163 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
164 align, 1, ap->eof, 0,
165 ap->conv, &ap->offset, &ap->length);
166 if (error)
167 return error;
168 ASSERT(ap->length);
169 ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
170
171 /*
172 * If the offset and length are not perfectly aligned
173 * then drop prod back to 1; it would just get us in trouble.
174 */
175 if (do_mod(ap->offset, align) || ap->length % align)
176 prod = 1;
177 /*
178 * Set ralen to be the actual requested length in rtextents.
179 */
180 ralen = ap->length / mp->m_sb.sb_rextsize;
181 /*
182 * If the old value was close enough to MAXEXTLEN that
183 * we rounded up to it, cut it back so it's valid again.
184 * Note that if it's a really large request (bigger than
185 * MAXEXTLEN), we don't hear about that number, and can't
186 * adjust the starting point to match it.
187 */
188 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
189 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
190
191 /*
192 * Lock out other modifications to the RT bitmap inode.
193 */
194 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
195 xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
196
197 /*
198 * If it's an allocation to an empty file at offset 0,
199 * pick an extent that will space things out in the rt area.
200 */
201 if (ap->eof && ap->offset == 0) {
202 xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
203
204 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
205 if (error)
206 return error;
207 ap->blkno = rtx * mp->m_sb.sb_rextsize;
208 } else {
209 ap->blkno = 0;
210 }
211
212 xfs_bmap_adjacent(ap);
213
214 /*
215 * Realtime allocation, done through xfs_rtallocate_extent.
216 */
217 atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
218 do_div(ap->blkno, mp->m_sb.sb_rextsize);
219 rtb = ap->blkno;
220 ap->length = ralen;
221 if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
222 &ralen, atype, ap->wasdel, prod, &rtb)))
223 return error;
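/*
 * If the aligned attempt found nothing and we were using a realtime
 * extent size multiple (prod > 1), retry the allocation without the
 * alignment constraint.
 */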
224 if (rtb == NULLFSBLOCK && prod > 1 &&
225 (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
226 ap->length, &ralen, atype,
227 ap->wasdel, 1, &rtb)))
228 return error;
229 ap->blkno = rtb;
230 if (ap->blkno != NULLFSBLOCK) {
231 ap->blkno *= mp->m_sb.sb_rextsize;
232 ralen *= mp->m_sb.sb_rextsize;
233 ap->length = ralen;
234 ap->ip->i_d.di_nblocks += ralen;
235 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
236 if (ap->wasdel)
237 ap->ip->i_delayed_blks -= ralen;
238 /*
239 * Adjust the disk quota also. This was reserved
240 * earlier.
241 */
242 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
243 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
244 XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
245 } else {
246 ap->length = 0;
247 }
248 return 0;
249 }
250
251 /*
252 * Stack switching interfaces for allocation
253 */
254 static void
255 xfs_bmapi_allocate_worker(
256 struct work_struct *work)
257 {
258 struct xfs_bmalloca *args = container_of(work,
259 struct xfs_bmalloca, work);
260 unsigned long pflags;
261
262 /* we are in a transaction context here */
263 current_set_flags_nested(&pflags, PF_FSTRANS);
264
265 args->result = __xfs_bmapi_allocate(args);
266 complete(args->done);
267
268 current_restore_flags_nested(&pflags, PF_FSTRANS);
269 }
270
271 /*
272 * Some allocation requests often come in with little stack to work on. Push
273 * them off to a worker thread so there is lots of stack to use. Otherwise just
274 * call directly to avoid the context switch overhead here.
275 */
276 int
277 xfs_bmapi_allocate(
278 struct xfs_bmalloca *args)
279 {
280 DECLARE_COMPLETION_ONSTACK(done);
281
282 if (!args->stack_switch)
283 return __xfs_bmapi_allocate(args);
284
285
286 args->done = &done;
287 INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
288 queue_work(xfs_alloc_wq, &args->work);
289 wait_for_completion(&done);
290 return args->result;
291 }
292
293 /*
294 * Check if the endoff is outside the last extent. If so the caller will grow
295 * the allocation to a stripe unit boundary. All offsets are considered outside
296 * the end of file for an empty fork, so 1 is returned in *eof in that case.
297 */
298 int
299 xfs_bmap_eof(
300 struct xfs_inode *ip,
301 xfs_fileoff_t endoff,
302 int whichfork,
303 int *eof)
304 {
305 struct xfs_bmbt_irec rec;
306 int error;
307
308 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
309 if (error || *eof)
310 return error;
311
312 *eof = endoff >= rec.br_startoff + rec.br_blockcount;
313 return 0;
314 }
315
316 /*
317 * Extent tree block counting routines.
318 */
319
320 /*
321 * Count leaf blocks given a range of extent records.
322 */
323 STATIC void
324 xfs_bmap_count_leaves(
325 xfs_ifork_t *ifp,
326 xfs_extnum_t idx,
327 int numrecs,
328 int *count)
329 {
330 int b;
331
332 for (b = 0; b < numrecs; b++) {
333 xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
334 *count += xfs_bmbt_get_blockcount(frp);
335 }
336 }
337
338 /*
339 * Count leaf blocks given a range of extent records originally
340 * in btree format.
341 */
342 STATIC void
343 xfs_bmap_disk_count_leaves(
344 struct xfs_mount *mp,
345 struct xfs_btree_block *block,
346 int numrecs,
347 int *count)
348 {
349 int b;
350 xfs_bmbt_rec_t *frp;
351
352 for (b = 1; b <= numrecs; b++) {
353 frp = XFS_BMBT_REC_ADDR(mp, block, b);
354 *count += xfs_bmbt_disk_get_blockcount(frp);
355 }
356 }
357
358 /*
359 * Recursively walks each level of a btree
360 * to count total fsblocks in use.
361 */
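/*
 * Each btree block visited adds one to *count, and each leaf record
 * adds the number of filesystem blocks it maps, so the total covers
 * both the mapped data blocks and the bmap btree blocks themselves.
 */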
362 STATIC int /* error */
363 xfs_bmap_count_tree(
364 xfs_mount_t *mp, /* file system mount point */
365 xfs_trans_t *tp, /* transaction pointer */
366 xfs_ifork_t *ifp, /* inode fork pointer */
367 xfs_fsblock_t blockno, /* file system block number */
368 int levelin, /* level in btree */
369 int *count) /* Count of blocks */
370 {
371 int error;
372 xfs_buf_t *bp, *nbp;
373 int level = levelin;
374 __be64 *pp;
375 xfs_fsblock_t bno = blockno;
376 xfs_fsblock_t nextbno;
377 struct xfs_btree_block *block, *nextblock;
378 int numrecs;
379
380 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
381 &xfs_bmbt_buf_ops);
382 if (error)
383 return error;
384 *count += 1;
385 block = XFS_BUF_TO_BLOCK(bp);
386
387 if (--level) {
388 /* Not at the leaf level yet: count the nodes at this level */
389 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
390 while (nextbno != NULLFSBLOCK) {
391 error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
392 XFS_BMAP_BTREE_REF,
393 &xfs_bmbt_buf_ops);
394 if (error)
395 return error;
396 *count += 1;
397 nextblock = XFS_BUF_TO_BLOCK(nbp);
398 nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
399 xfs_trans_brelse(tp, nbp);
400 }
401
402 /* Dive to the next level */
403 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
404 bno = be64_to_cpu(*pp);
405 if (unlikely((error =
406 xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
407 xfs_trans_brelse(tp, bp);
408 XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
409 XFS_ERRLEVEL_LOW, mp);
410 return XFS_ERROR(EFSCORRUPTED);
411 }
412 xfs_trans_brelse(tp, bp);
413 } else {
414 /* Walk the leaf blocks, counting each block and the extents it maps */
415 for (;;) {
416 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
417 numrecs = be16_to_cpu(block->bb_numrecs);
418 xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
419 xfs_trans_brelse(tp, bp);
420 if (nextbno == NULLFSBLOCK)
421 break;
422 bno = nextbno;
423 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
424 XFS_BMAP_BTREE_REF,
425 &xfs_bmbt_buf_ops);
426 if (error)
427 return error;
428 *count += 1;
429 block = XFS_BUF_TO_BLOCK(bp);
430 }
431 }
432 return 0;
433 }
434
435 /*
436 * Count fsblocks of the given fork.
437 */
438 int /* error */
439 xfs_bmap_count_blocks(
440 xfs_trans_t *tp, /* transaction pointer */
441 xfs_inode_t *ip, /* incore inode */
442 int whichfork, /* data or attr fork */
443 int *count) /* out: count of blocks */
444 {
445 struct xfs_btree_block *block; /* current btree block */
446 xfs_fsblock_t bno; /* block # of "block" */
447 xfs_ifork_t *ifp; /* fork structure */
448 int level; /* btree level, for checking */
449 xfs_mount_t *mp; /* file system mount structure */
450 __be64 *pp; /* pointer to block address */
451
452 bno = NULLFSBLOCK;
453 mp = ip->i_mount;
454 ifp = XFS_IFORK_PTR(ip, whichfork);
455 if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
456 xfs_bmap_count_leaves(ifp, 0,
457 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
458 count);
459 return 0;
460 }
461
462 /*
463 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
464 */
465 block = ifp->if_broot;
466 level = be16_to_cpu(block->bb_level);
467 ASSERT(level > 0);
468 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
469 bno = be64_to_cpu(*pp);
470 ASSERT(bno != NULLDFSBNO);
471 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
472 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
473
474 if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
475 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
476 mp);
477 return XFS_ERROR(EFSCORRUPTED);
478 }
479
480 return 0;
481 }
482
483 /*
484 * Returns 1 for success, 0 if we failed to map the extent (a hole starting at or beyond EOF that should not be reported).
485 */
486 STATIC int
487 xfs_getbmapx_fix_eof_hole(
488 xfs_inode_t *ip, /* xfs incore inode pointer */
489 struct getbmapx *out, /* output structure */
490 int prealloced, /* this is a file with
491 * preallocated data space */
492 __int64_t end, /* last block requested */
493 xfs_fsblock_t startblock)
494 {
495 __int64_t fixlen;
496 xfs_mount_t *mp; /* file system mount point */
497 xfs_ifork_t *ifp; /* inode fork pointer */
498 xfs_extnum_t lastx; /* last extent pointer */
499 xfs_fileoff_t fileblock;
500
501 if (startblock == HOLESTARTBLOCK) {
502 mp = ip->i_mount;
503 out->bmv_block = -1;
504 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
505 fixlen -= out->bmv_offset;
506 if (prealloced && out->bmv_offset + out->bmv_length == end) {
507 /* Came to hole at EOF. Trim it. */
508 if (fixlen <= 0)
509 return 0;
510 out->bmv_length = fixlen;
511 }
512 } else {
513 if (startblock == DELAYSTARTBLOCK)
514 out->bmv_block = -2;
515 else
516 out->bmv_block = xfs_fsb_to_db(ip, startblock);
517 fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
518 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
519 if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
520 (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
521 out->bmv_oflags |= BMV_OF_LAST;
522 }
523
524 return 1;
525 }
526
527 /*
528 * Get inode's extents as described in bmv, and format for output.
529 * Calls formatter to fill the user's buffer until all extents
530 * are mapped, until the passed-in bmv->bmv_count slots have
531 * been filled, or until the formatter short-circuits the loop,
532 * if it is tracking filled-in extents on its own.
533 */
534 int /* error code */
535 xfs_getbmap(
536 xfs_inode_t *ip,
537 struct getbmapx *bmv, /* user bmap structure */
538 xfs_bmap_format_t formatter, /* format to user */
539 void *arg) /* formatter arg */
540 {
541 __int64_t bmvend; /* last block requested */
542 int error = 0; /* return value */
543 __int64_t fixlen; /* length for -1 case */
544 int i; /* extent number */
545 int lock; /* lock state */
546 xfs_bmbt_irec_t *map; /* buffer for user's data */
547 xfs_mount_t *mp; /* file system mount point */
548 int nex; /* # of user extents can do */
549 int nexleft; /* # of user extents left */
550 int subnex; /* # of bmapi's can do */
551 int nmap; /* number of map entries */
552 struct getbmapx *out; /* output structure */
553 int whichfork; /* data or attr fork */
554 int prealloced; /* this is a file with
555 * preallocated data space */
556 int iflags; /* interface flags */
557 int bmapi_flags; /* flags for xfs_bmapi */
558 int cur_ext = 0;
559
560 mp = ip->i_mount;
561 iflags = bmv->bmv_iflags;
562 whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
563
564 if (whichfork == XFS_ATTR_FORK) {
565 if (XFS_IFORK_Q(ip)) {
566 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
567 ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
568 ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
569 return XFS_ERROR(EINVAL);
570 } else if (unlikely(
571 ip->i_d.di_aformat != 0 &&
572 ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
573 XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
574 ip->i_mount);
575 return XFS_ERROR(EFSCORRUPTED);
576 }
577
578 prealloced = 0;
579 fixlen = 1LL << 32;
580 } else {
581 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
582 ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
583 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
584 return XFS_ERROR(EINVAL);
585
586 if (xfs_get_extsz_hint(ip) ||
587 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
588 prealloced = 1;
589 fixlen = mp->m_super->s_maxbytes;
590 } else {
591 prealloced = 0;
592 fixlen = XFS_ISIZE(ip);
593 }
594 }
595
596 if (bmv->bmv_length == -1) {
597 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
598 bmv->bmv_length =
599 max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
600 } else if (bmv->bmv_length == 0) {
601 bmv->bmv_entries = 0;
602 return 0;
603 } else if (bmv->bmv_length < 0) {
604 return XFS_ERROR(EINVAL);
605 }
606
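/*
 * The caller's getbmapx array includes a header record, so only
 * bmv_count - 1 slots are usable for extent records.
 */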
607 nex = bmv->bmv_count - 1;
608 if (nex <= 0)
609 return XFS_ERROR(EINVAL);
610 bmvend = bmv->bmv_offset + bmv->bmv_length;
611
612
613 if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
614 return XFS_ERROR(ENOMEM);
615 out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
616 if (!out)
617 return XFS_ERROR(ENOMEM);
618
619 xfs_ilock(ip, XFS_IOLOCK_SHARED);
620 if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
621 if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
622 error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
623 if (error)
624 goto out_unlock_iolock;
625 }
626 /*
627 * even after flushing the inode, there can still be delalloc
628 * blocks on the inode beyond EOF due to speculative
629 * preallocation. These are not removed until the release
630 * function is called or the inode is inactivated. Hence we
631 * cannot assert here that ip->i_delayed_blks == 0.
632 */
633 }
634
635 lock = xfs_ilock_map_shared(ip);
636
637 /*
638 * Don't let nex be bigger than the number of extents
639 * we can have assuming alternating holes and real extents.
640 */
641 if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
642 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
643
644 bmapi_flags = xfs_bmapi_aflag(whichfork);
645 if (!(iflags & BMV_IF_PREALLOC))
646 bmapi_flags |= XFS_BMAPI_IGSTATE;
647
648 /*
649 * Allocate enough space to handle "subnex" maps at a time.
650 */
651 error = ENOMEM;
652 subnex = 16;
653 map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
654 if (!map)
655 goto out_unlock_ilock;
656
657 bmv->bmv_entries = 0;
658
659 if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
660 (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
661 error = 0;
662 goto out_free_map;
663 }
664
665 nexleft = nex;
666
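/*
 * Walk the requested range in chunks of up to subnex mappings,
 * translating each xfs_bmbt_irec returned by xfs_bmapi_read() into a
 * getbmapx record for the caller.
 */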
667 do {
668 nmap = (nexleft > subnex) ? subnex : nexleft;
669 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
670 XFS_BB_TO_FSB(mp, bmv->bmv_length),
671 map, &nmap, bmapi_flags);
672 if (error)
673 goto out_free_map;
674 ASSERT(nmap <= subnex);
675
676 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
677 out[cur_ext].bmv_oflags = 0;
678 if (map[i].br_state == XFS_EXT_UNWRITTEN)
679 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
680 else if (map[i].br_startblock == DELAYSTARTBLOCK)
681 out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
682 out[cur_ext].bmv_offset =
683 XFS_FSB_TO_BB(mp, map[i].br_startoff);
684 out[cur_ext].bmv_length =
685 XFS_FSB_TO_BB(mp, map[i].br_blockcount);
686 out[cur_ext].bmv_unused1 = 0;
687 out[cur_ext].bmv_unused2 = 0;
688
689 /*
690 * delayed allocation extents that start beyond EOF can
691 * occur due to speculative EOF allocation when the
692 * delalloc extent is larger than the largest freespace
693 * extent at conversion time. These extents cannot be
694 * converted by data writeback, so can exist here even
695 * if we are not supposed to be finding delalloc
696 * extents.
697 */
698 if (map[i].br_startblock == DELAYSTARTBLOCK &&
699 map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
700 ASSERT((iflags & BMV_IF_DELALLOC) != 0);
701
702 if (map[i].br_startblock == HOLESTARTBLOCK &&
703 whichfork == XFS_ATTR_FORK) {
704 /* came to the end of attribute fork */
705 out[cur_ext].bmv_oflags |= BMV_OF_LAST;
706 goto out_free_map;
707 }
708
709 if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
710 prealloced, bmvend,
711 map[i].br_startblock))
712 goto out_free_map;
713
714 bmv->bmv_offset =
715 out[cur_ext].bmv_offset +
716 out[cur_ext].bmv_length;
717 bmv->bmv_length =
718 max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
719
720 /*
721 * If we don't want to return the hole, don't
722 * advance cur_ext so that we can reuse the slot
723 * for the next mapping.
724 */
725 if ((iflags & BMV_IF_NO_HOLES) &&
726 map[i].br_startblock == HOLESTARTBLOCK) {
727 memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
728 continue;
729 }
730
731 nexleft--;
732 bmv->bmv_entries++;
733 cur_ext++;
734 }
735 } while (nmap && nexleft && bmv->bmv_length);
736
737 out_free_map:
738 kmem_free(map);
739 out_unlock_ilock:
740 xfs_iunlock_map_shared(ip, lock);
741 out_unlock_iolock:
742 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
743
744 for (i = 0; i < cur_ext; i++) {
745 int full = 0; /* user array is full */
746
747 /* format results & advance arg */
748 error = formatter(&arg, &out[i], &full);
749 if (error || full)
750 break;
751 }
752
753 kmem_free(out);
754 return error;
755 }
756
757 /*
758 * Dead simple method of punching delayed allocation blocks from a range in
759 * the inode. Walks a block at a time so it will be slow, but it is only run
760 * in rare error cases so the overhead is not critical. This will always punch
761 * out both the start and end blocks, even if the ranges only partially
762 * overlap them, so it is up to the caller to ensure that partial blocks are
763 * not passed in.
764 */
765 int
766 xfs_bmap_punch_delalloc_range(
767 struct xfs_inode *ip,
768 xfs_fileoff_t start_fsb,
769 xfs_fileoff_t length)
770 {
771 xfs_fileoff_t remaining = length;
772 int error = 0;
773
774 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
775
776 do {
777 int done;
778 xfs_bmbt_irec_t imap;
779 int nimaps = 1;
780 xfs_fsblock_t firstblock;
781 xfs_bmap_free_t flist;
782
783 /*
784 * Map the range first and check that it is a delalloc extent
785 * before trying to unmap the range. Otherwise we will be
786 * trying to remove a real extent (which requires a
787 * transaction) or a hole, which is probably a bad idea...
788 */
789 error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
790 XFS_BMAPI_ENTIRE);
791
792 if (error) {
793 /* something screwed, just bail */
794 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
795 xfs_alert(ip->i_mount,
796 "Failed delalloc mapping lookup ino %lld fsb %lld.",
797 ip->i_ino, start_fsb);
798 }
799 break;
800 }
801 if (!nimaps) {
802 /* nothing there */
803 goto next_block;
804 }
805 if (imap.br_startblock != DELAYSTARTBLOCK) {
806 /* been converted, ignore */
807 goto next_block;
808 }
809 WARN_ON(imap.br_blockcount == 0);
810
811 /*
812 * Note: while we initialise the firstblock/flist pair, they
813 * should never be used because blocks should never be
814 * allocated or freed for a delalloc extent and hence we don't
815 * need to cancel or finish them after the xfs_bunmapi() call.
816 */
817 xfs_bmap_init(&flist, &firstblock);
818 error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
819 &flist, &done);
820 if (error)
821 break;
822
823 ASSERT(!flist.xbf_count && !flist.xbf_first);
824 next_block:
825 start_fsb++;
826 remaining--;
827 } while (remaining > 0);
828
829 return error;
830 }
831
832 /*
833 * Test whether it is appropriate to check an inode for and free post EOF
834 * blocks. The 'force' parameter determines whether we should also consider
835 * regular files that are marked preallocated or append-only.
836 */
837 bool
838 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
839 {
840 /* prealloc/delalloc exists only on regular files */
841 if (!S_ISREG(ip->i_d.di_mode))
842 return false;
843
844 /*
845 * Zero sized files with no cached pages and delalloc blocks will not
846 * have speculative prealloc/delalloc blocks to remove.
847 */
848 if (VFS_I(ip)->i_size == 0 &&
849 VN_CACHED(VFS_I(ip)) == 0 &&
850 ip->i_delayed_blks == 0)
851 return false;
852
853 /* If we haven't read in the extent list, then don't do it now. */
854 if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
855 return false;
856
857 /*
858 * Do not free real preallocated or append-only files unless the file
859 * has delalloc blocks and we are forced to remove them.
860 */
861 if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
862 if (!force || ip->i_delayed_blks == 0)
863 return false;
864
865 return true;
866 }
867
868 /*
869 * This is called by xfs_inactive to free any blocks beyond eof
870 * when the link count isn't zero and by xfs_dm_punch_hole() when
871 * punching a hole to EOF.
872 */
873 int
874 xfs_free_eofblocks(
875 xfs_mount_t *mp,
876 xfs_inode_t *ip,
877 bool need_iolock)
878 {
879 xfs_trans_t *tp;
880 int error;
881 xfs_fileoff_t end_fsb;
882 xfs_fileoff_t last_fsb;
883 xfs_filblks_t map_len;
884 int nimaps;
885 xfs_bmbt_irec_t imap;
886
887 /*
888 * Figure out if there are any blocks beyond the end
889 * of the file. If not, then there is nothing to do.
890 */
891 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
892 last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
893 if (last_fsb <= end_fsb)
894 return 0;
895 map_len = last_fsb - end_fsb;
896
897 nimaps = 1;
898 xfs_ilock(ip, XFS_ILOCK_SHARED);
899 error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
900 xfs_iunlock(ip, XFS_ILOCK_SHARED);
901
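/*
 * Only start a truncate transaction if the mapping beyond EOF is not a
 * hole, or the inode still carries delayed allocation blocks.
 */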
902 if (!error && (nimaps != 0) &&
903 (imap.br_startblock != HOLESTARTBLOCK ||
904 ip->i_delayed_blks)) {
905 /*
906 * Attach the dquots to the inode up front.
907 */
908 error = xfs_qm_dqattach(ip, 0);
909 if (error)
910 return error;
911
912 /*
913 * There are blocks after the end of file.
914 * Free them up now by truncating the file to
915 * its current size.
916 */
917 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
918
919 if (need_iolock) {
920 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
921 xfs_trans_cancel(tp, 0);
922 return EAGAIN;
923 }
924 }
925
926 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
927 if (error) {
928 ASSERT(XFS_FORCED_SHUTDOWN(mp));
929 xfs_trans_cancel(tp, 0);
930 if (need_iolock)
931 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
932 return error;
933 }
934
935 xfs_ilock(ip, XFS_ILOCK_EXCL);
936 xfs_trans_ijoin(tp, ip, 0);
937
938 /*
939 * Do not update the on-disk file size. If we update the
940 * on-disk file size and then the system crashes before the
941 * contents of the file are flushed to disk then the files
942 * may be full of holes (i.e. the NULL files bug).
943 */
944 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
945 XFS_ISIZE(ip));
946 if (error) {
947 /*
948 * If we get an error at this point we simply don't
949 * bother truncating the file.
950 */
951 xfs_trans_cancel(tp,
952 (XFS_TRANS_RELEASE_LOG_RES |
953 XFS_TRANS_ABORT));
954 } else {
955 error = xfs_trans_commit(tp,
956 XFS_TRANS_RELEASE_LOG_RES);
957 if (!error)
958 xfs_inode_clear_eofblocks_tag(ip);
959 }
960
961 xfs_iunlock(ip, XFS_ILOCK_EXCL);
962 if (need_iolock)
963 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
964 }
965 return error;
966 }
967
968 int
969 xfs_alloc_file_space(
970 struct xfs_inode *ip,
971 xfs_off_t offset,
972 xfs_off_t len,
973 int alloc_type)
974 {
975 xfs_mount_t *mp = ip->i_mount;
976 xfs_off_t count;
977 xfs_filblks_t allocated_fsb;
978 xfs_filblks_t allocatesize_fsb;
979 xfs_extlen_t extsz, temp;
980 xfs_fileoff_t startoffset_fsb;
981 xfs_fsblock_t firstfsb;
982 int nimaps;
983 int quota_flag;
984 int rt;
985 xfs_trans_t *tp;
986 xfs_bmbt_irec_t imaps[1], *imapp;
987 xfs_bmap_free_t free_list;
988 uint qblocks, resblks, resrtextents;
989 int committed;
990 int error;
991
992 trace_xfs_alloc_file_space(ip);
993
994 if (XFS_FORCED_SHUTDOWN(mp))
995 return XFS_ERROR(EIO);
996
997 error = xfs_qm_dqattach(ip, 0);
998 if (error)
999 return error;
1000
1001 if (len <= 0)
1002 return XFS_ERROR(EINVAL);
1003
1004 rt = XFS_IS_REALTIME_INODE(ip);
1005 extsz = xfs_get_extsz_hint(ip);
1006
1007 count = len;
1008 imapp = &imaps[0];
1009 nimaps = 1;
1010 startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
1011 allocatesize_fsb = XFS_B_TO_FSB(mp, count);
1012
1013 /*
1014 * Allocate file space until done or until there is an error
1015 */
1016 while (allocatesize_fsb && !error) {
1017 xfs_fileoff_t s, e;
1018
1019 /*
1020 * Determine space reservations for data/realtime.
1021 */
1022 if (unlikely(extsz)) {
1023 s = startoffset_fsb;
1024 do_div(s, extsz);
1025 s *= extsz;
1026 e = startoffset_fsb + allocatesize_fsb;
1027 if ((temp = do_mod(startoffset_fsb, extsz)))
1028 e += temp;
1029 if ((temp = do_mod(e, extsz)))
1030 e += extsz - temp;
1031 } else {
1032 s = 0;
1033 e = allocatesize_fsb;
1034 }
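/*
 * Worked example (illustrative values): with extsz = 4,
 * startoffset_fsb = 6 and allocatesize_fsb = 5, s rounds down to 4
 * and e works out to 16, so the reservation below is sized for the
 * worst-case aligned allocation rather than the exact request.
 */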
1035
1036 /*
1037 * The transaction reservation is limited to a 32-bit block
1038 * count, hence we need to limit the number of blocks we are
1039 * trying to reserve to avoid an overflow. We can't allocate
1040 * more than @nimaps extents, and an extent is limited on disk
1041 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
1042 */
1043 resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
1044 if (unlikely(rt)) {
1045 resrtextents = qblocks = resblks;
1046 resrtextents /= mp->m_sb.sb_rextsize;
1047 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1048 quota_flag = XFS_QMOPT_RES_RTBLKS;
1049 } else {
1050 resrtextents = 0;
1051 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
1052 quota_flag = XFS_QMOPT_RES_REGBLKS;
1053 }
1054
1055 /*
1056 * Allocate and set up the transaction.
1057 */
1058 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1059 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
1060 resblks, resrtextents);
1061 /*
1062 * Check for running out of space
1063 */
1064 if (error) {
1065 /*
1066 * Free the transaction structure.
1067 */
1068 ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1069 xfs_trans_cancel(tp, 0);
1070 break;
1071 }
1072 xfs_ilock(ip, XFS_ILOCK_EXCL);
1073 error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
1074 0, quota_flag);
1075 if (error)
1076 goto error1;
1077
1078 xfs_trans_ijoin(tp, ip, 0);
1079
1080 xfs_bmap_init(&free_list, &firstfsb);
1081 error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1082 allocatesize_fsb, alloc_type, &firstfsb,
1083 0, imapp, &nimaps, &free_list);
1084 if (error) {
1085 goto error0;
1086 }
1087
1088 /*
1089 * Complete the transaction
1090 */
1091 error = xfs_bmap_finish(&tp, &free_list, &committed);
1092 if (error) {
1093 goto error0;
1094 }
1095
1096 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1097 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1098 if (error) {
1099 break;
1100 }
1101
1102 allocated_fsb = imapp->br_blockcount;
1103
1104 if (nimaps == 0) {
1105 error = XFS_ERROR(ENOSPC);
1106 break;
1107 }
1108
1109 startoffset_fsb += allocated_fsb;
1110 allocatesize_fsb -= allocated_fsb;
1111 }
1112
1113 return error;
1114
1115 error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1116 xfs_bmap_cancel(&free_list);
1117 xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1118
1119 error1: /* Just cancel transaction */
1120 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
1121 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1122 return error;
1123 }
1124
1125 /*
1126 * Zero file bytes between startoff and endoff inclusive.
1127 * The iolock is held exclusive and no blocks are buffered.
1128 *
1129 * This function is used by xfs_free_file_space() to zero
1130 * partial blocks when the range to free is not block aligned.
1131 * When unreserving space with boundaries that are not block
1132 * aligned we round up the start and round down the end
1133 * boundaries and then use this function to zero the parts of
1134 * the blocks that got dropped during the rounding.
1135 */
1136 STATIC int
1137 xfs_zero_remaining_bytes(
1138 xfs_inode_t *ip,
1139 xfs_off_t startoff,
1140 xfs_off_t endoff)
1141 {
1142 xfs_bmbt_irec_t imap;
1143 xfs_fileoff_t offset_fsb;
1144 xfs_off_t lastoffset;
1145 xfs_off_t offset;
1146 xfs_buf_t *bp;
1147 xfs_mount_t *mp = ip->i_mount;
1148 int nimap;
1149 int error = 0;
1150
1151 /*
1152 * Avoid doing I/O beyond eof - it's not necessary
1153 * since nothing can read beyond eof. The space will
1154 * be zeroed when the file is extended anyway.
1155 */
1156 if (startoff >= XFS_ISIZE(ip))
1157 return 0;
1158
1159 if (endoff > XFS_ISIZE(ip))
1160 endoff = XFS_ISIZE(ip);
1161
1162 bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
1163 mp->m_rtdev_targp : mp->m_ddev_targp,
1164 BTOBB(mp->m_sb.sb_blocksize), 0);
1165 if (!bp)
1166 return XFS_ERROR(ENOMEM);
1167
1168 xfs_buf_unlock(bp);
1169
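/*
 * For each block in the range: map it, read it from disk into the
 * uncached buffer, zero the affected bytes in memory, and write the
 * block back. Holes and unwritten extents are skipped because they
 * already read back as zeros.
 */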
1170 for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
1171 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1172 nimap = 1;
1173 error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
1174 if (error || nimap < 1)
1175 break;
1176 ASSERT(imap.br_blockcount >= 1);
1177 ASSERT(imap.br_startoff == offset_fsb);
1178 lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
1179 if (lastoffset > endoff)
1180 lastoffset = endoff;
1181 if (imap.br_startblock == HOLESTARTBLOCK)
1182 continue;
1183 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1184 if (imap.br_state == XFS_EXT_UNWRITTEN)
1185 continue;
1186 XFS_BUF_UNDONE(bp);
1187 XFS_BUF_UNWRITE(bp);
1188 XFS_BUF_READ(bp);
1189 XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
1190
1191 if (XFS_FORCED_SHUTDOWN(mp)) {
1192 error = XFS_ERROR(EIO);
1193 break;
1194 }
1195 xfs_buf_iorequest(bp);
1196 error = xfs_buf_iowait(bp);
1197 if (error) {
1198 xfs_buf_ioerror_alert(bp,
1199 "xfs_zero_remaining_bytes(read)");
1200 break;
1201 }
1202 memset(bp->b_addr +
1203 (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
1204 0, lastoffset - offset + 1);
1205 XFS_BUF_UNDONE(bp);
1206 XFS_BUF_UNREAD(bp);
1207 XFS_BUF_WRITE(bp);
1208
1209 if (XFS_FORCED_SHUTDOWN(mp)) {
1210 error = XFS_ERROR(EIO);
1211 break;
1212 }
1213 xfs_buf_iorequest(bp);
1214 error = xfs_buf_iowait(bp);
1215 if (error) {
1216 xfs_buf_ioerror_alert(bp,
1217 "xfs_zero_remaining_bytes(write)");
1218 break;
1219 }
1220 }
1221 xfs_buf_free(bp);
1222 return error;
1223 }
1224
1225 int
1226 xfs_free_file_space(
1227 struct xfs_inode *ip,
1228 xfs_off_t offset,
1229 xfs_off_t len)
1230 {
1231 int committed;
1232 int done;
1233 xfs_fileoff_t endoffset_fsb;
1234 int error;
1235 xfs_fsblock_t firstfsb;
1236 xfs_bmap_free_t free_list;
1237 xfs_bmbt_irec_t imap;
1238 xfs_off_t ioffset;
1239 xfs_extlen_t mod=0;
1240 xfs_mount_t *mp;
1241 int nimap;
1242 uint resblks;
1243 xfs_off_t rounding;
1244 int rt;
1245 xfs_fileoff_t startoffset_fsb;
1246 xfs_trans_t *tp;
1247
1248 mp = ip->i_mount;
1249
1250 trace_xfs_free_file_space(ip);
1251
1252 error = xfs_qm_dqattach(ip, 0);
1253 if (error)
1254 return error;
1255
1256 error = 0;
1257 if (len <= 0) /* if nothing being freed */
1258 return error;
1259 rt = XFS_IS_REALTIME_INODE(ip);
1260 startoffset_fsb = XFS_B_TO_FSB(mp, offset);
1261 endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1262
1263 /* wait for the completion of any pending DIOs */
1264 inode_dio_wait(VFS_I(ip));
1265
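/*
 * Write back and invalidate the page cache from the rounded-down start
 * offset to the end of file so no stale pages survive the punch.
 */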
1266 rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
1267 ioffset = offset & ~(rounding - 1);
1268 error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1269 ioffset, -1);
1270 if (error)
1271 goto out;
1272 truncate_pagecache_range(VFS_I(ip), ioffset, -1);
1273
1274 /*
1275 * Need to zero the stuff we're not freeing, on disk.
1276 * If it's a realtime file & can't use unwritten extents then we
1277 * actually need to zero the extent edges. Otherwise xfs_bunmapi
1278 * will take care of it for us.
1279 */
1280 if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1281 nimap = 1;
1282 error = xfs_bmapi_read(ip, startoffset_fsb, 1,
1283 &imap, &nimap, 0);
1284 if (error)
1285 goto out;
1286 ASSERT(nimap == 0 || nimap == 1);
1287 if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1288 xfs_daddr_t block;
1289
1290 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1291 block = imap.br_startblock;
1292 mod = do_div(block, mp->m_sb.sb_rextsize);
1293 if (mod)
1294 startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1295 }
1296 nimap = 1;
1297 error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
1298 &imap, &nimap, 0);
1299 if (error)
1300 goto out;
1301 ASSERT(nimap == 0 || nimap == 1);
1302 if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1303 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1304 mod++;
1305 if (mod && (mod != mp->m_sb.sb_rextsize))
1306 endoffset_fsb -= mod;
1307 }
1308 }
1309 if ((done = (endoffset_fsb <= startoffset_fsb)))
1310 /*
1311 * One contiguous piece to clear
1312 */
1313 error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
1314 else {
1315 /*
1316 * Some full blocks, possibly two pieces to clear
1317 */
1318 if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
1319 error = xfs_zero_remaining_bytes(ip, offset,
1320 XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
1321 if (!error &&
1322 XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
1323 error = xfs_zero_remaining_bytes(ip,
1324 XFS_FSB_TO_B(mp, endoffset_fsb),
1325 offset + len - 1);
1326 }
1327
1328 /*
1329 * free file space until done or until there is an error
1330 */
1331 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1332 while (!error && !done) {
1333
1334 /*
1335 * Allocate and set up the transaction. Allow this
1336 * transaction to dip into the reserve blocks to ensure
1337 * the freeing of the space succeeds at ENOSPC.
1338 */
1339 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1340 tp->t_flags |= XFS_TRANS_RESERVE;
1341 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
1342
1343 /*
1344 * check for running out of space
1345 */
1346 if (error) {
1347 /*
1348 * Free the transaction structure.
1349 */
1350 ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1351 xfs_trans_cancel(tp, 0);
1352 break;
1353 }
1354 xfs_ilock(ip, XFS_ILOCK_EXCL);
1355 error = xfs_trans_reserve_quota(tp, mp,
1356 ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
1357 resblks, 0, XFS_QMOPT_RES_REGBLKS);
1358 if (error)
1359 goto error1;
1360
1361 xfs_trans_ijoin(tp, ip, 0);
1362
1363 /*
1364 * issue the bunmapi() call to free the blocks
1365 */
1366 xfs_bmap_init(&free_list, &firstfsb);
1367 error = xfs_bunmapi(tp, ip, startoffset_fsb,
1368 endoffset_fsb - startoffset_fsb,
1369 0, 2, &firstfsb, &free_list, &done);
1370 if (error) {
1371 goto error0;
1372 }
1373
1374 /*
1375 * complete the transaction
1376 */
1377 error = xfs_bmap_finish(&tp, &free_list, &committed);
1378 if (error) {
1379 goto error0;
1380 }
1381
1382 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1383 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1384 }
1385
1386 out:
1387 return error;
1388
1389 error0:
1390 xfs_bmap_cancel(&free_list);
1391 error1:
1392 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
1393 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1394 goto out;
1395 }
1396
1397
1398 int
1399 xfs_zero_file_space(
1400 struct xfs_inode *ip,
1401 xfs_off_t offset,
1402 xfs_off_t len)
1403 {
1404 struct xfs_mount *mp = ip->i_mount;
1405 uint granularity;
1406 xfs_off_t start_boundary;
1407 xfs_off_t end_boundary;
1408 int error;
1409
1410 granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
1411
1412 /*
1413 * Round the range of extents we are going to convert inwards. If the
1414 * offset is aligned, then it doesn't get changed so we zero from the
1415 * start of the block the offset points to.
1416 */
1417 start_boundary = round_up(offset, granularity);
1418 end_boundary = round_down(offset + len, granularity);
1419
1420 ASSERT(start_boundary >= offset);
1421 ASSERT(end_boundary <= offset + len);
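/*
 * Illustrative example (assumed values): with 4k granularity,
 * offset = 3k and len = 10k give start_boundary = 4k and
 * end_boundary = 12k; the interior is converted to unwritten extents
 * and the 3k..4k and 12k..13k edges are zeroed with xfs_iozero().
 */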
1422
1423 if (start_boundary < end_boundary - 1) {
1424 /* punch out the page cache over the conversion range */
1425 truncate_pagecache_range(VFS_I(ip), start_boundary,
1426 end_boundary - 1);
1427 /* convert the blocks */
1428 error = xfs_alloc_file_space(ip, start_boundary,
1429 end_boundary - start_boundary - 1,
1430 XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
1431 if (error)
1432 goto out;
1433
1434 /* We've handled the interior of the range, now for the edges */
1435 if (start_boundary != offset) {
1436 error = xfs_iozero(ip, offset, start_boundary - offset);
1437 if (error)
1438 goto out;
1439 }
1440
1441 if (end_boundary != offset + len)
1442 error = xfs_iozero(ip, end_boundary,
1443 offset + len - end_boundary);
1444
1445 } else {
1446 /*
1447 * It's either a sub-granularity range or the range spans
1448 * parts of just two adjacent blocks.
1449 */
1450 error = xfs_iozero(ip, offset, len);
1451 }
1452
1453 out:
1454 return error;
1455
1456 }
1457
1458 /*
1459 * We need to check that the format of the data fork in the temporary inode is
1460 * valid for the target inode before doing the swap. This is not a problem with
1461 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1462 * data fork depending on the space the attribute fork is taking so we can get
1463 * invalid formats on the target inode.
1464 *
1465 * E.g. target has space for 7 extents in extent format, temp inode only has
1466 * space for 6. If we defragment down to 7 extents, then the tmp format is a
1467 * btree, but when swapped it needs to be in extent format. Hence we can't just
1468 * blindly swap data forks on attr2 filesystems.
1469 *
1470 * Note that we check the swap in both directions so that we don't end up with
1471 * a corrupt temporary inode, either.
1472 *
1473 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1474 * inode will prevent this situation from occurring, so all we do here is
1475 * reject and log the attempt. Basically we are putting the responsibility on
1476 * userspace to get this right.
1477 */
1478 static int
1479 xfs_swap_extents_check_format(
1480 xfs_inode_t *ip, /* target inode */
1481 xfs_inode_t *tip) /* tmp inode */
1482 {
1483
1484 /* Should never get a local format */
1485 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1486 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1487 return EINVAL;
1488
1489 /*
1490 * If the target inode has fewer extents than the temporary inode,
1491 * why did userspace call us?
1492 */
1493 if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1494 return EINVAL;
1495
1496 /*
1497 * If the target inode is in extent form and the temp inode is in btree
1498 * form then we will end up with the target inode in the wrong format
1499 * as we already know there are fewer extents in the temp inode.
1500 */
1501 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1502 tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1503 return EINVAL;
1504
1505 /* Check temp in extent form to max in target */
1506 if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1507 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1508 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1509 return EINVAL;
1510
1511 /* Check target in extent form to max in temp */
1512 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1513 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1514 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1515 return EINVAL;
1516
1517 /*
1518 * If we are in a btree format, check that the temp root block will fit
1519 * in the target and that it has enough extents to be in btree format
1520 * in the target.
1521 *
1522 * Note that we have to be careful to allow btree->extent conversions
1523 * (a common defrag case) which will occur when the temp inode is in
1524 * extent format...
1525 */
1526 if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1527 if (XFS_IFORK_BOFF(ip) &&
1528 XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1529 return EINVAL;
1530 if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1531 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1532 return EINVAL;
1533 }
1534
1535 /* Reciprocal target->temp btree format checks */
1536 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1537 if (XFS_IFORK_BOFF(tip) &&
1538 XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1539 return EINVAL;
1540 if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1541 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1542 return EINVAL;
1543 }
1544
1545 return 0;
1546 }
1547
1548 int
1549 xfs_swap_extents(
1550 xfs_inode_t *ip, /* target inode */
1551 xfs_inode_t *tip, /* tmp inode */
1552 xfs_swapext_t *sxp)
1553 {
1554 xfs_mount_t *mp = ip->i_mount;
1555 xfs_trans_t *tp;
1556 xfs_bstat_t *sbp = &sxp->sx_stat;
1557 xfs_ifork_t *tempifp, *ifp, *tifp;
1558 int src_log_flags, target_log_flags;
1559 int error = 0;
1560 int aforkblks = 0;
1561 int taforkblks = 0;
1562 __uint64_t tmp;
1563
1564 tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
1565 if (!tempifp) {
1566 error = XFS_ERROR(ENOMEM);
1567 goto out;
1568 }
1569
1570 /*
1571 * We have to do two separate lock calls here to keep lockdep
1572 * happy. If we try to get all the locks in one call, lockdep will
1573 * report false positives when we drop the ILOCK and regain them
1574 * below.
1575 */
1576 xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
1577 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1578
1579 /* Verify that both files have the same format */
1580 if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
1581 error = XFS_ERROR(EINVAL);
1582 goto out_unlock;
1583 }
1584
1585 /* Verify both files are either real-time or non-realtime */
1586 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1587 error = XFS_ERROR(EINVAL);
1588 goto out_unlock;
1589 }
1590
1591 error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
1592 if (error)
1593 goto out_unlock;
1594 truncate_pagecache_range(VFS_I(tip), 0, -1);
1595
1596 /* Verify O_DIRECT for ftmp */
1597 if (VN_CACHED(VFS_I(tip)) != 0) {
1598 error = XFS_ERROR(EINVAL);
1599 goto out_unlock;
1600 }
1601
1602 /* Verify all data are being swapped */
1603 if (sxp->sx_offset != 0 ||
1604 sxp->sx_length != ip->i_d.di_size ||
1605 sxp->sx_length != tip->i_d.di_size) {
1606 error = XFS_ERROR(EFAULT);
1607 goto out_unlock;
1608 }
1609
1610 trace_xfs_swap_extent_before(ip, 0);
1611 trace_xfs_swap_extent_before(tip, 1);
1612
1613 /* check inode formats now that data is flushed */
1614 error = xfs_swap_extents_check_format(ip, tip);
1615 if (error) {
1616 xfs_notice(mp,
1617 "%s: inode 0x%llx format is incompatible for exchanging.",
1618 __func__, ip->i_ino);
1619 goto out_unlock;
1620 }
1621
1622 /*
1623 * Compare the current change and modify times with those
1624 * passed in. If they differ, we abort this swap.
1625 * This is the mechanism used to assure the calling
1626 * process that the file was not changed out from
1627 * under it.
1628 */
1629 if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
1630 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
1631 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
1632 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
1633 error = XFS_ERROR(EBUSY);
1634 goto out_unlock;
1635 }
1636
1637 /* We need to fail if the file is memory mapped. Once we have tossed
1638 * all existing pages, the page fault will have no option
1639 * but to go to the filesystem for pages. By making the page fault call
1640 * vop_read (or write in the case of autogrow) they block on the iolock
1641 * until we have switched the extents.
1642 */
1643 if (VN_MAPPED(VFS_I(ip))) {
1644 error = XFS_ERROR(EBUSY);
1645 goto out_unlock;
1646 }
1647
1648 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1649 xfs_iunlock(tip, XFS_ILOCK_EXCL);
1650
1651 /*
1652 * There is a race condition here since we gave up the
1653 * ilock. However, the data fork will not change since
1654 * we have the iolock (locked for truncation too) so we
1655 * are safe. We don't really care if non-io related
1656 * fields change.
1657 */
1658 truncate_pagecache_range(VFS_I(ip), 0, -1);
1659
1660 tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
1661 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
1662 if (error) {
1663 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1664 xfs_iunlock(tip, XFS_IOLOCK_EXCL);
1665 xfs_trans_cancel(tp, 0);
1666 goto out;
1667 }
1668 xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
1669
1670 /*
1671 * Count the number of extended attribute blocks
1672 */
1673 if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1674 (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1675 error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
1676 if (error)
1677 goto out_trans_cancel;
1678 }
1679 if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1680 (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1681 error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
1682 &taforkblks);
1683 if (error)
1684 goto out_trans_cancel;
1685 }
1686
1687 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1688 xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1689
1690 /*
1691 * Before we've swapped the forks, let's set the owners of the forks
1692 * appropriately. We have to do this as we are demand paging the btree
1693 * buffers, and so the validation done on read will expect the owner
1694 * field to be correctly set. Once we change the owners, we can swap the
1695 * inode forks.
1696 *
1697 * Note the trickiness in setting the log flags - we set the owner log
1698 * flag on the opposite inode (i.e. the inode we are setting the new
1699 * owner to be) because once we swap the forks and log that, log
1700 * recovery is going to see the fork as owned by the swapped inode,
1701 * not the pre-swapped inodes.
1702 */
1703 src_log_flags = XFS_ILOG_CORE;
1704 target_log_flags = XFS_ILOG_CORE;
1705 if (ip->i_d.di_version == 3 &&
1706 ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1707 target_log_flags |= XFS_ILOG_DOWNER;
1708 error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
1709 tip->i_ino, NULL);
1710 if (error)
1711 goto out_trans_cancel;
1712 }
1713
1714 if (tip->i_d.di_version == 3 &&
1715 tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1716 src_log_flags |= XFS_ILOG_DOWNER;
1717 error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
1718 ip->i_ino, NULL);
1719 if (error)
1720 goto out_trans_cancel;
1721 }
1722
1723 /*
1724 * Swap the data forks of the inodes
1725 */
1726 ifp = &ip->i_df;
1727 tifp = &tip->i_df;
1728 *tempifp = *ifp; /* struct copy */
1729 *ifp = *tifp; /* struct copy */
1730 *tifp = *tempifp; /* struct copy */
1731
1732 /*
1733 * Fix the on-disk inode values
1734 */
1735 tmp = (__uint64_t)ip->i_d.di_nblocks;
1736 ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1737 tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
1738
1739 tmp = (__uint64_t) ip->i_d.di_nextents;
1740 ip->i_d.di_nextents = tip->i_d.di_nextents;
1741 tip->i_d.di_nextents = tmp;
1742
1743 tmp = (__uint64_t) ip->i_d.di_format;
1744 ip->i_d.di_format = tip->i_d.di_format;
1745 tip->i_d.di_format = tmp;
1746
1747 /*
1748 * The extents in the source inode could still contain speculative
1749 * preallocation beyond EOF (e.g. the file is open but not modified
1750 * while defrag is in progress). In that case, we need to copy over the
1751 * number of delalloc blocks the data fork in the source inode is
1752 * tracking beyond EOF so that when the fork is truncated away when the
1753 * temporary inode is unlinked we don't underrun the i_delayed_blks
1754 * counter on that inode.
1755 */
1756 ASSERT(tip->i_delayed_blks == 0);
1757 tip->i_delayed_blks = ip->i_delayed_blks;
1758 ip->i_delayed_blks = 0;
1759
1760 switch (ip->i_d.di_format) {
1761 case XFS_DINODE_FMT_EXTENTS:
1762 /* If the extents fit in the inode, fix the
1763 * pointer. Otherwise it's already NULL or
1764 * pointing to the extent.
1765 */
1766 if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1767 ifp->if_u1.if_extents =
1768 ifp->if_u2.if_inline_ext;
1769 }
1770 src_log_flags |= XFS_ILOG_DEXT;
1771 break;
1772 case XFS_DINODE_FMT_BTREE:
1773 ASSERT(ip->i_d.di_version < 3 ||
1774 (src_log_flags & XFS_ILOG_DOWNER));
1775 src_log_flags |= XFS_ILOG_DBROOT;
1776 break;
1777 }
1778
1779 switch (tip->i_d.di_format) {
1780 case XFS_DINODE_FMT_EXTENTS:
1781 /* If the extents fit in the inode, fix the
1782 * pointer. Otherwise it's already NULL or
1783 * pointing to the extent.
1784 */
1785 if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
1786 tifp->if_u1.if_extents =
1787 tifp->if_u2.if_inline_ext;
1788 }
1789 target_log_flags |= XFS_ILOG_DEXT;
1790 break;
1791 case XFS_DINODE_FMT_BTREE:
1792 target_log_flags |= XFS_ILOG_DBROOT;
1793 ASSERT(tip->i_d.di_version < 3 ||
1794 (target_log_flags & XFS_ILOG_DOWNER));
1795 break;
1796 }
1797
1798 xfs_trans_log_inode(tp, ip, src_log_flags);
1799 xfs_trans_log_inode(tp, tip, target_log_flags);
1800
1801 /*
1802 * If this is a synchronous mount, make sure that the
1803 * transaction goes to disk before returning to the user.
1804 */
1805 if (mp->m_flags & XFS_MOUNT_WSYNC)
1806 xfs_trans_set_sync(tp);
1807
1808 error = xfs_trans_commit(tp, 0);
1809
1810 trace_xfs_swap_extent_after(ip, 0);
1811 trace_xfs_swap_extent_after(tip, 1);
1812 out:
1813 kmem_free(tempifp);
1814 return error;
1815
1816 out_unlock:
1817 xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1818 xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1819 goto out;
1820
1821 out_trans_cancel:
1822 xfs_trans_cancel(tp, 0);
1823 goto out_unlock;
1824 }