// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract CoW extent size hint from inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_d.di_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_afp &&
	    ip->i_afp->if_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

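/*
 * A minimal usage sketch (assumed caller, not from this file): the mode
 * returned by the wrappers must be handed back to xfs_iunlock() unchanged,
 * because the lock may have been escalated to XFS_ILOCK_EXCL:
 *
 *	uint	lock_mode = xfs_ilock_data_map_shared(ip);
 *
 *	error = xfs_bmapi_read(ip, offset_fsb, len_fsb, &imap, &nimaps, 0);
 *	xfs_iunlock(ip, lock_mode);
 */
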
/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 * page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

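/*
 * A sketch of the ordering rules above in practice (illustrative, not
 * from this file): an extent-manipulation path such as hole punch takes
 * all three locks outermost first and drops them with the same flags:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... invalidate the page cache, manipulate extents ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 */
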
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}

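/*
 * Typical trylock pattern (an assumed caller, for illustration; the
 * -EAGAIN fallback is caller policy, not mandated here): paths that must
 * not sleep, such as inode reclaim, back off and retry later when the
 * locks are contended:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return -EAGAIN;
 *	... do the non-blocking work ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */
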
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 *
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

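/*
 * Demotion sketch (illustrative, not from this file): a write path can
 * take the iolock exclusively for setup work, then demote it to shared
 * for the bulk of the IO so readers are not blocked longer than
 * necessary:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	... exclusive setup ...
 *	xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 *	... continue under XFS_IOLOCK_SHARED ...
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 */
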
#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !debug_locks ||
				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
	}

	ASSERT(0);
	return 0;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}

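/*
 * Worked example (illustrative): locking the third inode of a set
 * (subclass 2) with XFS_ILOCK_EXCL, xfs_lock_inumorder() returns
 * XFS_ILOCK_EXCL | (2 << XFS_ILOCK_SHIFT) - the low bits still carry the
 * lock mode while lockdep sees a distinct subclass for each position in
 * the locking order.
 */
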
/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0, i, j, try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		i = 0;
		try_lock = 0;
		goto again;
	}
}

/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the mmaplock or the ilock, but not more than one type at a time. If we lock
 * more than one at a time, lockdep will report false positives saying we have
 * violated locking orders.  The iolock must be double-locked separately since
 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 * SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	struct xfs_inode	*temp;
	uint			mode_temp;
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
		mode_temp = ip0_mode;
		ip0_mode = ip1_mode;
		ip1_mode = mode_temp;
	}

again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}

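/*
 * Usage sketch (illustrative; mirrors callers such as xfs_link() below):
 * the inodes end up locked in ascending i_ino order regardless of the
 * order of the arguments:
 *
 *	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
 */
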
STATIC uint
_xfs_dic2xflags(
	uint16_t		di_flags,
	uint64_t		di_flags2,
	bool			has_attr)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (di_flags2 & XFS_DIFLAG2_ANY) {
		if (di_flags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (has_attr)
		flags |= FS_XFLAG_HASATTR;

	return flags;
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	struct xfs_icdinode	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}

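/*
 * Example mapping (illustrative): an inode with XFS_DIFLAG_REALTIME and
 * XFS_DIFLAG_EXTSIZE set in di_flags, XFS_DIFLAG2_DAX set in di_flags2,
 * and an attribute fork translates to FS_XFLAG_REALTIME |
 * FS_XFLAG_EXTSIZE | FS_XFLAG_DAX | FS_XFLAG_HASATTR, the encoding
 * userspace sees via FS_IOC_FSGETXATTR.
 */
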
/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_d.di_extsize = pip->i_d.di_extsize;
		}
		if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_d.di_extsize = pip->i_d.di_extsize;
		}
	}
	if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_d.di_flags |= di_flags;
}

/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
	}
	if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
		ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
static int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	dev_t		rdev,
	prid_t		prid,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	struct timespec64 tv;
	struct inode	*inode;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	inode->i_mode = mode;
	set_nlink(inode, nlink);
	inode->i_uid = current_fsuid();
	inode->i_rdev = rdev;
	ip->i_d.di_projid = prid;

	if (pip && XFS_INHERIT_GID(pip)) {
		inode->i_gid = VFS_I(pip)->i_gid;
		if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else {
		inode->i_gid = current_fsgid();
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit &&
	    (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
		inode->i_mode &= ~S_ISGID;

	ip->i_d.di_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		inode_set_iversion(inode, 1);
		ip->i_d.di_flags2 = mp->m_ino_geo.new_diflags2;
		ip->i_d.di_cowextsize = 0;
		ip->i_d.di_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy. This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 *
 */
int
xfs_dir_ialloc(
	xfs_trans_t	**tpp,		/* input: current transaction;
					   output: may be a new transaction. */
	xfs_inode_t	*dp,		/* directory in which to allocate
					   the inode. */
	umode_t		mode,
	xfs_nlink_t	nlink,
	dev_t		rdev,
	prid_t		prid,		/* project id */
	xfs_inode_t	**ipp)		/* pointer to inode; it will be
					   locked. */
{
	xfs_trans_t	*tp;
	xfs_inode_t	*ip;
	xfs_buf_t	*ialloc_context = NULL;
	int		code;
	void		*dqinfo;
	uint		tflags;

	tp = *tpp;
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * xfs_ialloc will return a pointer to an incore inode if
	 * the Space Manager has an available inode on the free
	 * list. Otherwise, it will do an allocation and replenish
	 * the freelist.  Since we can only do one allocation per
	 * transaction without deadlocks, we will need to commit the
	 * current transaction and start a new one.  We will then
	 * need to call xfs_ialloc again to get the inode.
	 *
	 * If xfs_ialloc did an allocation to replenish the freelist,
	 * it returns the bp containing the head of the freelist as
	 * ialloc_context. We will hold a lock on it across the
	 * transaction commit so that no other process can steal
	 * the inode(s) that we've just allocated.
	 */
	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
			&ip);

	/*
	 * Return an error if we were unable to allocate a new inode.
	 * This should only happen if we run out of space on disk or
	 * encounter a disk error.
	 */
	if (code) {
		*ipp = NULL;
		return code;
	}
	if (!ialloc_context && !ip) {
		*ipp = NULL;
		return -ENOSPC;
	}

	/*
	 * If the AGI buffer is non-NULL, then we were unable to get an
	 * inode in one operation.  We need to commit the current
	 * transaction and call xfs_ialloc() again.  It is guaranteed
	 * to succeed the second time.
	 */
	if (ialloc_context) {
		/*
		 * Normally, xfs_trans_commit releases all the locks.
		 * We call bhold to hang on to the ialloc_context across
		 * the commit.  Holding this buffer prevents any other
		 * processes from doing any allocations in this
		 * allocation group.
		 */
		xfs_trans_bhold(tp, ialloc_context);

		/*
		 * We want the quota changes to be associated with the next
		 * transaction, NOT this one. So, detach the dqinfo from this
		 * and attach it to the next transaction.
		 */
		dqinfo = NULL;
		tflags = 0;
		if (tp->t_dqinfo) {
			dqinfo = (void *)tp->t_dqinfo;
			tp->t_dqinfo = NULL;
			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
		}

		code = xfs_trans_roll(&tp);

		/*
		 * Re-attach the quota info that we detached from prev trx.
		 */
		if (dqinfo) {
			tp->t_dqinfo = dqinfo;
			tp->t_flags |= tflags;
		}

		if (code) {
			xfs_buf_relse(ialloc_context);
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		xfs_trans_bjoin(tp, ialloc_context);

		/*
		 * Call ialloc again. Since we've locked out all
		 * other allocations in this allocation group,
		 * this call should always succeed.
		 */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
				  &ialloc_context, &ip);

		/*
		 * If we get an error at this point, return to the caller
		 * so that the current transaction can be aborted.
		 */
		if (code) {
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		ASSERT(!ialloc_context && ip);

	}

	*ipp = ip;
	*tpp = tp;

	return 0;
}

/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

int
xfs_create(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	trace_xfs_create(dp, name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
					&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
	}
	if (error)
		goto out_release_inode;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * Reserve disk quota and the inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to them, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dir_ialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
				   resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_create_tmpfile(
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
				XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
				&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
	if (error)
		goto out_release_inode;

	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto out_trans_cancel;

	error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
	if (error)
		goto out_trans_cancel;

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}

int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		resblks = 0;
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
	}
	if (error)
		goto std_return;

	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_d.di_projid != sip->i_d.di_projid)) {
		error = -EXDEV;
		goto error_return;
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	/*
	 * Handle initial link state of O_TMPFILE inode
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		error = xfs_iunlink_remove(tp, sip);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	xfs_bumplink(tp, sip);

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);

 error_return:
	xfs_trans_cancel(tp);
 std_return:
	return error;
}

363e59ba DW |
1447 | /* Clear the reflink flag and the cowblocks tag if possible. */ |
1448 | static void | |
1449 | xfs_itruncate_clear_reflink_flags( | |
1450 | struct xfs_inode *ip) | |
1451 | { | |
1452 | struct xfs_ifork *dfork; | |
1453 | struct xfs_ifork *cfork; | |
1454 | ||
1455 | if (!xfs_is_reflink_inode(ip)) | |
1456 | return; | |
1457 | dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK); | |
1458 | cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK); | |
1459 | if (dfork->if_bytes == 0 && cfork->if_bytes == 0) | |
1460 | ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; | |
1461 | if (cfork->if_bytes == 0) | |
1462 | xfs_inode_clear_cowblocks_tag(ip); | |
1463 | } | |
1464 | ||
1da177e4 | 1465 | /* |
8f04c47a CH |
1466 | * Free up the underlying blocks past new_size. The new size must be smaller |
1467 | * than the current size. This routine can be used both for the attribute and | |
1468 | * data fork, and does not modify the inode size, which is left to the caller. | |
1da177e4 | 1469 | * |
f6485057 DC |
1470 | * The transaction passed to this routine must have made a permanent log |
1471 | * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the | |
1472 | * given transaction and start new ones, so make sure everything involved in | |
1473 | * the transaction is tidy before calling here. A transaction will be |
1474 | * returned to the caller to be committed. The incoming transaction must | |
1475 | * already include the inode, and both inode locks must be held exclusively. | |
1476 | * The inode must also be "held" within the transaction. On return the inode | |
1477 | * will be "held" within the returned transaction. This routine does NOT | |
1478 | * require any disk space to be reserved for it within the transaction. | |
1da177e4 | 1479 | * |
f6485057 DC |
1480 | * If we get an error, we must return with the inode locked and linked into the |
1481 | * current transaction. This keeps things simple for the higher level code, | |
1482 | * because it always knows that the inode is locked and held in the transaction | |
1483 | * that returns to it whether errors occur or not. We don't mark the inode | |
1484 | * dirty on error so that transactions can be easily aborted if possible. | |
1da177e4 LT |
1485 | */ |
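/*
 * Illustrative caller sketch, showing the shape of a typical
 * truncate-to-zero caller under the contract above; see
 * xfs_inactive_truncate() below for a real caller.
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
 *	if (!error)
 *		error = xfs_trans_commit(tp);	(commit whatever tp came back)
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */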
1486 | int | |
4e529339 | 1487 | xfs_itruncate_extents_flags( |
8f04c47a CH |
1488 | struct xfs_trans **tpp, |
1489 | struct xfs_inode *ip, | |
1490 | int whichfork, | |
13b86fc3 | 1491 | xfs_fsize_t new_size, |
4e529339 | 1492 | int flags) |
1da177e4 | 1493 | { |
8f04c47a CH |
1494 | struct xfs_mount *mp = ip->i_mount; |
1495 | struct xfs_trans *tp = *tpp; | |
8f04c47a | 1496 | xfs_fileoff_t first_unmap_block; |
8f04c47a | 1497 | xfs_filblks_t unmap_len; |
8f04c47a | 1498 | int error = 0; |
1da177e4 | 1499 | |
0b56185b CH |
1500 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
1501 | ASSERT(!atomic_read(&VFS_I(ip)->i_count) || | |
1502 | xfs_isilocked(ip, XFS_IOLOCK_EXCL)); | |
ce7ae151 | 1503 | ASSERT(new_size <= XFS_ISIZE(ip)); |
8f04c47a | 1504 | ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); |
1da177e4 | 1505 | ASSERT(ip->i_itemp != NULL); |
898621d5 | 1506 | ASSERT(ip->i_itemp->ili_lock_flags == 0); |
8f04c47a | 1507 | ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); |
1da177e4 | 1508 | |
673e8e59 CH |
1509 | trace_xfs_itruncate_extents_start(ip, new_size); |
1510 | ||
4e529339 | 1511 | flags |= xfs_bmapi_aflag(whichfork); |
13b86fc3 | 1512 | |
1da177e4 LT |
1513 | /* |
1514 | * Since it is possible for space to become allocated beyond | |
1515 | * the end of the file (in a crash where the space is allocated | |
1516 | * but the inode size is not yet updated), simply remove any | |
1517 | * blocks which show up between the new EOF and the maximum | |
4bbb04ab DW |
1518 | * possible file size. |
1519 | * | |
1520 | * We have to free all the blocks to the bmbt maximum offset, even if | |
1521 | * the page cache can't scale that far. | |
1da177e4 | 1522 | */ |
8f04c47a | 1523 | first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); |
4bbb04ab DW |
1524 | if (first_unmap_block >= XFS_MAX_FILEOFF) { |
1525 | WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF); | |
8f04c47a | 1526 | return 0; |
4bbb04ab | 1527 | } |
8f04c47a | 1528 | |
4bbb04ab DW |
1529 | unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1; |
1530 | while (unmap_len > 0) { | |
02dff7bf | 1531 | ASSERT(tp->t_firstblock == NULLFSBLOCK); |
4bbb04ab DW |
1532 | error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len, |
1533 | flags, XFS_ITRUNC_MAX_EXTENTS); | |
8f04c47a | 1534 | if (error) |
d5a2e289 | 1535 | goto out; |
1da177e4 | 1536 | |
6dd379c7 | 1537 | /* free the just unmapped extents */ |
9e28a242 | 1538 | error = xfs_defer_finish(&tp); |
8f04c47a | 1539 | if (error) |
9b1f4e98 | 1540 | goto out; |
1da177e4 | 1541 | } |
8f04c47a | 1542 | |
4919d42a DW |
1543 | if (whichfork == XFS_DATA_FORK) { |
1544 | /* Remove all pending CoW reservations. */ | |
1545 | error = xfs_reflink_cancel_cow_blocks(ip, &tp, | |
4bbb04ab | 1546 | first_unmap_block, XFS_MAX_FILEOFF, true); |
4919d42a DW |
1547 | if (error) |
1548 | goto out; | |
aa8968f2 | 1549 | |
4919d42a DW |
1550 | xfs_itruncate_clear_reflink_flags(ip); |
1551 | } | |
aa8968f2 | 1552 | |
673e8e59 CH |
1553 | /* |
1554 | * Always re-log the inode so that our permanent transaction can keep | |
1555 | * on rolling it forward in the log. | |
1556 | */ | |
1557 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | |
1558 | ||
1559 | trace_xfs_itruncate_extents_end(ip, new_size); | |
1560 | ||
8f04c47a CH |
1561 | out: |
1562 | *tpp = tp; | |
1563 | return error; | |
8f04c47a CH |
1564 | } |
1565 | ||
c24b5dfa DC |
1566 | int |
1567 | xfs_release( | |
1568 | xfs_inode_t *ip) | |
1569 | { | |
1570 | xfs_mount_t *mp = ip->i_mount; | |
1571 | int error; | |
1572 | ||
c19b3b05 | 1573 | if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0)) |
c24b5dfa DC |
1574 | return 0; |
1575 | ||
1576 | /* If this is a read-only mount, don't do this (would generate I/O) */ | |
1577 | if (mp->m_flags & XFS_MOUNT_RDONLY) | |
1578 | return 0; | |
1579 | ||
1580 | if (!XFS_FORCED_SHUTDOWN(mp)) { | |
1581 | int truncated; | |
1582 | ||
c24b5dfa DC |
1583 | /* |
1584 | * If we previously truncated this file and removed old data | |
1585 | * in the process, we want to initiate "early" writeout on | |
1586 | * the last close. This is an attempt to combat the notorious | |
1587 | * NULL files problem which is particularly noticeable from a | |
1588 | * truncate down, buffered (re-)write (delalloc), followed by | |
1589 | * a crash. What we are effectively doing here is | |
1590 | * significantly reducing the time window where we'd otherwise | |
1591 | * be exposed to that problem. | |
1592 | */ | |
1593 | truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); | |
1594 | if (truncated) { | |
1595 | xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE); | |
eac152b4 | 1596 | if (ip->i_delayed_blks > 0) { |
2451337d | 1597 | error = filemap_flush(VFS_I(ip)->i_mapping); |
c24b5dfa DC |
1598 | if (error) |
1599 | return error; | |
1600 | } | |
1601 | } | |
1602 | } | |
1603 | ||
54d7b5c1 | 1604 | if (VFS_I(ip)->i_nlink == 0) |
c24b5dfa DC |
1605 | return 0; |
1606 | ||
1607 | if (xfs_can_free_eofblocks(ip, false)) { | |
1608 | ||
a36b9261 BF |
1609 | /* |
1610 | * If the inode is being opened, written and closed |
1611 | * frequently and we have delayed allocation blocks outstanding |
1612 | * (e.g. streaming writes from the NFS server), truncating the |
1613 | * blocks past EOF will cause fragmentation to occur. |
1614 | * | |
1615 | * In this case don't do the truncation, but we have to be | |
1616 | * careful how we detect this case. Blocks beyond EOF show up as | |
1617 | * i_delayed_blks even when the inode is clean, so we need to | |
1618 | * truncate them away first before checking for a dirty release. | |
1619 | * Hence on the first dirty close we will still remove the | |
1620 | * speculative allocation, but after that we will leave it in | |
1621 | * place. | |
1622 | */ | |
1623 | if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE)) | |
1624 | return 0; | |
c24b5dfa DC |
1625 | /* |
1626 | * If we can't get the iolock just skip truncating the blocks | |
c1e8d7c6 | 1627 | * past EOF because we could deadlock with the mmap_lock |
a36b9261 | 1628 | * otherwise. We'll get another chance to drop them once the |
c24b5dfa DC |
1629 | * last reference to the inode is dropped, so we'll never leak |
1630 | * blocks permanently. | |
c24b5dfa | 1631 | */ |
a36b9261 BF |
1632 | if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { |
1633 | error = xfs_free_eofblocks(ip); | |
1634 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | |
1635 | if (error) | |
1636 | return error; | |
1637 | } | |
c24b5dfa DC |
1638 | |
1639 | /* delalloc blocks after truncation means it really is dirty */ | |
1640 | if (ip->i_delayed_blks) | |
1641 | xfs_iflags_set(ip, XFS_IDIRTY_RELEASE); | |
1642 | } | |
1643 | return 0; | |
1644 | } | |
1645 | ||
f7be2d7f BF |
1646 | /* |
1647 | * xfs_inactive_truncate | |
1648 | * | |
1649 | * Called to perform a truncate when an inode becomes unlinked. | |
1650 | */ | |
1651 | STATIC int | |
1652 | xfs_inactive_truncate( | |
1653 | struct xfs_inode *ip) | |
1654 | { | |
1655 | struct xfs_mount *mp = ip->i_mount; | |
1656 | struct xfs_trans *tp; | |
1657 | int error; | |
1658 | ||
253f4911 | 1659 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); |
f7be2d7f BF |
1660 | if (error) { |
1661 | ASSERT(XFS_FORCED_SHUTDOWN(mp)); | |
f7be2d7f BF |
1662 | return error; |
1663 | } | |
f7be2d7f BF |
1664 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
1665 | xfs_trans_ijoin(tp, ip, 0); | |
1666 | ||
1667 | /* | |
1668 | * Log the inode size first to prevent stale data exposure in the event | |
1669 | * of a system crash before the truncate completes. See the related | |
69bca807 | 1670 | * comment in xfs_vn_setattr_size() for details. |
f7be2d7f BF |
1671 | */ |
1672 | ip->i_d.di_size = 0; | |
1673 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | |
1674 | ||
1675 | error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); | |
1676 | if (error) | |
1677 | goto error_trans_cancel; | |
1678 | ||
daf83964 | 1679 | ASSERT(ip->i_df.if_nextents == 0); |
f7be2d7f | 1680 | |
70393313 | 1681 | error = xfs_trans_commit(tp); |
f7be2d7f BF |
1682 | if (error) |
1683 | goto error_unlock; | |
1684 | ||
1685 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
1686 | return 0; | |
1687 | ||
1688 | error_trans_cancel: | |
4906e215 | 1689 | xfs_trans_cancel(tp); |
f7be2d7f BF |
1690 | error_unlock: |
1691 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
1692 | return error; | |
1693 | } | |
1694 | ||
88877d2b BF |
1695 | /* |
1696 | * xfs_inactive_ifree() | |
1697 | * | |
1698 | * Perform the inode free when an inode is unlinked. | |
1699 | */ | |
1700 | STATIC int | |
1701 | xfs_inactive_ifree( | |
1702 | struct xfs_inode *ip) | |
1703 | { | |
88877d2b BF |
1704 | struct xfs_mount *mp = ip->i_mount; |
1705 | struct xfs_trans *tp; | |
1706 | int error; | |
1707 | ||
9d43b180 | 1708 | /* |
76d771b4 CH |
1709 | * We try to use a per-AG reservation for any block needed by the finobt |
1710 | * tree, but as the finobt feature predates the per-AG reservation | |
1711 | * support a degraded file system might not have enough space for the | |
1712 | * reservation at mount time. In that case try to dip into the reserved | |
1713 | * pool and pray. | |
9d43b180 BF |
1714 | * |
1715 | * Send a warning if the reservation does happen to fail, as the inode | |
1716 | * now remains allocated and sits on the unlinked list until the fs is | |
1717 | * repaired. | |
1718 | */ | |
e1f6ca11 | 1719 | if (unlikely(mp->m_finobt_nores)) { |
76d771b4 CH |
1720 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, |
1721 | XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, | |
1722 | &tp); | |
1723 | } else { | |
1724 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp); | |
1725 | } | |
88877d2b | 1726 | if (error) { |
2451337d | 1727 | if (error == -ENOSPC) { |
9d43b180 BF |
1728 | xfs_warn_ratelimited(mp, |
1729 | "Failed to remove inode(s) from unlinked list. " | |
1730 | "Please free space, unmount and run xfs_repair."); | |
1731 | } else { | |
1732 | ASSERT(XFS_FORCED_SHUTDOWN(mp)); | |
1733 | } | |
88877d2b BF |
1734 | return error; |
1735 | } | |
1736 | ||
96355d5a DC |
1737 | /* |
1738 | * We do not hold the inode locked across the entire rolling transaction | |
1739 | * here. We only need to hold it for the first transaction that | |
1740 | * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the | |
1741 | * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode | |
1742 | * here breaks the relationship between cluster buffer invalidation and | |
1743 | * stale inode invalidation on cluster buffer item journal commit | |
1744 | * completion, and can result in leaving dirty stale inodes hanging | |
1745 | * around in memory. | |
1746 | * | |
1747 | * We have no need for serialising this inode operation against other | |
1748 | * operations - we freed the inode and hence reallocation is required | |
1749 | * and that will serialise on reallocating the space the deferops need | |
1750 | * to free. Hence we can unlock the inode on the first commit of | |
1751 | * the transaction rather than roll it right through the deferops. This | |
1752 | * avoids relogging the XFS_ISTALE inode. | |
1753 | * | |
1754 | * We check that xfs_ifree() hasn't grown an internal transaction roll | |
1755 | * by asserting that the inode is still locked when it returns. | |
1756 | */ | |
88877d2b | 1757 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
96355d5a | 1758 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); |
88877d2b | 1759 | |
0e0417f3 | 1760 | error = xfs_ifree(tp, ip); |
96355d5a | 1761 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
88877d2b BF |
1762 | if (error) { |
1763 | /* | |
1764 | * If we fail to free the inode, shut down. The cancel | |
1765 | * might do that, we need to make sure. Otherwise the | |
1766 | * inode might be lost for a long time or forever. | |
1767 | */ | |
1768 | if (!XFS_FORCED_SHUTDOWN(mp)) { | |
1769 | xfs_notice(mp, "%s: xfs_ifree returned error %d", | |
1770 | __func__, error); | |
1771 | xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); | |
1772 | } | |
4906e215 | 1773 | xfs_trans_cancel(tp); |
88877d2b BF |
1774 | return error; |
1775 | } | |
1776 | ||
1777 | /* | |
1778 | * Credit the quota account(s). The inode is gone. | |
1779 | */ | |
1780 | xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1); | |
1781 | ||
1782 | /* | |
d4a97a04 BF |
1783 | * Just ignore errors at this point. There is nothing we can do except |
1784 | * to try to keep going. Make sure it's not a silent error. | |
88877d2b | 1785 | */ |
70393313 | 1786 | error = xfs_trans_commit(tp); |
88877d2b BF |
1787 | if (error) |
1788 | xfs_notice(mp, "%s: xfs_trans_commit returned error %d", | |
1789 | __func__, error); | |
1790 | ||
88877d2b BF |
1791 | return 0; |
1792 | } | |
1793 | ||
c24b5dfa DC |
1794 | /* |
1795 | * xfs_inactive | |
1796 | * | |
1797 | * This is called when the reference count for the vnode |
1798 | * goes to zero. If the file has been unlinked, then it must | |
1799 | * now be truncated. Also, we clear all of the read-ahead state | |
1800 | * kept for the inode here since the file is now closed. | |
1801 | */ | |
74564fb4 | 1802 | void |
c24b5dfa DC |
1803 | xfs_inactive( |
1804 | xfs_inode_t *ip) | |
1805 | { | |
3d3c8b52 | 1806 | struct xfs_mount *mp; |
3d3c8b52 JL |
1807 | int error; |
1808 | int truncate = 0; | |
c24b5dfa DC |
1809 | |
1810 | /* | |
1811 | * If the inode is already free, then there can be nothing | |
1812 | * to clean up here. | |
1813 | */ | |
c19b3b05 | 1814 | if (VFS_I(ip)->i_mode == 0) { |
c24b5dfa | 1815 | ASSERT(ip->i_df.if_broot_bytes == 0); |
74564fb4 | 1816 | return; |
c24b5dfa DC |
1817 | } |
1818 | ||
1819 | mp = ip->i_mount; | |
17c12bcd | 1820 | ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY)); |
c24b5dfa | 1821 | |
c24b5dfa DC |
1822 | /* If this is a read-only mount, don't do this (would generate I/O) */ |
1823 | if (mp->m_flags & XFS_MOUNT_RDONLY) | |
74564fb4 | 1824 | return; |
c24b5dfa | 1825 | |
6231848c | 1826 | /* Try to clean out the cow blocks if there are any. */ |
51d62690 | 1827 | if (xfs_inode_has_cow_data(ip)) |
6231848c DW |
1828 | xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true); |
1829 | ||
54d7b5c1 | 1830 | if (VFS_I(ip)->i_nlink != 0) { |
c24b5dfa DC |
1831 | /* |
1832 | * force is true because we are evicting an inode from the | |
1833 | * cache. Post-eof blocks must be freed, lest we end up with | |
1834 | * broken free space accounting. | |
3b4683c2 BF |
1835 | * |
1836 | * Note: don't bother with iolock here since lockdep complains | |
1837 | * about acquiring it in reclaim context. We have the only | |
1838 | * reference to the inode at this point anyway. |
c24b5dfa | 1839 | */ |
3b4683c2 | 1840 | if (xfs_can_free_eofblocks(ip, true)) |
a36b9261 | 1841 | xfs_free_eofblocks(ip); |
74564fb4 BF |
1842 | |
1843 | return; | |
c24b5dfa DC |
1844 | } |
1845 | ||
c19b3b05 | 1846 | if (S_ISREG(VFS_I(ip)->i_mode) && |
c24b5dfa | 1847 | (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 || |
daf83964 | 1848 | ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0)) |
c24b5dfa DC |
1849 | truncate = 1; |
1850 | ||
c14cfcca | 1851 | error = xfs_qm_dqattach(ip); |
c24b5dfa | 1852 | if (error) |
74564fb4 | 1853 | return; |
c24b5dfa | 1854 | |
c19b3b05 | 1855 | if (S_ISLNK(VFS_I(ip)->i_mode)) |
36b21dde | 1856 | error = xfs_inactive_symlink(ip); |
f7be2d7f BF |
1857 | else if (truncate) |
1858 | error = xfs_inactive_truncate(ip); | |
1859 | if (error) | |
74564fb4 | 1860 | return; |
c24b5dfa DC |
1861 | |
1862 | /* | |
1863 | * If there are attributes associated with the file then blow them away | |
1864 | * now. The code calls a routine that recursively deconstructs the | |
6dfe5a04 | 1865 | * attribute fork. It also blows away the in-core attribute fork. |
c24b5dfa | 1866 | */ |
6dfe5a04 | 1867 | if (XFS_IFORK_Q(ip)) { |
c24b5dfa DC |
1868 | error = xfs_attr_inactive(ip); |
1869 | if (error) | |
74564fb4 | 1870 | return; |
c24b5dfa DC |
1871 | } |
1872 | ||
6dfe5a04 | 1873 | ASSERT(!ip->i_afp); |
6dfe5a04 | 1874 | ASSERT(ip->i_d.di_forkoff == 0); |
c24b5dfa DC |
1875 | |
1876 | /* | |
1877 | * Free the inode. | |
1878 | */ | |
88877d2b BF |
1879 | error = xfs_inactive_ifree(ip); |
1880 | if (error) | |
74564fb4 | 1881 | return; |
c24b5dfa DC |
1882 | |
1883 | /* | |
1884 | * Release the dquots held by inode, if any. | |
1885 | */ | |
1886 | xfs_qm_dqdetach(ip); | |
c24b5dfa DC |
1887 | } |
1888 | ||
9b247179 DW |
1889 | /* |
1890 | * In-Core Unlinked List Lookups | |
1891 | * ============================= | |
1892 | * | |
1893 | * Every inode is supposed to be reachable from some other piece of metadata | |
1894 | * with the exception of the root directory. Inodes with a connection to a | |
1895 | * file descriptor but not linked from anywhere in the on-disk directory tree | |
1896 | * are collectively known as unlinked inodes, though the filesystem itself | |
1897 | * maintains links to these inodes so that on-disk metadata are consistent. | |
1898 | * | |
1899 | * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI | |
1900 | * header contains a number of buckets that point to an inode, and each inode | |
1901 | * record has a pointer to the next inode in the hash chain. This | |
1902 | * singly-linked list causes scaling problems in the iunlink remove function | |
1903 | * because we must walk that list to find the inode that points to the inode | |
1904 | * being removed from the unlinked hash bucket list. | |
1905 | * | |
1906 | * What if we modelled the unlinked list as a collection of records capturing | |
1907 | * "X.next_unlinked = Y" relations? If we indexed those records on Y, we'd | |
1908 | * have a fast way to look up unlinked list predecessors, which avoids the | |
1909 | * slow list walk. That's exactly what we do here (in-core) with a per-AG | |
1910 | * rhashtable. | |
1911 | * | |
1912 | * Because this is a backref cache, we ignore operational failures since the | |
1913 | * iunlink code can fall back to the slow bucket walk. The only errors that | |
1914 | * should bubble out are for obviously incorrect situations. | |
1915 | * | |
1916 | * All users of the backref cache MUST hold the AGI buffer lock to serialize | |
1917 | * access or have otherwise provided for concurrency control. | |
1918 | */ | |
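/*
 * Worked example: suppose bucket 12 holds the on-disk chain
 * 140 -> 76 -> 12 -> NULLAGINO (all three aginos hash to bucket 12).
 * The backref cache then holds the records {X=140, Y=76} and
 * {X=76, Y=12}, keyed on Y. A lookup for Y=12 returns X=76, so
 * removing inode 12 can repoint inode 76's di_next_unlinked without
 * walking the chain from 140.
 */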
1919 | ||
1920 | /* Capture a "X.next_unlinked = Y" relationship. */ | |
1921 | struct xfs_iunlink { | |
1922 | struct rhash_head iu_rhash_head; | |
1923 | xfs_agino_t iu_agino; /* X */ | |
1924 | xfs_agino_t iu_next_unlinked; /* Y */ | |
1925 | }; | |
1926 | ||
1927 | /* Unlinked list predecessor lookup hashtable construction */ | |
1928 | static int | |
1929 | xfs_iunlink_obj_cmpfn( | |
1930 | struct rhashtable_compare_arg *arg, | |
1931 | const void *obj) | |
1932 | { | |
1933 | const xfs_agino_t *key = arg->key; | |
1934 | const struct xfs_iunlink *iu = obj; | |
1935 | ||
1936 | if (iu->iu_next_unlinked != *key) | |
1937 | return 1; | |
1938 | return 0; | |
1939 | } | |
1940 | ||
1941 | static const struct rhashtable_params xfs_iunlink_hash_params = { | |
1942 | .min_size = XFS_AGI_UNLINKED_BUCKETS, | |
1943 | .key_len = sizeof(xfs_agino_t), | |
1944 | .key_offset = offsetof(struct xfs_iunlink, | |
1945 | iu_next_unlinked), | |
1946 | .head_offset = offsetof(struct xfs_iunlink, iu_rhash_head), | |
1947 | .automatic_shrinking = true, | |
1948 | .obj_cmpfn = xfs_iunlink_obj_cmpfn, | |
1949 | }; | |
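/*
 * Note that the key is iu_next_unlinked (Y), not iu_agino (X): a
 * lookup answers "which inode points at Y?", which is exactly the
 * predecessor query that unlinked list removal needs.
 */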
1950 | ||
1951 | /* | |
1952 | * Return X, where X.next_unlinked == @agino. Returns NULLAGINO if no such | |
1953 | * relation is found. | |
1954 | */ | |
1955 | static xfs_agino_t | |
1956 | xfs_iunlink_lookup_backref( | |
1957 | struct xfs_perag *pag, | |
1958 | xfs_agino_t agino) | |
1959 | { | |
1960 | struct xfs_iunlink *iu; | |
1961 | ||
1962 | iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino, | |
1963 | xfs_iunlink_hash_params); | |
1964 | return iu ? iu->iu_agino : NULLAGINO; | |
1965 | } | |
1966 | ||
1967 | /* | |
1968 | * Take ownership of an iunlink cache entry and insert it into the hash table. | |
1969 | * If successful, the entry will be owned by the cache; if not, it is freed. | |
1970 | * Either way, the caller does not own @iu after this call. | |
1971 | */ | |
1972 | static int | |
1973 | xfs_iunlink_insert_backref( | |
1974 | struct xfs_perag *pag, | |
1975 | struct xfs_iunlink *iu) | |
1976 | { | |
1977 | int error; | |
1978 | ||
1979 | error = rhashtable_insert_fast(&pag->pagi_unlinked_hash, | |
1980 | &iu->iu_rhash_head, xfs_iunlink_hash_params); | |
1981 | /* | |
1982 | * Fail loudly if there already was an entry because that's a sign of | |
1983 | * corruption of in-memory data. Also fail loudly if we see an error | |
1984 | * code we didn't anticipate from the rhashtable code. Currently we | |
1985 | * only anticipate ENOMEM. | |
1986 | */ | |
1987 | if (error) { | |
1988 | WARN(error != -ENOMEM, "iunlink cache insert error %d", error); | |
1989 | kmem_free(iu); | |
1990 | } | |
1991 | /* | |
1992 | * Absorb any runtime errors that aren't a result of corruption because | |
1993 | * this is a cache and we can always fall back to bucket list scanning. | |
1994 | */ | |
1995 | if (error != 0 && error != -EEXIST) | |
1996 | error = 0; | |
1997 | return error; | |
1998 | } | |
1999 | ||
2000 | /* Remember that @prev_agino.next_unlinked = @this_agino. */ | |
2001 | static int | |
2002 | xfs_iunlink_add_backref( | |
2003 | struct xfs_perag *pag, | |
2004 | xfs_agino_t prev_agino, | |
2005 | xfs_agino_t this_agino) | |
2006 | { | |
2007 | struct xfs_iunlink *iu; | |
2008 | ||
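	/*
	 * Error injection point: skip the cache entirely so that testing
	 * exercises the slow bucket-walk fallback path.
	 */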
2009 | if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK)) | |
2010 | return 0; | |
2011 | ||
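	/*
	 * KM_NOFS because we are called in transaction context with the
	 * AGI buffer locked; the allocation must not recurse into
	 * filesystem reclaim. kmem_zalloc() without KM_MAYFAIL retries
	 * internally, so no NULL check is needed here.
	 */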
707e0dda | 2012 | iu = kmem_zalloc(sizeof(*iu), KM_NOFS); |
9b247179 DW |
2013 | iu->iu_agino = prev_agino; |
2014 | iu->iu_next_unlinked = this_agino; | |
2015 | ||
2016 | return xfs_iunlink_insert_backref(pag, iu); | |
2017 | } | |
2018 | ||
2019 | /* | |
2020 | * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked. | |
2021 | * If @next_unlinked is NULLAGINO, we drop the backref and exit. If there | |
2022 | * wasn't any such entry then we don't bother. | |
2023 | */ | |
2024 | static int | |
2025 | xfs_iunlink_change_backref( | |
2026 | struct xfs_perag *pag, | |
2027 | xfs_agino_t agino, | |
2028 | xfs_agino_t next_unlinked) | |
2029 | { | |
2030 | struct xfs_iunlink *iu; | |
2031 | int error; | |
2032 | ||
2033 | /* Look up the old entry; if there wasn't one then exit. */ | |
2034 | iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino, | |
2035 | xfs_iunlink_hash_params); | |
2036 | if (!iu) | |
2037 | return 0; | |
2038 | ||
2039 | /* | |
2040 | * Remove the entry. This shouldn't ever return an error, but if we | |
2041 | * couldn't remove the old entry we don't want to add it again to the | |
2042 | * hash table, and if the entry disappeared on us then someone's | |
2043 | * violated the locking rules and we need to fail loudly. Either way | |
2044 | * we cannot remove the inode because internal state is or would have | |
2045 | * been corrupt. | |
2046 | */ | |
2047 | error = rhashtable_remove_fast(&pag->pagi_unlinked_hash, | |
2048 | &iu->iu_rhash_head, xfs_iunlink_hash_params); | |
2049 | if (error) | |
2050 | return error; | |
2051 | ||
2052 | /* If there is no new next entry just free our item and return. */ | |
2053 | if (next_unlinked == NULLAGINO) { | |
2054 | kmem_free(iu); | |
2055 | return 0; | |
2056 | } | |
2057 | ||
2058 | /* Update the entry and re-add it to the hash table. */ | |
2059 | iu->iu_next_unlinked = next_unlinked; | |
2060 | return xfs_iunlink_insert_backref(pag, iu); | |
2061 | } | |
2062 | ||
2063 | /* Set up the in-core predecessor structures. */ | |
2064 | int | |
2065 | xfs_iunlink_init( | |
2066 | struct xfs_perag *pag) | |
2067 | { | |
2068 | return rhashtable_init(&pag->pagi_unlinked_hash, | |
2069 | &xfs_iunlink_hash_params); | |
2070 | } | |
2071 | ||
2072 | /* Free the in-core predecessor structures. */ | |
2073 | static void | |
2074 | xfs_iunlink_free_item( | |
2075 | void *ptr, | |
2076 | void *arg) | |
2077 | { | |
2078 | struct xfs_iunlink *iu = ptr; | |
2079 | bool *freed_anything = arg; | |
2080 | ||
2081 | *freed_anything = true; | |
2082 | kmem_free(iu); | |
2083 | } | |
2084 | ||
2085 | void | |
2086 | xfs_iunlink_destroy( | |
2087 | struct xfs_perag *pag) | |
2088 | { | |
2089 | bool freed_anything = false; | |
2090 | ||
2091 | rhashtable_free_and_destroy(&pag->pagi_unlinked_hash, | |
2092 | xfs_iunlink_free_item, &freed_anything); | |
2093 | ||
2094 | ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount)); | |
2095 | } | |
2096 | ||
9a4a5118 DW |
2097 | /* |
2098 | * Point the AGI unlinked bucket at an inode and log the results. The caller | |
2099 | * is responsible for validating the old value. | |
2100 | */ | |
2101 | STATIC int | |
2102 | xfs_iunlink_update_bucket( | |
2103 | struct xfs_trans *tp, | |
2104 | xfs_agnumber_t agno, | |
2105 | struct xfs_buf *agibp, | |
2106 | unsigned int bucket_index, | |
2107 | xfs_agino_t new_agino) | |
2108 | { | |
370c782b | 2109 | struct xfs_agi *agi = agibp->b_addr; |
9a4a5118 DW |
2110 | xfs_agino_t old_value; |
2111 | int offset; | |
2112 | ||
2113 | ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino)); | |
2114 | ||
2115 | old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]); | |
2116 | trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index, | |
2117 | old_value, new_agino); | |
2118 | ||
2119 | /* | |
2120 | * We should never find the head of the list already set to the value | |
2121 | * passed in because either we're adding or removing ourselves from the | |
2122 | * head of the list. | |
2123 | */ | |
a5155b87 | 2124 | if (old_value == new_agino) { |
8d57c216 | 2125 | xfs_buf_mark_corrupt(agibp); |
9a4a5118 | 2126 | return -EFSCORRUPTED; |
a5155b87 | 2127 | } |
9a4a5118 DW |
2128 | |
2129 | agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino); | |
2130 | offset = offsetof(struct xfs_agi, agi_unlinked) + | |
2131 | (sizeof(xfs_agino_t) * bucket_index); | |
2132 | xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1); | |
2133 | return 0; | |
2134 | } | |
2135 | ||
f2fc16a3 DW |
2136 | /* Set an on-disk inode's next_unlinked pointer. */ |
2137 | STATIC void | |
2138 | xfs_iunlink_update_dinode( | |
2139 | struct xfs_trans *tp, | |
2140 | xfs_agnumber_t agno, | |
2141 | xfs_agino_t agino, | |
2142 | struct xfs_buf *ibp, | |
2143 | struct xfs_dinode *dip, | |
2144 | struct xfs_imap *imap, | |
2145 | xfs_agino_t next_agino) | |
2146 | { | |
2147 | struct xfs_mount *mp = tp->t_mountp; | |
2148 | int offset; | |
2149 | ||
2150 | ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino)); | |
2151 | ||
2152 | trace_xfs_iunlink_update_dinode(mp, agno, agino, | |
2153 | be32_to_cpu(dip->di_next_unlinked), next_agino); | |
2154 | ||
2155 | dip->di_next_unlinked = cpu_to_be32(next_agino); | |
2156 | offset = imap->im_boffset + | |
2157 | offsetof(struct xfs_dinode, di_next_unlinked); | |
2158 | ||
2159 | /* need to recalc the inode CRC if appropriate */ | |
2160 | xfs_dinode_calc_crc(mp, dip); | |
2161 | xfs_trans_inode_buf(tp, ibp); | |
2162 | xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1); | |
f2fc16a3 DW |
2163 | } |
2164 | ||
2165 | /* Set an in-core inode's unlinked pointer and return the old value. */ | |
2166 | STATIC int | |
2167 | xfs_iunlink_update_inode( | |
2168 | struct xfs_trans *tp, | |
2169 | struct xfs_inode *ip, | |
2170 | xfs_agnumber_t agno, | |
2171 | xfs_agino_t next_agino, | |
2172 | xfs_agino_t *old_next_agino) | |
2173 | { | |
2174 | struct xfs_mount *mp = tp->t_mountp; | |
2175 | struct xfs_dinode *dip; | |
2176 | struct xfs_buf *ibp; | |
2177 | xfs_agino_t old_value; | |
2178 | int error; | |
2179 | ||
2180 | ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino)); | |
2181 | ||
c1995079 | 2182 | error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0); |
f2fc16a3 DW |
2183 | if (error) |
2184 | return error; | |
2185 | ||
2186 | /* Make sure the old pointer isn't garbage. */ | |
2187 | old_value = be32_to_cpu(dip->di_next_unlinked); | |
2188 | if (!xfs_verify_agino_or_null(mp, agno, old_value)) { | |
a5155b87 DW |
2189 | xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip, |
2190 | sizeof(*dip), __this_address); | |
f2fc16a3 DW |
2191 | error = -EFSCORRUPTED; |
2192 | goto out; | |
2193 | } | |
2194 | ||
2195 | /* | |
2196 | * Since we're updating a linked list, we should never find that the | |
2197 | * current pointer is the same as the new value, unless we're | |
2198 | * terminating the list. | |
2199 | */ | |
2200 | *old_next_agino = old_value; | |
2201 | if (old_value == next_agino) { | |
a5155b87 DW |
2202 | if (next_agino != NULLAGINO) { |
2203 | xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, | |
2204 | dip, sizeof(*dip), __this_address); | |
f2fc16a3 | 2205 | error = -EFSCORRUPTED; |
a5155b87 | 2206 | } |
f2fc16a3 DW |
2207 | goto out; |
2208 | } | |
2209 | ||
2210 | /* Ok, update the new pointer. */ | |
2211 | xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino), | |
2212 | ibp, dip, &ip->i_imap, next_agino); | |
2213 | return 0; | |
2214 | out: | |
2215 | xfs_trans_brelse(tp, ibp); | |
2216 | return error; | |
2217 | } | |
2218 | ||
1da177e4 | 2219 | /* |
c4a6bf7f DW |
2220 | * This is called when the inode's link count has gone to 0 or we are creating |
2221 | * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0. | |
54d7b5c1 DC |
2222 | * |
2223 | * We place the on-disk inode on a list in the AGI. It will be pulled from this | |
2224 | * list when the inode is freed. | |
1da177e4 | 2225 | */ |
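/*
 * In sketch form, with b = agino % XFS_AGI_UNLINKED_BUCKETS, insertion
 * is at the head of the bucket list:
 *
 *	ip->di_next_unlinked = agi_unlinked[b];	(old head, if any)
 *	agi_unlinked[b] = agino;		(new head)
 *
 * plus a backref record {X=agino, Y=old head} so that removing the old
 * head later can find this inode quickly.
 */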
54d7b5c1 | 2226 | STATIC int |
1da177e4 | 2227 | xfs_iunlink( |
5837f625 DW |
2228 | struct xfs_trans *tp, |
2229 | struct xfs_inode *ip) | |
1da177e4 | 2230 | { |
5837f625 DW |
2231 | struct xfs_mount *mp = tp->t_mountp; |
2232 | struct xfs_agi *agi; | |
5837f625 | 2233 | struct xfs_buf *agibp; |
86bfd375 | 2234 | xfs_agino_t next_agino; |
5837f625 DW |
2235 | xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino); |
2236 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); | |
2237 | short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; | |
5837f625 | 2238 | int error; |
1da177e4 | 2239 | |
c4a6bf7f | 2240 | ASSERT(VFS_I(ip)->i_nlink == 0); |
c19b3b05 | 2241 | ASSERT(VFS_I(ip)->i_mode != 0); |
4664c66c | 2242 | trace_xfs_iunlink(ip); |
1da177e4 | 2243 | |
5837f625 DW |
2244 | /* Get the agi buffer first. It ensures lock ordering on the list. */ |
2245 | error = xfs_read_agi(mp, tp, agno, &agibp); | |
859d7182 | 2246 | if (error) |
1da177e4 | 2247 | return error; |
370c782b | 2248 | agi = agibp->b_addr; |
5e1be0fb | 2249 | |
1da177e4 | 2250 | /* |
86bfd375 DW |
2251 | * Get the index into the agi hash table for the list this inode will |
2252 | * go on. Make sure the pointer isn't garbage and that this inode | |
2253 | * isn't already on the list. | |
1da177e4 | 2254 | */ |
86bfd375 DW |
2255 | next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); |
2256 | if (next_agino == agino || | |
a5155b87 | 2257 | !xfs_verify_agino_or_null(mp, agno, next_agino)) { |
8d57c216 | 2258 | xfs_buf_mark_corrupt(agibp); |
86bfd375 | 2259 | return -EFSCORRUPTED; |
a5155b87 | 2260 | } |
1da177e4 | 2261 | |
86bfd375 | 2262 | if (next_agino != NULLAGINO) { |
9b247179 | 2263 | xfs_agino_t old_agino; |
f2fc16a3 | 2264 | |
1da177e4 | 2265 | /* |
f2fc16a3 DW |
2266 | * There is already another inode in the bucket, so point this |
2267 | * inode to the current head of the list. | |
1da177e4 | 2268 | */ |
f2fc16a3 DW |
2269 | error = xfs_iunlink_update_inode(tp, ip, agno, next_agino, |
2270 | &old_agino); | |
c319b58b VA |
2271 | if (error) |
2272 | return error; | |
f2fc16a3 | 2273 | ASSERT(old_agino == NULLAGINO); |
9b247179 DW |
2274 | |
2275 | /* | |
2276 | * agino has been unlinked, add a backref from the next inode | |
2277 | * back to agino. | |
2278 | */ | |
92a00544 | 2279 | error = xfs_iunlink_add_backref(agibp->b_pag, agino, next_agino); |
9b247179 DW |
2280 | if (error) |
2281 | return error; | |
1da177e4 LT |
2282 | } |
2283 | ||
9a4a5118 DW |
2284 | /* Point the head of the list to point to this inode. */ |
2285 | return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino); | |
1da177e4 LT |
2286 | } |
2287 | ||
23ffa52c DW |
2288 | /* Return the imap, dinode pointer, and buffer for an inode. */ |
2289 | STATIC int | |
2290 | xfs_iunlink_map_ino( | |
2291 | struct xfs_trans *tp, | |
2292 | xfs_agnumber_t agno, | |
2293 | xfs_agino_t agino, | |
2294 | struct xfs_imap *imap, | |
2295 | struct xfs_dinode **dipp, | |
2296 | struct xfs_buf **bpp) | |
2297 | { | |
2298 | struct xfs_mount *mp = tp->t_mountp; | |
2299 | int error; | |
2300 | ||
2301 | imap->im_blkno = 0; | |
2302 | error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0); | |
2303 | if (error) { | |
2304 | xfs_warn(mp, "%s: xfs_imap returned error %d.", | |
2305 | __func__, error); | |
2306 | return error; | |
2307 | } | |
2308 | ||
c1995079 | 2309 | error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0); |
23ffa52c DW |
2310 | if (error) { |
2311 | xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.", | |
2312 | __func__, error); | |
2313 | return error; | |
2314 | } | |
2315 | ||
2316 | return 0; | |
2317 | } | |
2318 | ||
2319 | /* | |
2320 | * Walk the unlinked chain from @head_agino until we find the inode that | |
2321 | * points to @target_agino. Return the inode number, map, dinode pointer, | |
2322 | * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp. | |
2323 | * | |
2324 | * @tp, @pag, @head_agino, and @target_agino are input parameters. | |
2325 | * @agino, @imap, @dipp, and @bpp are all output parameters. | |
2326 | * | |
2327 | * Do not call this function if @target_agino is the head of the list. | |
2328 | */ | |
2329 | STATIC int | |
2330 | xfs_iunlink_map_prev( | |
2331 | struct xfs_trans *tp, | |
2332 | xfs_agnumber_t agno, | |
2333 | xfs_agino_t head_agino, | |
2334 | xfs_agino_t target_agino, | |
2335 | xfs_agino_t *agino, | |
2336 | struct xfs_imap *imap, | |
2337 | struct xfs_dinode **dipp, | |
9b247179 DW |
2338 | struct xfs_buf **bpp, |
2339 | struct xfs_perag *pag) | |
23ffa52c DW |
2340 | { |
2341 | struct xfs_mount *mp = tp->t_mountp; | |
2342 | xfs_agino_t next_agino; | |
2343 | int error; | |
2344 | ||
2345 | ASSERT(head_agino != target_agino); | |
2346 | *bpp = NULL; | |
2347 | ||
9b247179 DW |
2348 | /* See if our backref cache can find it faster. */ |
2349 | *agino = xfs_iunlink_lookup_backref(pag, target_agino); | |
2350 | if (*agino != NULLAGINO) { | |
2351 | error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp); | |
2352 | if (error) | |
2353 | return error; | |
2354 | ||
2355 | if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino) | |
2356 | return 0; | |
2357 | ||
2358 | /* | |
2359 | * If we get here the cache contents were corrupt, so drop the | |
2360 | * buffer and fall back to walking the bucket list. | |
2361 | */ | |
2362 | xfs_trans_brelse(tp, *bpp); | |
2363 | *bpp = NULL; | |
2364 | WARN_ON_ONCE(1); | |
2365 | } | |
2366 | ||
2367 | trace_xfs_iunlink_map_prev_fallback(mp, agno); | |
2368 | ||
2369 | /* Otherwise, walk the entire bucket until we find it. */ | |
23ffa52c DW |
2370 | next_agino = head_agino; |
2371 | while (next_agino != target_agino) { | |
2372 | xfs_agino_t unlinked_agino; | |
2373 | ||
2374 | if (*bpp) | |
2375 | xfs_trans_brelse(tp, *bpp); | |
2376 | ||
2377 | *agino = next_agino; | |
2378 | error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp, | |
2379 | bpp); | |
2380 | if (error) | |
2381 | return error; | |
2382 | ||
2383 | unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked); | |
2384 | /* | |
2385 | * Make sure this pointer is valid and isn't an obvious | |
2386 | * infinite loop. | |
2387 | */ | |
2388 | if (!xfs_verify_agino(mp, agno, unlinked_agino) || | |
2389 | next_agino == unlinked_agino) { | |
2390 | XFS_CORRUPTION_ERROR(__func__, | |
2391 | XFS_ERRLEVEL_LOW, mp, | |
2392 | *dipp, sizeof(**dipp)); | |
2393 | error = -EFSCORRUPTED; | |
2394 | return error; | |
2395 | } | |
2396 | next_agino = unlinked_agino; | |
2397 | } | |
2398 | ||
2399 | return 0; | |
2400 | } | |
2401 | ||
1da177e4 LT |
2402 | /* |
2403 | * Pull the on-disk inode from the AGI unlinked list. | |
2404 | */ | |
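/*
 * Two cases: if this inode is the bucket head, repoint the AGI bucket
 * at our old next_unlinked; otherwise find our predecessor (backref
 * cache first, full bucket walk as the fallback) and repoint its
 * di_next_unlinked at our old next_unlinked.
 */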
2405 | STATIC int | |
2406 | xfs_iunlink_remove( | |
5837f625 DW |
2407 | struct xfs_trans *tp, |
2408 | struct xfs_inode *ip) | |
1da177e4 | 2409 | { |
5837f625 DW |
2410 | struct xfs_mount *mp = tp->t_mountp; |
2411 | struct xfs_agi *agi; | |
5837f625 | 2412 | struct xfs_buf *agibp; |
5837f625 DW |
2413 | struct xfs_buf *last_ibp; |
2414 | struct xfs_dinode *last_dip = NULL; | |
5837f625 DW |
2415 | xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino); |
2416 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); | |
2417 | xfs_agino_t next_agino; | |
b1d2a068 | 2418 | xfs_agino_t head_agino; |
5837f625 | 2419 | short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; |
5837f625 | 2420 | int error; |
1da177e4 | 2421 | |
4664c66c DW |
2422 | trace_xfs_iunlink_remove(ip); |
2423 | ||
5837f625 | 2424 | /* Get the agi buffer first. It ensures lock ordering on the list. */ |
5e1be0fb CH |
2425 | error = xfs_read_agi(mp, tp, agno, &agibp); |
2426 | if (error) | |
1da177e4 | 2427 | return error; |
370c782b | 2428 | agi = agibp->b_addr; |
5e1be0fb | 2429 | |
1da177e4 | 2430 | /* |
86bfd375 DW |
2431 | * Get the index into the agi hash table for the list this inode is |
2432 | * on. Make sure the head pointer isn't garbage. |
1da177e4 | 2433 | */ |
b1d2a068 DW |
2434 | head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); |
2435 | if (!xfs_verify_agino(mp, agno, head_agino)) { | |
d2e73665 DW |
2436 | XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, |
2437 | agi, sizeof(*agi)); | |
2438 | return -EFSCORRUPTED; | |
2439 | } | |
1da177e4 | 2440 | |
b1d2a068 DW |
2441 | /* |
2442 | * Set our inode's next_unlinked pointer to NULL and then return | |
2443 | * the old pointer value so that we can update whatever was previous | |
2444 | * to us in the list to point to whatever was next in the list. | |
2445 | */ | |
2446 | error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino); | |
2447 | if (error) | |
2448 | return error; | |
9a4a5118 | 2449 | |
9b247179 DW |
2450 | /* |
2451 | * If there was a backref pointing from the next inode back to this | |
2452 | * one, remove it because we've removed this inode from the list. | |
2453 | * | |
2454 | * Later, if this inode was in the middle of the list we'll update | |
2455 | * this inode's backref to point from the next inode. | |
2456 | */ | |
2457 | if (next_agino != NULLAGINO) { | |
92a00544 | 2458 | error = xfs_iunlink_change_backref(agibp->b_pag, next_agino, |
9b247179 DW |
2459 | NULLAGINO); |
2460 | if (error) | |
92a00544 | 2461 | return error; |
9b247179 DW |
2462 | } |
2463 | ||
92a00544 | 2464 | if (head_agino != agino) { |
f2fc16a3 DW |
2465 | struct xfs_imap imap; |
2466 | xfs_agino_t prev_agino; | |
2467 | ||
23ffa52c | 2468 | /* We need to search the list for the inode being freed. */ |
b1d2a068 | 2469 | error = xfs_iunlink_map_prev(tp, agno, head_agino, agino, |
9b247179 | 2470 | &prev_agino, &imap, &last_dip, &last_ibp, |
92a00544 | 2471 | agibp->b_pag); |
23ffa52c | 2472 | if (error) |
92a00544 | 2473 | return error; |
475ee413 | 2474 | |
f2fc16a3 DW |
2475 | /* Point the previous inode on the list to the next inode. */ |
2476 | xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp, | |
2477 | last_dip, &imap, next_agino); | |
9b247179 DW |
2478 | |
2479 | /* | |
2480 | * Now we deal with the backref for this inode. If this inode | |
2481 | * pointed at a real inode, change the backref that pointed to | |
2482 | * us to point to our old next. If this inode was the end of | |
2483 | * the list, delete the backref that pointed to us. Note that | |
2484 | * change_backref takes care of deleting the backref if | |
2485 | * next_agino is NULLAGINO. | |
2486 | */ | |
92a00544 GX |
2487 | return xfs_iunlink_change_backref(agibp->b_pag, agino, |
2488 | next_agino); | |
1da177e4 | 2489 | } |
9b247179 | 2490 | |
92a00544 GX |
2491 | /* Point the head of the list to the next unlinked inode. */ |
2492 | return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, | |
2493 | next_agino); | |
1da177e4 LT |
2494 | } |
2495 | ||
5806165a | 2496 | /* |
71e3e356 DC |
2497 | * Look up the inode number specified and if it is not already marked XFS_ISTALE |
2498 | * mark it stale. We should only find clean inodes in this lookup that aren't | |
2499 | * already stale. | |
5806165a | 2500 | */ |
71e3e356 DC |
2501 | static void |
2502 | xfs_ifree_mark_inode_stale( | |
2503 | struct xfs_buf *bp, | |
5806165a | 2504 | struct xfs_inode *free_ip, |
d9fdd0ad | 2505 | xfs_ino_t inum) |
5806165a | 2506 | { |
71e3e356 DC |
2507 | struct xfs_mount *mp = bp->b_mount; |
2508 | struct xfs_perag *pag = bp->b_pag; | |
2509 | struct xfs_inode_log_item *iip; | |
5806165a DC |
2510 | struct xfs_inode *ip; |
2511 | ||
2512 | retry: | |
2513 | rcu_read_lock(); | |
2514 | ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum)); | |
2515 | ||
2516 | /* Inode not in memory, nothing to do */ | |
71e3e356 DC |
2517 | if (!ip) { |
2518 | rcu_read_unlock(); | |
2519 | return; | |
2520 | } | |
5806165a DC |
2521 | |
2522 | /* | |
2523 | * Because this is an RCU-protected lookup, we could find a recently |
2524 | * freed or even reallocated inode during the lookup. We need to check | |
2525 | * under the i_flags_lock for a valid inode here. Skip it if it is not | |
2526 | * valid, the wrong inode or stale. | |
2527 | */ | |
2528 | spin_lock(&ip->i_flags_lock); | |
718ecc50 DC |
2529 | if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE)) |
2530 | goto out_iflags_unlock; | |
5806165a DC |
2531 | |
2532 | /* | |
2533 | * Don't try to lock/unlock the current inode, but we _cannot_ skip the | |
2534 | * other inodes that we did not find in the list attached to the buffer | |
2535 | * and are not already marked stale. If we can't lock it, back off and | |
2536 | * retry. | |
2537 | */ | |
2538 | if (ip != free_ip) { | |
2539 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { | |
71e3e356 | 2540 | spin_unlock(&ip->i_flags_lock); |
5806165a DC |
2541 | rcu_read_unlock(); |
2542 | delay(1); | |
2543 | goto retry; | |
2544 | } | |
5806165a | 2545 | } |
71e3e356 | 2546 | ip->i_flags |= XFS_ISTALE; |
5806165a | 2547 | |
71e3e356 | 2548 | /* |
718ecc50 | 2549 | * If the inode is flushing, it is already attached to the buffer. All |
71e3e356 DC |
2550 | * we needed to do here is mark the inode stale so buffer IO completion |
2551 | * will remove it from the AIL. | |
2552 | */ | |
2553 | iip = ip->i_itemp; | |
718ecc50 | 2554 | if (__xfs_iflags_test(ip, XFS_IFLUSHING)) { |
71e3e356 DC |
2555 | ASSERT(!list_empty(&iip->ili_item.li_bio_list)); |
2556 | ASSERT(iip->ili_last_fields); | |
2557 | goto out_iunlock; | |
2558 | } | |
5806165a DC |
2559 | |
2560 | /* | |
48d55e2a DC |
2561 | * Inodes not attached to the buffer can be released immediately. |
2562 | * Everything else has to go through xfs_iflush_abort() on journal | |
2563 | * commit as the flock synchronises removal of the inode from the | |
2564 | * cluster buffer against inode reclaim. | |
5806165a | 2565 | */ |
718ecc50 | 2566 | if (!iip || list_empty(&iip->ili_item.li_bio_list)) |
71e3e356 | 2567 | goto out_iunlock; |
718ecc50 DC |
2568 | |
2569 | __xfs_iflags_set(ip, XFS_IFLUSHING); | |
2570 | spin_unlock(&ip->i_flags_lock); | |
2571 | rcu_read_unlock(); | |
5806165a | 2572 | |
71e3e356 | 2573 | /* we have a dirty inode in memory that has not yet been flushed. */ |
71e3e356 DC |
2574 | spin_lock(&iip->ili_lock); |
2575 | iip->ili_last_fields = iip->ili_fields; | |
2576 | iip->ili_fields = 0; | |
2577 | iip->ili_fsync_fields = 0; | |
2578 | spin_unlock(&iip->ili_lock); | |
71e3e356 DC |
2579 | ASSERT(iip->ili_last_fields); |
2580 | ||
718ecc50 DC |
2581 | if (ip != free_ip) |
2582 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
2583 | return; | |
2584 | ||
71e3e356 DC |
2585 | out_iunlock: |
2586 | if (ip != free_ip) | |
2587 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
718ecc50 DC |
2588 | out_iflags_unlock: |
2589 | spin_unlock(&ip->i_flags_lock); | |
2590 | rcu_read_unlock(); | |
5806165a DC |
2591 | } |
2592 | ||
5b3eed75 | 2593 | /* |
0b8182db | 2594 | * A big issue when freeing the inode cluster is that we _cannot_ skip any |
5b3eed75 DC |
2595 | * inodes that are in memory - they all must be marked stale and attached to |
2596 | * the cluster buffer. | |
2597 | */ | |
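/*
 * A dirty in-memory inode left unmarked here could later be flushed
 * over a disk cluster that has already been freed and possibly
 * reallocated, corrupting unrelated metadata.
 */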
2a30f36d | 2598 | STATIC int |
1da177e4 | 2599 | xfs_ifree_cluster( |
71e3e356 DC |
2600 | struct xfs_inode *free_ip, |
2601 | struct xfs_trans *tp, | |
09b56604 | 2602 | struct xfs_icluster *xic) |
1da177e4 | 2603 | { |
71e3e356 DC |
2604 | struct xfs_mount *mp = free_ip->i_mount; |
2605 | struct xfs_ino_geometry *igeo = M_IGEO(mp); | |
2606 | struct xfs_buf *bp; | |
2607 | xfs_daddr_t blkno; | |
2608 | xfs_ino_t inum = xic->first_ino; | |
1da177e4 | 2609 | int nbufs; |
5b257b4a | 2610 | int i, j; |
3cdaa189 | 2611 | int ioffset; |
ce92464c | 2612 | int error; |
1da177e4 | 2613 | |
ef325959 | 2614 | nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster; |
1da177e4 | 2615 | |
ef325959 | 2616 | for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) { |
09b56604 BF |
2617 | /* |
2618 | * The allocation bitmap tells us which inodes of the chunk were | |
2619 | * physically allocated. Skip the cluster if an inode falls into | |
2620 | * a sparse region. | |
2621 | */ | |
3cdaa189 BF |
2622 | ioffset = inum - xic->first_ino; |
2623 | if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) { | |
ef325959 | 2624 | ASSERT(ioffset % igeo->inodes_per_cluster == 0); |
09b56604 BF |
2625 | continue; |
2626 | } | |
2627 | ||
1da177e4 LT |
2628 | blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), |
2629 | XFS_INO_TO_AGBNO(mp, inum)); | |
2630 | ||
5b257b4a DC |
2631 | /* |
2632 | * We obtain and lock the backing buffer first in the process | |
718ecc50 DC |
2633 | * here to ensure dirty inodes attached to the buffer remain in |
2634 | * the flushing state while we mark them stale. | |
2635 | * | |
5b257b4a DC |
2636 | * If we scan the in-memory inodes first, then buffer IO can |
2637 | * complete before we get a lock on it, and hence we may fail | |
2638 | * to mark all the active inodes on the buffer stale. | |
2639 | */ | |
ce92464c DW |
2640 | error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, |
2641 | mp->m_bsize * igeo->blocks_per_cluster, | |
2642 | XBF_UNMAPPED, &bp); | |
71e3e356 | 2643 | if (error) |
ce92464c | 2644 | return error; |
b0f539de DC |
2645 | |
2646 | /* | |
2647 | * This buffer may not have been correctly initialised as we | |
2648 | * didn't read it from disk. That's not important because we are | |
2649 | * only using it to mark the buffer as stale in the log, and to |
2650 | * attach stale cached inodes on it. That means it will never be | |
2651 | * dispatched for IO. If it is, we want to know about it, and we | |
2652 | * want it to fail. We can achieve this by adding a write |
2653 | * verifier to the buffer. | |
2654 | */ | |
8c4ce794 | 2655 | bp->b_ops = &xfs_inode_buf_ops; |
b0f539de | 2656 | |
5b257b4a | 2657 | /* |
71e3e356 DC |
2658 | * Now we need to set all the cached clean inodes as XFS_ISTALE, |
2659 | * too. This requires lookups, and will skip inodes that we've | |
2660 | * already marked XFS_ISTALE. | |
1da177e4 | 2661 | */ |
71e3e356 DC |
2662 | for (i = 0; i < igeo->inodes_per_cluster; i++) |
2663 | xfs_ifree_mark_inode_stale(bp, free_ip, inum + i); | |
1da177e4 | 2664 | |
5b3eed75 | 2665 | xfs_trans_stale_inode_buf(tp, bp); |
1da177e4 LT |
2666 | xfs_trans_binval(tp, bp); |
2667 | } | |
2a30f36d | 2668 | return 0; |
1da177e4 LT |
2669 | } |
2670 | ||
2671 | /* | |
2672 | * This is called to return an inode to the inode free list. | |
2673 | * The inode should already be truncated to 0 length and have | |
2674 | * no pages associated with it. This routine also assumes that | |
2675 | * the inode is already a part of the transaction. | |
2676 | * | |
2677 | * The on-disk copy of the inode will have been added to the list | |
2678 | * of unlinked inodes in the AGI. We need to remove the inode from | |
2679 | * that list atomically with respect to freeing it here. | |
2680 | */ | |
2681 | int | |
2682 | xfs_ifree( | |
0e0417f3 BF |
2683 | struct xfs_trans *tp, |
2684 | struct xfs_inode *ip) | |
1da177e4 LT |
2685 | { |
2686 | int error; | |
09b56604 | 2687 | struct xfs_icluster xic = { 0 }; |
1319ebef | 2688 | struct xfs_inode_log_item *iip = ip->i_itemp; |
1da177e4 | 2689 | |
579aa9ca | 2690 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
54d7b5c1 | 2691 | ASSERT(VFS_I(ip)->i_nlink == 0); |
daf83964 | 2692 | ASSERT(ip->i_df.if_nextents == 0); |
c19b3b05 | 2693 | ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode)); |
1da177e4 LT |
2694 | ASSERT(ip->i_d.di_nblocks == 0); |
2695 | ||
2696 | /* | |
2697 | * Pull the on-disk inode from the AGI unlinked list. | |
2698 | */ | |
2699 | error = xfs_iunlink_remove(tp, ip); | |
1baaed8f | 2700 | if (error) |
1da177e4 | 2701 | return error; |
1da177e4 | 2702 | |
0e0417f3 | 2703 | error = xfs_difree(tp, ip->i_ino, &xic); |
1baaed8f | 2704 | if (error) |
1da177e4 | 2705 | return error; |
1baaed8f | 2706 | |
b2c20045 CH |
2707 | /* |
2708 | * Free any local-format data sitting around before we reset the | |
2709 | * data fork to extents format. Note that the attr fork data has | |
2710 | * already been freed by xfs_attr_inactive. | |
2711 | */ | |
f7e67b20 | 2712 | if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) { |
b2c20045 CH |
2713 | kmem_free(ip->i_df.if_u1.if_data); |
2714 | ip->i_df.if_u1.if_data = NULL; | |
2715 | ip->i_df.if_bytes = 0; | |
2716 | } | |
98c4f78d | 2717 | |
c19b3b05 | 2718 | VFS_I(ip)->i_mode = 0; /* mark incore inode as free */ |
1da177e4 | 2719 | ip->i_d.di_flags = 0; |
f93e5436 | 2720 | ip->i_d.di_flags2 = ip->i_mount->m_ino_geo.new_diflags2; |
1da177e4 LT |
2721 | ip->i_d.di_dmevmask = 0; |
2722 | ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */ | |
f7e67b20 | 2723 | ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS; |
dc1baa71 ES |
2724 | |
2725 | /* Don't attempt to replay owner changes for a deleted inode */ | |
1319ebef DC |
2726 | spin_lock(&iip->ili_lock); |
2727 | iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER); | |
2728 | spin_unlock(&iip->ili_lock); | |
dc1baa71 | 2729 | |
1da177e4 LT |
2730 | /* |
2731 | * Bump the generation count so no one will be confused | |
2732 | * by reincarnations of this inode. | |
2733 | */ | |
9e9a2674 | 2734 | VFS_I(ip)->i_generation++; |
1da177e4 LT |
2735 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
2736 | ||
09b56604 BF |
2737 | if (xic.deleted) |
2738 | error = xfs_ifree_cluster(ip, tp, &xic); | |
1da177e4 | 2739 | |
2a30f36d | 2740 | return error; |
1da177e4 LT |
2741 | } |
2742 | ||
1da177e4 | 2743 | /* |
60ec6783 CH |
2744 | * This is called to unpin an inode. The caller must have the inode locked |
2745 | * in at least shared mode so that the buffer cannot be subsequently pinned | |
2746 | * once someone is waiting for it to be unpinned. | |
1da177e4 | 2747 | */ |
60ec6783 | 2748 | static void |
f392e631 | 2749 | xfs_iunpin( |
60ec6783 | 2750 | struct xfs_inode *ip) |
1da177e4 | 2751 | { |
579aa9ca | 2752 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
1da177e4 | 2753 | |
4aaf15d1 DC |
2754 | trace_xfs_inode_unpin_nowait(ip, _RET_IP_); |
2755 | ||
a3f74ffb | 2756 | /* Give the log a push to start the unpinning I/O */ |
656de4ff | 2757 | xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL); |
a14a348b | 2758 | |
a3f74ffb | 2759 | } |
1da177e4 | 2760 | |
f392e631 CH |
2761 | static void |
2762 | __xfs_iunpin_wait( | |
2763 | struct xfs_inode *ip) | |
2764 | { | |
2765 | wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT); | |
2766 | DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT); | |
2767 | ||
2768 | xfs_iunpin(ip); | |
2769 | ||
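	/*
	 * Sleep on the shared pin-count bit waitqueue; log I/O completion
	 * drops the last pin and wakes __XFS_IPINNED_BIT waiters.
	 */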
2770 | do { | |
21417136 | 2771 | prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); |
f392e631 CH |
2772 | if (xfs_ipincount(ip)) |
2773 | io_schedule(); | |
2774 | } while (xfs_ipincount(ip)); | |
21417136 | 2775 | finish_wait(wq, &wait.wq_entry); |
f392e631 CH |
2776 | } |
2777 | ||
777df5af | 2778 | void |
a3f74ffb | 2779 | xfs_iunpin_wait( |
60ec6783 | 2780 | struct xfs_inode *ip) |
a3f74ffb | 2781 | { |
f392e631 CH |
2782 | if (xfs_ipincount(ip)) |
2783 | __xfs_iunpin_wait(ip); | |
1da177e4 LT |
2784 | } |
2785 | ||
27320369 DC |
2786 | /* |
2787 | * Removing an inode from the namespace involves removing the directory entry | |
2788 | * and dropping the link count on the inode. Removing the directory entry can | |
2789 | * result in locking an AGF (directory blocks were freed) and removing a link | |
2790 | * count can result in placing the inode on an unlinked list which results in | |
2791 | * locking an AGI. | |
2792 | * | |
2793 | * The big problem here is that we have an ordering constraint on AGF and AGI | |
2794 | * locking - inode allocation locks the AGI, then can allocate a new extent for | |
2795 | * new inodes, locking the AGF after the AGI. Similarly, freeing the inode | |
2796 | * removes the inode from the unlinked list, requiring that we lock the AGI | |
2797 | * first, and then freeing the inode can result in an inode chunk being freed | |
2798 | * and hence freeing disk space requiring that we lock an AGF. | |
2799 | * | |
2800 | * Hence the ordering that is imposed by other parts of the code is AGI before | |
2801 | * AGF. This means we cannot remove the directory entry before we drop the inode | |
2802 | * reference count and put it on the unlinked list as this results in a lock | |
2803 | * order of AGF then AGI, and this can deadlock against inode allocation and | |
2804 | * freeing. Therefore we must drop the link counts before we remove the | |
2805 | * directory entry. | |
2806 | * | |
2807 | * This is still safe from a transactional point of view - it is not until we | |
310a75a3 | 2808 | * get to xfs_defer_finish() that we have the possibility of multiple |
27320369 DC |
2809 | * transactions in this operation. Hence as long as we remove the directory |
2810 | * entry and drop the link count in the first transaction of the remove | |
2811 | * operation, there are no transactional constraints on the ordering here. | |
2812 | */ | |
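/*
 * Illustrative sketch of the deadlock described above (added commentary, not
 * from the source). Suppose the remove path took the locks in the wrong order:
 *
 *   Thread A (inode allocation)        Thread B (bad remove ordering)
 *   ---------------------------        ------------------------------
 *   lock AGI                           lock AGF  (remove dir entry)
 *   lock AGF  <- waits for B           lock AGI  <- waits for A
 *
 * Neither thread can make progress. Dropping the link count (AGI) before
 * removing the directory entry (AGF) keeps every path in AGI -> AGF order.
 */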
c24b5dfa DC |
2813 | int |
2814 | xfs_remove( | |
2815 | xfs_inode_t *dp, | |
2816 | struct xfs_name *name, | |
2817 | xfs_inode_t *ip) | |
2818 | { | |
2819 | xfs_mount_t *mp = dp->i_mount; | |
2820 | xfs_trans_t *tp = NULL; | |
c19b3b05 | 2821 | int is_dir = S_ISDIR(VFS_I(ip)->i_mode); |
c24b5dfa | 2822 | int error = 0; |
c24b5dfa | 2823 | uint resblks; |
c24b5dfa DC |
2824 | |
2825 | trace_xfs_remove(dp, name); | |
2826 | ||
2827 | if (XFS_FORCED_SHUTDOWN(mp)) | |
2451337d | 2828 | return -EIO; |
c24b5dfa | 2829 | |
c14cfcca | 2830 | error = xfs_qm_dqattach(dp); |
c24b5dfa DC |
2831 | if (error) |
2832 | goto std_return; | |
2833 | ||
c14cfcca | 2834 | error = xfs_qm_dqattach(ip); |
c24b5dfa DC |
2835 | if (error) |
2836 | goto std_return; | |
2837 | ||
c24b5dfa DC |
2838 | /* |
2839 | * We try to get the real space reservation first, |
2840 | * allowing for directory btree deletion(s) implying |
2841 | * possible bmap insert(s). If we can't get the space |
2842 | * reservation then we use 0 instead, and avoid the bmap |
2843 | * btree insert(s) in the directory code: if a bmap insert |
2844 | * would otherwise be needed, the directory code trims the |
2845 | * LAST block from the directory instead. |
2846 | */ | |
2847 | resblks = XFS_REMOVE_SPACE_RES(mp); | |
253f4911 | 2848 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp); |
2451337d | 2849 | if (error == -ENOSPC) { |
c24b5dfa | 2850 | resblks = 0; |
253f4911 CH |
2851 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0, |
2852 | &tp); | |
c24b5dfa DC |
2853 | } |
2854 | if (error) { | |
2451337d | 2855 | ASSERT(error != -ENOSPC); |
253f4911 | 2856 | goto std_return; |
c24b5dfa DC |
2857 | } |
2858 | ||
7c2d238a | 2859 | xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL); |
c24b5dfa | 2860 | |
65523218 | 2861 | xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); |
c24b5dfa DC |
2862 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); |
2863 | ||
2864 | /* | |
2865 | * If we're removing a directory perform some additional validation. | |
2866 | */ | |
2867 | if (is_dir) { | |
54d7b5c1 DC |
2868 | ASSERT(VFS_I(ip)->i_nlink >= 2); |
2869 | if (VFS_I(ip)->i_nlink != 2) { | |
2451337d | 2870 | error = -ENOTEMPTY; |
c24b5dfa DC |
2871 | goto out_trans_cancel; |
2872 | } | |
2873 | if (!xfs_dir_isempty(ip)) { | |
2451337d | 2874 | error = -ENOTEMPTY; |
c24b5dfa DC |
2875 | goto out_trans_cancel; |
2876 | } | |
c24b5dfa | 2877 | |
27320369 | 2878 | /* Drop the link from ip's "..". */ |
c24b5dfa DC |
2879 | error = xfs_droplink(tp, dp); |
2880 | if (error) | |
27320369 | 2881 | goto out_trans_cancel; |
c24b5dfa | 2882 | |
27320369 | 2883 | /* Drop the "." link from ip to self. */ |
c24b5dfa DC |
2884 | error = xfs_droplink(tp, ip); |
2885 | if (error) | |
27320369 | 2886 | goto out_trans_cancel; |
c24b5dfa DC |
2887 | } else { |
2888 | /* | |
2889 | * When removing a non-directory we need to log the parent | |
2890 | * inode here. For a directory this is done implicitly | |
2891 | * by the xfs_droplink call for the ".." entry. | |
2892 | */ | |
2893 | xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); | |
2894 | } | |
27320369 | 2895 | xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
c24b5dfa | 2896 | |
27320369 | 2897 | /* Drop the link from dp to ip. */ |
c24b5dfa DC |
2898 | error = xfs_droplink(tp, ip); |
2899 | if (error) | |
27320369 | 2900 | goto out_trans_cancel; |
c24b5dfa | 2901 | |
381eee69 | 2902 | error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks); |
27320369 | 2903 | if (error) { |
2451337d | 2904 | ASSERT(error != -ENOENT); |
c8eac49e | 2905 | goto out_trans_cancel; |
27320369 DC |
2906 | } |
2907 | ||
c24b5dfa DC |
2908 | /* |
2909 | * If this is a synchronous mount, make sure that the | |
2910 | * remove transaction goes to disk before returning to | |
2911 | * the user. | |
2912 | */ | |
2913 | if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) | |
2914 | xfs_trans_set_sync(tp); | |
2915 | ||
70393313 | 2916 | error = xfs_trans_commit(tp); |
c24b5dfa DC |
2917 | if (error) |
2918 | goto std_return; | |
2919 | ||
2cd2ef6a | 2920 | if (is_dir && xfs_inode_is_filestream(ip)) |
c24b5dfa DC |
2921 | xfs_filestream_deassociate(ip); |
2922 | ||
2923 | return 0; | |
2924 | ||
c24b5dfa | 2925 | out_trans_cancel: |
4906e215 | 2926 | xfs_trans_cancel(tp); |
c24b5dfa DC |
2927 | std_return: |
2928 | return error; | |
2929 | } | |
2930 | ||
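/*
 * Minimal caller sketch (illustrative only, not the real VFS glue from
 * xfs_iops.c): how an unlink-style operation might drive xfs_remove().
 * The dentry-to-xfs_name conversion is spelled out inline here; the
 * function name is hypothetical.
 */
static int example_unlink(struct xfs_inode *dp, struct dentry *dentry)
{
	struct xfs_name	name;

	name.name = dentry->d_name.name;
	name.len = dentry->d_name.len;
	name.type = XFS_DIR3_FT_UNKNOWN;	/* type hint not needed for remove */

	return xfs_remove(dp, &name, XFS_I(d_inode(dentry)));
}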
f6bba201 DC |
2931 | /* |
2932 | * Enter all inodes for a rename transaction into a sorted array. | |
2933 | */ | |
95afcf5c | 2934 | #define __XFS_SORT_INODES 5 |
f6bba201 DC |
2935 | STATIC void |
2936 | xfs_sort_for_rename( | |
95afcf5c DC |
2937 | struct xfs_inode *dp1, /* in: old (source) directory inode */ |
2938 | struct xfs_inode *dp2, /* in: new (target) directory inode */ | |
2939 | struct xfs_inode *ip1, /* in: inode of old entry */ | |
2940 | struct xfs_inode *ip2, /* in: inode of new entry */ | |
2941 | struct xfs_inode *wip, /* in: whiteout inode */ | |
2942 | struct xfs_inode **i_tab,/* out: sorted array of inodes */ | |
2943 | int *num_inodes) /* in/out: inodes in array */ | |
f6bba201 | 2944 | { |
f6bba201 DC |
2945 | int i, j; |
2946 | ||
95afcf5c DC |
2947 | ASSERT(*num_inodes == __XFS_SORT_INODES); |
2948 | memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *)); | |
2949 | ||
f6bba201 DC |
2950 | /* |
2951 | * i_tab contains a list of pointers to inodes. We initialize |
2952 | * the table here and sort it below. We then use it to |
2953 | * order the acquisition of the inode locks. |
2954 | * | |
2955 | * Note that the table may contain duplicates. e.g., dp1 == dp2. | |
2956 | */ | |
95afcf5c DC |
2957 | i = 0; |
2958 | i_tab[i++] = dp1; | |
2959 | i_tab[i++] = dp2; | |
2960 | i_tab[i++] = ip1; | |
2961 | if (ip2) | |
2962 | i_tab[i++] = ip2; | |
2963 | if (wip) | |
2964 | i_tab[i++] = wip; | |
2965 | *num_inodes = i; | |
f6bba201 DC |
2966 | |
2967 | /* | |
2968 | * Sort the elements via bubble sort. (Remember, there are at | |
95afcf5c | 2969 | * most 5 elements to sort, so this is adequate.) |
f6bba201 DC |
2970 | */ |
2971 | for (i = 0; i < *num_inodes; i++) { | |
2972 | for (j = 1; j < *num_inodes; j++) { | |
2973 | if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) { | |
95afcf5c | 2974 | struct xfs_inode *temp = i_tab[j]; |
f6bba201 DC |
2975 | i_tab[j] = i_tab[j-1]; |
2976 | i_tab[j-1] = temp; | |
2977 | } | |
2978 | } | |
2979 | } | |
2980 | } | |
2981 | ||
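/*
 * Worked example (illustrative numbers, not from the source): a
 * cross-directory rename with no target inode and no whiteout, where
 * dp1->i_ino = 131, dp2->i_ino = 67 and ip1->i_ino = 258, fills i_tab
 * with { dp1, dp2, ip1 } and sets *num_inodes = 3. The bubble sort then
 * reorders the table by inode number to { dp2(67), dp1(131), ip1(258) },
 * so xfs_lock_inodes() always acquires the ILOCKs in ascending i_ino
 * order no matter what order the VFS handed the inodes to us.
 */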
310606b0 DC |
2982 | static int |
2983 | xfs_finish_rename( | |
c9cfdb38 | 2984 | struct xfs_trans *tp) |
310606b0 | 2985 | { |
310606b0 DC |
2986 | /* |
2987 | * If this is a synchronous mount, make sure that the rename transaction | |
2988 | * goes to disk before returning to the user. | |
2989 | */ | |
2990 | if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) | |
2991 | xfs_trans_set_sync(tp); | |
2992 | ||
70393313 | 2993 | return xfs_trans_commit(tp); |
310606b0 DC |
2994 | } |
2995 | ||
d31a1825 CM |
2996 | /* |
2997 | * xfs_cross_rename() | |
2998 | * | |
2999 | * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() system call. |
3000 | */ | |
3001 | STATIC int | |
3002 | xfs_cross_rename( | |
3003 | struct xfs_trans *tp, | |
3004 | struct xfs_inode *dp1, | |
3005 | struct xfs_name *name1, | |
3006 | struct xfs_inode *ip1, | |
3007 | struct xfs_inode *dp2, | |
3008 | struct xfs_name *name2, | |
3009 | struct xfs_inode *ip2, | |
d31a1825 CM |
3010 | int spaceres) |
3011 | { | |
3012 | int error = 0; | |
3013 | int ip1_flags = 0; | |
3014 | int ip2_flags = 0; | |
3015 | int dp2_flags = 0; | |
3016 | ||
3017 | /* Swap inode number for dirent in first parent */ | |
381eee69 | 3018 | error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres); |
d31a1825 | 3019 | if (error) |
eeacd321 | 3020 | goto out_trans_abort; |
d31a1825 CM |
3021 | |
3022 | /* Swap inode number for dirent in second parent */ | |
381eee69 | 3023 | error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres); |
d31a1825 | 3024 | if (error) |
eeacd321 | 3025 | goto out_trans_abort; |
d31a1825 CM |
3026 | |
3027 | /* | |
3028 | * If we're renaming one or more directories across different parents, | |
3029 | * update the respective ".." entries (and link counts) to match the new | |
3030 | * parents. | |
3031 | */ | |
3032 | if (dp1 != dp2) { | |
3033 | dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; | |
3034 | ||
c19b3b05 | 3035 | if (S_ISDIR(VFS_I(ip2)->i_mode)) { |
d31a1825 | 3036 | error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot, |
381eee69 | 3037 | dp1->i_ino, spaceres); |
d31a1825 | 3038 | if (error) |
eeacd321 | 3039 | goto out_trans_abort; |
d31a1825 CM |
3040 | |
3041 | /* transfer ip2 ".." reference to dp1 */ | |
c19b3b05 | 3042 | if (!S_ISDIR(VFS_I(ip1)->i_mode)) { |
d31a1825 CM |
3043 | error = xfs_droplink(tp, dp2); |
3044 | if (error) | |
eeacd321 | 3045 | goto out_trans_abort; |
91083269 | 3046 | xfs_bumplink(tp, dp1); |
d31a1825 CM |
3047 | } |
3048 | ||
3049 | /* | |
3050 | * Although ip1 isn't changed here, userspace needs |
3051 | * to be notified about the change, so that applications |
3052 | * relying on it (like backup tools) will properly |
3053 | * notice the change. |
3054 | */ | |
3055 | ip1_flags |= XFS_ICHGTIME_CHG; | |
3056 | ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; | |
3057 | } | |
3058 | ||
c19b3b05 | 3059 | if (S_ISDIR(VFS_I(ip1)->i_mode)) { |
d31a1825 | 3060 | error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot, |
381eee69 | 3061 | dp2->i_ino, spaceres); |
d31a1825 | 3062 | if (error) |
eeacd321 | 3063 | goto out_trans_abort; |
d31a1825 CM |
3064 | |
3065 | /* transfer ip1 ".." reference to dp2 */ | |
c19b3b05 | 3066 | if (!S_ISDIR(VFS_I(ip2)->i_mode)) { |
d31a1825 CM |
3067 | error = xfs_droplink(tp, dp1); |
3068 | if (error) | |
eeacd321 | 3069 | goto out_trans_abort; |
91083269 | 3070 | xfs_bumplink(tp, dp2); |
d31a1825 CM |
3071 | } |
3072 | ||
3073 | /* | |
3074 | * Although ip2 isn't changed here, userspace needs |
3075 | * to be notified about the change, so that applications |
3076 | * relying on it (like backup tools) will properly |
3077 | * notice the change. |
3078 | */ | |
3079 | ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; | |
3080 | ip2_flags |= XFS_ICHGTIME_CHG; | |
3081 | } | |
3082 | } | |
3083 | ||
3084 | if (ip1_flags) { | |
3085 | xfs_trans_ichgtime(tp, ip1, ip1_flags); | |
3086 | xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE); | |
3087 | } | |
3088 | if (ip2_flags) { | |
3089 | xfs_trans_ichgtime(tp, ip2, ip2_flags); | |
3090 | xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE); | |
3091 | } | |
3092 | if (dp2_flags) { | |
3093 | xfs_trans_ichgtime(tp, dp2, dp2_flags); | |
3094 | xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE); | |
3095 | } | |
3096 | xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | |
3097 | xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE); | |
c9cfdb38 | 3098 | return xfs_finish_rename(tp); |
eeacd321 DC |
3099 | |
3100 | out_trans_abort: | |
4906e215 | 3101 | xfs_trans_cancel(tp); |
d31a1825 CM |
3102 | return error; |
3103 | } | |
3104 | ||
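/*
 * Userspace sketch (illustrative): exercising the RENAME_EXCHANGE path that
 * xfs_cross_rename() implements. renameat2() is the real system call; the
 * file names are made up, and both entries must already exist for the atomic
 * swap to succeed.
 */
#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <linux/fs.h>		/* RENAME_EXCHANGE */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Atomically swap the directory entries for a.txt and b.txt. */
	if (syscall(SYS_renameat2, AT_FDCWD, "a.txt",
		    AT_FDCWD, "b.txt", RENAME_EXCHANGE) < 0) {
		perror("renameat2");
		return 1;
	}
	return 0;
}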
7dcf5c3e DC |
3105 | /* |
3106 | * xfs_rename_alloc_whiteout() | |
3107 | * | |
b63da6c8 | 3108 | * Return a referenced, unlinked, unlocked inode that can be used as a |
7dcf5c3e DC |
3109 | * whiteout in a rename transaction. We use a tmpfile inode here so that if we |
3110 | * crash between allocating the inode and linking it into the rename transaction, |
3111 | * log recovery will free the inode and we won't leak it. |
3112 | */ | |
3113 | static int | |
3114 | xfs_rename_alloc_whiteout( | |
3115 | struct xfs_inode *dp, | |
3116 | struct xfs_inode **wip) | |
3117 | { | |
3118 | struct xfs_inode *tmpfile; | |
3119 | int error; | |
3120 | ||
a1f69417 | 3121 | error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile); |
7dcf5c3e DC |
3122 | if (error) |
3123 | return error; | |
3124 | ||
22419ac9 BF |
3125 | /* |
3126 | * Prepare the tmpfile inode as if it were created through the VFS. | |
c4a6bf7f DW |
3127 | * Complete the inode setup and flag it as linkable. nlink is already |
3128 | * zero, so we can skip the drop_nlink. | |
22419ac9 | 3129 | */ |
2b3d1d41 | 3130 | xfs_setup_iops(tmpfile); |
7dcf5c3e DC |
3131 | xfs_finish_inode_setup(tmpfile); |
3132 | VFS_I(tmpfile)->i_state |= I_LINKABLE; | |
3133 | ||
3134 | *wip = tmpfile; | |
3135 | return 0; | |
3136 | } | |
3137 | ||
f6bba201 DC |
3138 | /* |
3139 | * xfs_rename | |
3140 | */ | |
3141 | int | |
3142 | xfs_rename( | |
7dcf5c3e DC |
3143 | struct xfs_inode *src_dp, |
3144 | struct xfs_name *src_name, | |
3145 | struct xfs_inode *src_ip, | |
3146 | struct xfs_inode *target_dp, | |
3147 | struct xfs_name *target_name, | |
3148 | struct xfs_inode *target_ip, | |
3149 | unsigned int flags) | |
f6bba201 | 3150 | { |
7dcf5c3e DC |
3151 | struct xfs_mount *mp = src_dp->i_mount; |
3152 | struct xfs_trans *tp; | |
7dcf5c3e DC |
3153 | struct xfs_inode *wip = NULL; /* whiteout inode */ |
3154 | struct xfs_inode *inodes[__XFS_SORT_INODES]; | |
93597ae8 | 3155 | struct xfs_buf *agibp; |
7dcf5c3e | 3156 | int num_inodes = __XFS_SORT_INODES; |
2b93681f | 3157 | bool new_parent = (src_dp != target_dp); |
c19b3b05 | 3158 | bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode); |
7dcf5c3e DC |
3159 | int spaceres; |
3160 | int error; | |
f6bba201 DC |
3161 | |
3162 | trace_xfs_rename(src_dp, target_dp, src_name, target_name); | |
3163 | ||
eeacd321 DC |
3164 | if ((flags & RENAME_EXCHANGE) && !target_ip) |
3165 | return -EINVAL; | |
3166 | ||
7dcf5c3e DC |
3167 | /* |
3168 | * If we are doing a whiteout operation, allocate the whiteout inode | |
3169 | * we will be placing at the source (the entry being renamed away) and |
3170 | * ensure the type is set appropriately. |
3171 | */ | |
3172 | if (flags & RENAME_WHITEOUT) { | |
3173 | ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE))); | |
3174 | error = xfs_rename_alloc_whiteout(target_dp, &wip); | |
3175 | if (error) | |
3176 | return error; | |
3177 | ||
3178 | /* set up the source dirent info as a whiteout */ |
3179 | src_name->type = XFS_DIR3_FT_CHRDEV; | |
3180 | } | |
f6bba201 | 3181 | |
7dcf5c3e | 3182 | xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip, |
f6bba201 DC |
3183 | inodes, &num_inodes); |
3184 | ||
f6bba201 | 3185 | spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); |
253f4911 | 3186 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp); |
2451337d | 3187 | if (error == -ENOSPC) { |
f6bba201 | 3188 | spaceres = 0; |
253f4911 CH |
3189 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0, |
3190 | &tp); | |
f6bba201 | 3191 | } |
445883e8 | 3192 | if (error) |
253f4911 | 3193 | goto out_release_wip; |
f6bba201 DC |
3194 | |
3195 | /* | |
3196 | * Attach the dquots to the inodes | |
3197 | */ | |
3198 | error = xfs_qm_vop_rename_dqattach(inodes); | |
445883e8 DC |
3199 | if (error) |
3200 | goto out_trans_cancel; | |
f6bba201 DC |
3201 | |
3202 | /* | |
3203 | * Lock all the participating inodes. Depending upon whether | |
3204 | * the target_name exists in the target directory, and | |
3205 | * whether the target directory is the same as the source | |
3206 | * directory, we can lock from 2 to 4 inodes. | |
3207 | */ | |
3208 | xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL); | |
3209 | ||
3210 | /* | |
3211 | * Join all the inodes to the transaction. From this point on, | |
3212 | * we can rely on either trans_commit or trans_cancel to unlock | |
3213 | * them. | |
3214 | */ | |
65523218 | 3215 | xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); |
f6bba201 | 3216 | if (new_parent) |
65523218 | 3217 | xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); |
f6bba201 DC |
3218 | xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); |
3219 | if (target_ip) | |
3220 | xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); | |
7dcf5c3e DC |
3221 | if (wip) |
3222 | xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL); | |
f6bba201 DC |
3223 | |
3224 | /* | |
3225 | * If we are using project inheritance, we only allow renames | |
3226 | * into our tree when the project IDs are the same; else the | |
3227 | * tree quota mechanism would be circumvented. | |
3228 | */ | |
3229 | if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && | |
de7a866f | 3230 | target_dp->i_d.di_projid != src_ip->i_d.di_projid)) { |
2451337d | 3231 | error = -EXDEV; |
445883e8 | 3232 | goto out_trans_cancel; |
f6bba201 DC |
3233 | } |
3234 | ||
eeacd321 DC |
3235 | /* RENAME_EXCHANGE is unique from here on. */ |
3236 | if (flags & RENAME_EXCHANGE) | |
3237 | return xfs_cross_rename(tp, src_dp, src_name, src_ip, | |
3238 | target_dp, target_name, target_ip, | |
f16dea54 | 3239 | spaceres); |
d31a1825 | 3240 | |
f6bba201 | 3241 | /* |
bc56ad8c | 3242 | * Check for expected errors before we dirty the transaction |
3243 | * so we can return an error without a transaction abort. | |
f6bba201 DC |
3244 | */ |
3245 | if (target_ip == NULL) { | |
3246 | /* | |
3247 | * If there's no space reservation, check the entry will | |
3248 | * fit before actually inserting it. | |
3249 | */ | |
94f3cad5 ES |
3250 | if (!spaceres) { |
3251 | error = xfs_dir_canenter(tp, target_dp, target_name); | |
3252 | if (error) | |
445883e8 | 3253 | goto out_trans_cancel; |
94f3cad5 | 3254 | } |
bc56ad8c | 3255 | } else { |
3256 | /* | |
3257 | * If target exists and it's a directory, check whether it |
3258 | * can be destroyed. |
3259 | */ | |
3260 | if (S_ISDIR(VFS_I(target_ip)->i_mode) && | |
3261 | (!xfs_dir_isempty(target_ip) || | |
3262 | (VFS_I(target_ip)->i_nlink > 2))) { | |
3263 | error = -EEXIST; | |
3264 | goto out_trans_cancel; | |
3265 | } | |
3266 | } | |
3267 | ||
3268 | /* | |
3269 | * Directory entry creation below may acquire the AGF. Remove | |
3270 | * the whiteout from the unlinked list first to preserve correct | |
3271 | * AGI/AGF locking order. This dirties the transaction so failures | |
3272 | * after this point will abort and log recovery will clean up the | |
3273 | * mess. | |
3274 | * | |
3275 | * For whiteouts, we need to bump the link count on the whiteout | |
3276 | * inode. After this point we have a real link, so clear the tmpfile |
3277 | * state flag from the inode so that it doesn't accidentally get |
3278 | * misused in the future. |
3279 | */ | |
3280 | if (wip) { | |
3281 | ASSERT(VFS_I(wip)->i_nlink == 0); | |
3282 | error = xfs_iunlink_remove(tp, wip); | |
3283 | if (error) | |
3284 | goto out_trans_cancel; | |
3285 | ||
3286 | xfs_bumplink(tp, wip); | |
bc56ad8c | 3287 | VFS_I(wip)->i_state &= ~I_LINKABLE; |
3288 | } | |
3289 | ||
3290 | /* | |
3291 | * Set up the target. | |
3292 | */ | |
3293 | if (target_ip == NULL) { | |
f6bba201 DC |
3294 | /* |
3295 | * If target does not exist and the rename crosses | |
3296 | * directories, adjust the target directory link count | |
3297 | * to account for the ".." reference from the new entry. | |
3298 | */ | |
3299 | error = xfs_dir_createname(tp, target_dp, target_name, | |
381eee69 | 3300 | src_ip->i_ino, spaceres); |
f6bba201 | 3301 | if (error) |
c8eac49e | 3302 | goto out_trans_cancel; |
f6bba201 DC |
3303 | |
3304 | xfs_trans_ichgtime(tp, target_dp, | |
3305 | XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | |
3306 | ||
3307 | if (new_parent && src_is_directory) { | |
91083269 | 3308 | xfs_bumplink(tp, target_dp); |
f6bba201 DC |
3309 | } |
3310 | } else { /* target_ip != NULL */ | |
f6bba201 DC |
3311 | /* |
3312 | * Link the source inode under the target name. | |
3313 | * If the source inode is a directory and we are moving | |
3314 | * it across directories, its ".." entry will be | |
3315 | * inconsistent until we replace that down below. | |
3316 | * | |
3317 | * In case there is already an entry with the same | |
3318 | * name at the destination directory, remove it first. | |
3319 | */ | |
93597ae8 | 3320 | |
3321 | /* | |
3322 | * Check whether the replace operation will need to allocate | |
3323 | * blocks. This happens when the shortform directory lacks | |
3324 | * space and we have to convert it to a block format directory. | |
3325 | * When more blocks are necessary, we must lock the AGI first | |
3326 | * to preserve locking order (AGI -> AGF). | |
3327 | */ | |
3328 | if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) { | |
3329 | error = xfs_read_agi(mp, tp, | |
3330 | XFS_INO_TO_AGNO(mp, target_ip->i_ino), | |
3331 | &agibp); | |
3332 | if (error) | |
3333 | goto out_trans_cancel; | |
3334 | } | |
3335 | ||
f6bba201 | 3336 | error = xfs_dir_replace(tp, target_dp, target_name, |
381eee69 | 3337 | src_ip->i_ino, spaceres); |
f6bba201 | 3338 | if (error) |
c8eac49e | 3339 | goto out_trans_cancel; |
f6bba201 DC |
3340 | |
3341 | xfs_trans_ichgtime(tp, target_dp, | |
3342 | XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | |
3343 | ||
3344 | /* | |
3345 | * Decrement the link count on the target since the target | |
3346 | * dir no longer points to it. | |
3347 | */ | |
3348 | error = xfs_droplink(tp, target_ip); | |
3349 | if (error) | |
c8eac49e | 3350 | goto out_trans_cancel; |
f6bba201 DC |
3351 | |
3352 | if (src_is_directory) { | |
3353 | /* | |
3354 | * Drop the link from the old "." entry. | |
3355 | */ | |
3356 | error = xfs_droplink(tp, target_ip); | |
3357 | if (error) | |
c8eac49e | 3358 | goto out_trans_cancel; |
f6bba201 DC |
3359 | } |
3360 | } /* target_ip != NULL */ | |
3361 | ||
3362 | /* | |
3363 | * Remove the source. | |
3364 | */ | |
3365 | if (new_parent && src_is_directory) { | |
3366 | /* | |
3367 | * Rewrite the ".." entry to point to the new | |
3368 | * directory. | |
3369 | */ | |
3370 | error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, | |
381eee69 | 3371 | target_dp->i_ino, spaceres); |
2451337d | 3372 | ASSERT(error != -EEXIST); |
f6bba201 | 3373 | if (error) |
c8eac49e | 3374 | goto out_trans_cancel; |
f6bba201 DC |
3375 | } |
3376 | ||
3377 | /* | |
3378 | * We always want to hit the ctime on the source inode. | |
3379 | * | |
3380 | * This isn't strictly required by the standards since the source | |
3381 | * inode isn't really being changed, but old unix file systems did | |
3382 | * it and some incremental backup programs won't work without it. | |
3383 | */ | |
3384 | xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG); | |
3385 | xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE); | |
3386 | ||
3387 | /* | |
3388 | * Adjust the link count on src_dp. This is necessary when | |
3389 | * renaming a directory, either within one parent when | |
3390 | * the target existed, or across two parent directories. | |
3391 | */ | |
3392 | if (src_is_directory && (new_parent || target_ip != NULL)) { | |
3393 | ||
3394 | /* | |
3395 | * Decrement link count on src_directory since the | |
3396 | * entry that's moved no longer points to it. | |
3397 | */ | |
3398 | error = xfs_droplink(tp, src_dp); | |
3399 | if (error) | |
c8eac49e | 3400 | goto out_trans_cancel; |
f6bba201 DC |
3401 | } |
3402 | ||
7dcf5c3e DC |
3403 | /* |
3404 | * For whiteouts, we only need to update the source dirent with the | |
3405 | * inode number of the whiteout inode rather than removing it | |
3406 | * altogether. | |
3407 | */ | |
3408 | if (wip) { | |
3409 | error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino, | |
381eee69 | 3410 | spaceres); |
7dcf5c3e DC |
3411 | } else |
3412 | error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino, | |
381eee69 | 3413 | spaceres); |
f6bba201 | 3414 | if (error) |
c8eac49e | 3415 | goto out_trans_cancel; |
f6bba201 | 3416 | |
f6bba201 DC |
3417 | xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
3418 | xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); | |
3419 | if (new_parent) | |
3420 | xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); | |
f6bba201 | 3421 | |
c9cfdb38 | 3422 | error = xfs_finish_rename(tp); |
7dcf5c3e | 3423 | if (wip) |
44a8736b | 3424 | xfs_irele(wip); |
7dcf5c3e | 3425 | return error; |
f6bba201 | 3426 | |
445883e8 | 3427 | out_trans_cancel: |
4906e215 | 3428 | xfs_trans_cancel(tp); |
253f4911 | 3429 | out_release_wip: |
7dcf5c3e | 3430 | if (wip) |
44a8736b | 3431 | xfs_irele(wip); |
f6bba201 DC |
3432 | return error; |
3433 | } | |
3434 | ||
e6187b34 DC |
3435 | static int |
3436 | xfs_iflush( | |
93848a99 CH |
3437 | struct xfs_inode *ip, |
3438 | struct xfs_buf *bp) | |
1da177e4 | 3439 | { |
93848a99 CH |
3440 | struct xfs_inode_log_item *iip = ip->i_itemp; |
3441 | struct xfs_dinode *dip; | |
3442 | struct xfs_mount *mp = ip->i_mount; | |
f2019299 | 3443 | int error; |
1da177e4 | 3444 | |
579aa9ca | 3445 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
718ecc50 | 3446 | ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING)); |
f7e67b20 | 3447 | ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || |
daf83964 | 3448 | ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); |
90c60e16 | 3449 | ASSERT(iip->ili_item.li_buf == bp); |
1da177e4 | 3450 | |
88ee2df7 | 3451 | dip = xfs_buf_offset(bp, ip->i_imap.im_boffset); |
1da177e4 | 3452 | |
f2019299 BF |
3453 | /* |
3454 | * We don't flush the inode if any of the following checks fail, but we | |
3455 | * do still update the log item and attach to the backing buffer as if | |
3456 | * the flush happened. This is a formality to facilitate predictable | |
3457 | * error handling, as the caller will shut down and fail the buffer. |
3458 | */ | |
3459 | error = -EFSCORRUPTED; | |
69ef921b | 3460 | if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), |
9e24cfd0 | 3461 | mp, XFS_ERRTAG_IFLUSH_1)) { |
6a19d939 | 3462 | xfs_alert_tag(mp, XFS_PTAG_IFLUSH, |
c9690043 | 3463 | "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT, |
6a19d939 | 3464 | __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); |
f2019299 | 3465 | goto flush_out; |
1da177e4 | 3466 | } |
c19b3b05 | 3467 | if (S_ISREG(VFS_I(ip)->i_mode)) { |
1da177e4 | 3468 | if (XFS_TEST_ERROR( |
f7e67b20 CH |
3469 | ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && |
3470 | ip->i_df.if_format != XFS_DINODE_FMT_BTREE, | |
9e24cfd0 | 3471 | mp, XFS_ERRTAG_IFLUSH_3)) { |
6a19d939 | 3472 | xfs_alert_tag(mp, XFS_PTAG_IFLUSH, |
c9690043 | 3473 | "%s: Bad regular inode %Lu, ptr "PTR_FMT, |
6a19d939 | 3474 | __func__, ip->i_ino, ip); |
f2019299 | 3475 | goto flush_out; |
1da177e4 | 3476 | } |
c19b3b05 | 3477 | } else if (S_ISDIR(VFS_I(ip)->i_mode)) { |
1da177e4 | 3478 | if (XFS_TEST_ERROR( |
f7e67b20 CH |
3479 | ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && |
3480 | ip->i_df.if_format != XFS_DINODE_FMT_BTREE && | |
3481 | ip->i_df.if_format != XFS_DINODE_FMT_LOCAL, | |
9e24cfd0 | 3482 | mp, XFS_ERRTAG_IFLUSH_4)) { |
6a19d939 | 3483 | xfs_alert_tag(mp, XFS_PTAG_IFLUSH, |
c9690043 | 3484 | "%s: Bad directory inode %Lu, ptr "PTR_FMT, |
6a19d939 | 3485 | __func__, ip->i_ino, ip); |
f2019299 | 3486 | goto flush_out; |
1da177e4 LT |
3487 | } |
3488 | } | |
daf83964 | 3489 | if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) > |
9e24cfd0 | 3490 | ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) { |
6a19d939 DC |
3491 | xfs_alert_tag(mp, XFS_PTAG_IFLUSH, |
3492 | "%s: detected corrupt incore inode %Lu, " | |
c9690043 | 3493 | "total extents = %d, nblocks = %Ld, ptr "PTR_FMT, |
6a19d939 | 3494 | __func__, ip->i_ino, |
daf83964 | 3495 | ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp), |
6a19d939 | 3496 | ip->i_d.di_nblocks, ip); |
f2019299 | 3497 | goto flush_out; |
1da177e4 LT |
3498 | } |
3499 | if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, | |
9e24cfd0 | 3500 | mp, XFS_ERRTAG_IFLUSH_6)) { |
6a19d939 | 3501 | xfs_alert_tag(mp, XFS_PTAG_IFLUSH, |
c9690043 | 3502 | "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT, |
6a19d939 | 3503 | __func__, ip->i_ino, ip->i_d.di_forkoff, ip); |
f2019299 | 3504 | goto flush_out; |
1da177e4 | 3505 | } |
e60896d8 | 3506 | |
1da177e4 | 3507 | /* |
263997a6 | 3508 | * Inode item log recovery for v2 inodes is dependent on the |
e60896d8 DC |
3509 | * di_flushiter count for correct sequencing. We bump the flush |
3510 | * iteration count so we can detect flushes which postdate a log record | |
3511 | * during recovery. This is redundant as we now log every change and | |
3512 | * hence this can't happen but we need to still do it to ensure | |
3513 | * backwards compatibility with old kernels that predate logging all | |
3514 | * inode changes. | |
1da177e4 | 3515 | */ |
6471e9c5 | 3516 | if (!xfs_sb_version_has_v3inode(&mp->m_sb)) |
e60896d8 | 3517 | ip->i_d.di_flushiter++; |
1da177e4 | 3518 | |
0f45a1b2 CH |
3519 | /* |
3520 | * If there are inline format data / attr forks attached to this inode, | |
3521 | * make sure they are not corrupt. | |
3522 | */ | |
f7e67b20 | 3523 | if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL && |
0f45a1b2 CH |
3524 | xfs_ifork_verify_local_data(ip)) |
3525 | goto flush_out; | |
f7e67b20 | 3526 | if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL && |
0f45a1b2 | 3527 | xfs_ifork_verify_local_attr(ip)) |
f2019299 | 3528 | goto flush_out; |
005c5db8 | 3529 | |
1da177e4 | 3530 | /* |
3987848c DC |
3531 | * Copy the dirty parts of the inode into the on-disk inode. We always |
3532 | * copy out the core of the inode, because if the inode is dirty at all | |
3533 | * the core must be. | |
1da177e4 | 3534 | */ |
93f958f9 | 3535 | xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn); |
1da177e4 LT |
3536 | |
3537 | /* Wrap, we never let the log put out DI_MAX_FLUSH */ | |
3538 | if (ip->i_d.di_flushiter == DI_MAX_FLUSH) | |
3539 | ip->i_d.di_flushiter = 0; | |
3540 | ||
005c5db8 DW |
3541 | xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); |
3542 | if (XFS_IFORK_Q(ip)) | |
3543 | xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); | |
1da177e4 LT |
3544 | |
3545 | /* | |
f5d8d5c4 CH |
3546 | * We've recorded everything logged in the inode, so we'd like to clear |
3547 | * the ili_fields bits so we don't log and flush things unnecessarily. | |
3548 | * However, we can't stop logging all this information until the data | |
3549 | * we've copied into the disk buffer is written to disk. If we did we | |
3550 | * might overwrite the copy of the inode in the log with all the data | |
3551 | * after re-logging only part of it, and in the face of a crash we | |
3552 | * wouldn't have all the data we need to recover. | |
1da177e4 | 3553 | * |
f5d8d5c4 CH |
3554 | * What we do is move the bits to the ili_last_fields field. When |
3555 | * logging the inode, these bits are moved back to the ili_fields field. | |
664ffb8a CH |
3556 | * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since |
3557 | * we know that the information those bits represent is permanently on | |
f5d8d5c4 CH |
3558 | * disk. As long as the flush completes before the inode is logged |
3559 | * again, then both ili_fields and ili_last_fields will be cleared. | |
1da177e4 | 3560 | */ |
f2019299 BF |
3561 | error = 0; |
3562 | flush_out: | |
1319ebef | 3563 | spin_lock(&iip->ili_lock); |
93848a99 CH |
3564 | iip->ili_last_fields = iip->ili_fields; |
3565 | iip->ili_fields = 0; | |
fc0561ce | 3566 | iip->ili_fsync_fields = 0; |
1319ebef | 3567 | spin_unlock(&iip->ili_lock); |
1da177e4 | 3568 | |
1319ebef DC |
3569 | /* |
3570 | * Store the current LSN of the inode so that we can tell whether the | |
664ffb8a | 3571 | * item has moved in the AIL from xfs_buf_inode_iodone(). |
1319ebef | 3572 | */ |
93848a99 CH |
3573 | xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, |
3574 | &iip->ili_item.li_lsn); | |
1da177e4 | 3575 | |
93848a99 CH |
3576 | /* generate the checksum. */ |
3577 | xfs_dinode_calc_crc(mp, dip); | |
f2019299 | 3578 | return error; |
1da177e4 | 3579 | } |
44a8736b | 3580 | |
e6187b34 DC |
3581 | /* |
3582 | * Non-blocking flush of dirty inode metadata into the backing buffer. | |
3583 | * | |
3584 | * The caller must have a reference to the inode and hold the cluster buffer | |
3585 | * locked. The function will walk across all the inodes attached to the cluster |
3586 | * buffer that it can lock without blocking, and flush them to the cluster buffer. |
3587 | * | |
5717ea4d DC |
3588 | * On successful flushing of at least one inode, the caller must write out the |
3589 | * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and | |
3590 | * the caller needs to release the buffer. On failure, the filesystem will be | |
3591 | * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED | |
3592 | * will be returned. | |
e6187b34 DC |
3593 | */ |
3594 | int | |
3595 | xfs_iflush_cluster( | |
e6187b34 DC |
3596 | struct xfs_buf *bp) |
3597 | { | |
5717ea4d DC |
3598 | struct xfs_mount *mp = bp->b_mount; |
3599 | struct xfs_log_item *lip, *n; | |
3600 | struct xfs_inode *ip; | |
3601 | struct xfs_inode_log_item *iip; | |
e6187b34 | 3602 | int clcount = 0; |
5717ea4d | 3603 | int error = 0; |
e6187b34 | 3604 | |
5717ea4d DC |
3605 | /* |
3606 | * We must use the safe variant here because on shutdown xfs_iflush_abort() |
3607 | * can remove the log item from the list. |
3608 | */ | |
3609 | list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) { | |
3610 | iip = (struct xfs_inode_log_item *)lip; | |
3611 | ip = iip->ili_inode; | |
e6187b34 DC |
3612 | |
3613 | /* | |
5717ea4d | 3614 | * Quick and dirty check to avoid locks if possible. |
e6187b34 | 3615 | */ |
718ecc50 | 3616 | if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) |
5717ea4d DC |
3617 | continue; |
3618 | if (xfs_ipincount(ip)) | |
e6187b34 | 3619 | continue; |
e6187b34 DC |
3620 | |
3621 | /* | |
5717ea4d DC |
3622 | * The inode is still attached to the buffer, which means it is |
3623 | * dirty but reclaim might try to grab it. Check carefully for | |
3624 | * that, and grab the ilock while still holding the i_flags_lock | |
3625 | * to guarantee reclaim will not be able to reclaim this inode | |
3626 | * once we drop the i_flags_lock. | |
e6187b34 | 3627 | */ |
5717ea4d DC |
3628 | spin_lock(&ip->i_flags_lock); |
3629 | ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE)); | |
718ecc50 | 3630 | if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) { |
5717ea4d DC |
3631 | spin_unlock(&ip->i_flags_lock); |
3632 | continue; | |
e6187b34 | 3633 | } |
e6187b34 DC |
3634 | |
3635 | /* | |
5717ea4d DC |
3636 | * ILOCK will pin the inode against reclaim and prevent |
3637 | * concurrent transactions modifying the inode while we are | |
718ecc50 DC |
3638 | * flushing the inode. If we get the lock, set the flushing |
3639 | * state before we drop the i_flags_lock. | |
e6187b34 | 3640 | */ |
5717ea4d DC |
3641 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { |
3642 | spin_unlock(&ip->i_flags_lock); | |
e6187b34 | 3643 | continue; |
5717ea4d | 3644 | } |
718ecc50 | 3645 | __xfs_iflags_set(ip, XFS_IFLUSHING); |
5717ea4d | 3646 | spin_unlock(&ip->i_flags_lock); |
e6187b34 | 3647 | |
e6187b34 | 3648 | /* |
5717ea4d DC |
3649 | * Abort flushing this inode if we are shut down because the |
3650 | * inode may not currently be in the AIL. This can occur when | |
3651 | * a log I/O failure unpins the inode without inserting it into the |
3652 | * AIL, leaving a dirty/unpinned inode attached to the buffer | |
3653 | * that otherwise looks like it should be flushed. | |
e6187b34 | 3654 | */ |
5717ea4d DC |
3655 | if (XFS_FORCED_SHUTDOWN(mp)) { |
3656 | xfs_iunpin_wait(ip); | |
5717ea4d DC |
3657 | xfs_iflush_abort(ip); |
3658 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | |
3659 | error = -EIO; | |
e6187b34 DC |
3660 | continue; |
3661 | } | |
3662 | ||
5717ea4d DC |
3663 | /* don't block waiting on a log force to unpin dirty inodes */ |
3664 | if (xfs_ipincount(ip)) { | |
718ecc50 | 3665 | xfs_iflags_clear(ip, XFS_IFLUSHING); |
5717ea4d DC |
3666 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
3667 | continue; | |
e6187b34 | 3668 | } |
e6187b34 | 3669 | |
5717ea4d DC |
3670 | if (!xfs_inode_clean(ip)) |
3671 | error = xfs_iflush(ip, bp); | |
3672 | else | |
718ecc50 | 3673 | xfs_iflags_clear(ip, XFS_IFLUSHING); |
5717ea4d DC |
3674 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
3675 | if (error) | |
3676 | break; | |
3677 | clcount++; | |
e6187b34 DC |
3678 | } |
3679 | ||
e6187b34 DC |
3680 | if (error) { |
3681 | bp->b_flags |= XBF_ASYNC; | |
3682 | xfs_buf_ioend_fail(bp); | |
3683 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | |
5717ea4d | 3684 | return error; |
e6187b34 | 3685 | } |
5717ea4d DC |
3686 | |
3687 | if (!clcount) | |
3688 | return -EAGAIN; | |
3689 | ||
3690 | XFS_STATS_INC(mp, xs_icluster_flushcnt); | |
3691 | XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount); | |
3692 | return 0; | |
3693 | ||
e6187b34 DC |
3694 | } |
3695 | ||
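/*
 * Caller-side sketch (illustrative, based only on the contract documented
 * above, not on the real inode log item push path): write the buffer out
 * only when at least one inode was actually flushed. xfs_bwrite() and
 * xfs_buf_relse() are assumed to be the usual synchronous buffer write and
 * release helpers.
 */
static int example_flush_inode_buf(struct xfs_buf *bp)
{
	int	error = xfs_iflush_cluster(bp);

	if (error == -EAGAIN) {
		/* Nothing was flushed: only the buffer lock is ours to drop. */
		xfs_buf_relse(bp);
		return error;
	}
	if (error) {
		/* Shutdown path: the buffer was already unlocked and released. */
		return error;
	}

	/* At least one inode was flushed: issue the buffer write. */
	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	return error;
}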
44a8736b DW |
3696 | /* Release an inode. */ |
3697 | void | |
3698 | xfs_irele( | |
3699 | struct xfs_inode *ip) | |
3700 | { | |
3701 | trace_xfs_irele(ip, _RET_IP_); | |
3702 | iput(VFS_I(ip)); | |
3703 | } | |
54fbdd10 CH |
3704 | |
3705 | /* | |
3706 | * Ensure all committed transactions touching the inode are written to the log. |
3707 | */ | |
3708 | int | |
3709 | xfs_log_force_inode( | |
3710 | struct xfs_inode *ip) | |
3711 | { | |
3712 | xfs_lsn_t lsn = 0; | |
3713 | ||
3714 | xfs_ilock(ip, XFS_ILOCK_SHARED); | |
3715 | if (xfs_ipincount(ip)) | |
3716 | lsn = ip->i_itemp->ili_last_lsn; | |
3717 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | |
3718 | ||
3719 | if (!lsn) | |
3720 | return 0; | |
3721 | return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL); | |
3722 | } | |
e2aaee9c DW |
3723 | |
3724 | /* | |
3725 | * Grab the exclusive iolock for a data copy from src to dest, making sure to | |
3726 | * abide by the VFS locking order (lowest pointer value goes first) and break the |
3727 | * layout leases before proceeding. The loop is needed because we cannot call | |
3728 | * the blocking break_layout() with the iolocks held, and therefore have to | |
3729 | * back out both locks. | |
3730 | */ | |
3731 | static int | |
3732 | xfs_iolock_two_inodes_and_break_layout( | |
3733 | struct inode *src, | |
3734 | struct inode *dest) | |
3735 | { | |
3736 | int error; | |
3737 | ||
3738 | if (src > dest) | |
3739 | swap(src, dest); | |
3740 | ||
3741 | retry: | |
3742 | /* Wait to break both inodes' layouts before we start locking. */ | |
3743 | error = break_layout(src, true); | |
3744 | if (error) | |
3745 | return error; | |
3746 | if (src != dest) { | |
3747 | error = break_layout(dest, true); | |
3748 | if (error) | |
3749 | return error; | |
3750 | } | |
3751 | ||
3752 | /* Lock one inode and make sure nobody got in and leased it. */ | |
3753 | inode_lock(src); | |
3754 | error = break_layout(src, false); | |
3755 | if (error) { | |
3756 | inode_unlock(src); | |
3757 | if (error == -EWOULDBLOCK) | |
3758 | goto retry; | |
3759 | return error; | |
3760 | } | |
3761 | ||
3762 | if (src == dest) | |
3763 | return 0; | |
3764 | ||
3765 | /* Lock the other inode and make sure nobody got in and leased it. */ | |
3766 | inode_lock_nested(dest, I_MUTEX_NONDIR2); | |
3767 | error = break_layout(dest, false); | |
3768 | if (error) { | |
3769 | inode_unlock(src); | |
3770 | inode_unlock(dest); | |
3771 | if (error == -EWOULDBLOCK) | |
3772 | goto retry; | |
3773 | return error; | |
3774 | } | |
3775 | ||
3776 | return 0; | |
3777 | } | |
3778 | ||
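/*
 * Illustrative walk-through of the retry loop above (added commentary): if a
 * new layout lease lands on dest between the blocking break_layout(dest, true)
 * call and the non-blocking break_layout(dest, false) call made under the
 * inode locks, the latter returns -EWOULDBLOCK. We then drop every inode lock
 * we hold and restart from the blocking breaks, so we never sleep waiting for
 * a lease while holding either inode lock.
 */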
3779 | /* | |
3780 | * Lock two inodes so that userspace cannot initiate I/O via file syscalls or | |
3781 | * mmap activity. | |
3782 | */ | |
3783 | int | |
3784 | xfs_ilock2_io_mmap( | |
3785 | struct xfs_inode *ip1, | |
3786 | struct xfs_inode *ip2) | |
3787 | { | |
3788 | int ret; | |
3789 | ||
3790 | ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2)); | |
3791 | if (ret) | |
3792 | return ret; | |
3793 | if (ip1 == ip2) | |
3794 | xfs_ilock(ip1, XFS_MMAPLOCK_EXCL); | |
3795 | else | |
3796 | xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL, | |
3797 | ip2, XFS_MMAPLOCK_EXCL); | |
3798 | return 0; | |
3799 | } | |
3800 | ||
3801 | /* Unlock both inodes to allow IO and mmap activity. */ | |
3802 | void | |
3803 | xfs_iunlock2_io_mmap( | |
3804 | struct xfs_inode *ip1, | |
3805 | struct xfs_inode *ip2) | |
3806 | { | |
3807 | bool same_inode = (ip1 == ip2); | |
3808 | ||
3809 | xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL); | |
3810 | if (!same_inode) | |
3811 | xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); | |
3812 | inode_unlock(VFS_I(ip2)); | |
3813 | if (!same_inode) | |
3814 | inode_unlock(VFS_I(ip1)); | |
3815 | } |
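/*
 * Usage sketch (illustrative; the real user is the reflink remap path): the
 * two helpers above are meant to bracket a data copy so that neither file
 * I/O syscalls nor page faults can race with it. The function name is
 * hypothetical.
 */
static int example_copy_locked(struct xfs_inode *src, struct xfs_inode *dest)
{
	int	error;

	error = xfs_ilock2_io_mmap(src, dest);
	if (error)
		return error;

	/* ... move data between src and dest while both are quiesced ... */

	xfs_iunlock2_io_mmap(src, dest);
	return 0;
}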