/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;

	/*
	 * If INEW is set this inode is being set up
	 * If IRECLAIM is set this inode is being torn down
	 * Pause and try again.
	 */
	if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) {
		error = EAGAIN;
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	}

	/* If IRECLAIMABLE is set, we've torn down the vfs inode part */
	if (xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {

		/*
		 * If lookup is racing with unlink, then we should return an
		 * error immediately so we don't remove it from the reclaim
		 * list and potentially leak the inode.
		 */
		if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
			error = ENOENT;
			goto out_error;
		}

		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		/*
		 * We need to re-initialise the VFS inode as it has been
		 * 'freed' by the VFS. Do this here so we can deal with
		 * errors cleanly, then tag it so it can be set up correctly
		 * later.
		 */
		if (!inode_init_always(mp->m_super, VFS_I(ip))) {
			error = ENOMEM;
			goto out_error;
		}
		xfs_iflags_set(ip, XFS_INEW);
		xfs_iflags_clear(ip, XFS_IRECLAIMABLE);

		/* clear the radix tree reclaim flag as well. */
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		read_unlock(&pag->pag_ici_lock);
	} else if (!igrab(VFS_I(ip))) {
		/* If the VFS inode is being torn down, pause and try again. */
		error = EAGAIN;
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	} else {
		/* we've got a live one */
		read_unlock(&pag->pag_ici_lock);
	}

	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out;
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);
	return 0;

out_error:
	read_unlock(&pag->pag_ici_lock);
out:
	return error;
}


static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_inode	*ip;
	int			error;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	/*
	 * Read the disk inode attributes into a new inode structure and get
	 * a new vnode for it. This should also initialize i_ino and i_mount.
	 */
	error = xfs_iread(mp, tp, ino, &ip, bno,
			  (flags & XFS_IGET_BULKSTAT) ? XFS_IMAP_BULKSTAT : 0);
	if (error)
		return error;

	xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	if (lock_flags)
		xfs_ilock(ip, lock_flags);

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_unlock;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	*ipp = ip;
	return 0;

out_unlock:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
out_destroy:
	xfs_idestroy(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *        if known (as by bulkstat), else 0.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_put_perag(mp, pag);

	xfs_iflags_set(ip, XFS_IMODIFIED);
	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_put_perag(mp, pag);
	return error;
}
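
/*
 * A minimal usage sketch (not part of the original source), assuming a valid
 * mount (mp) and on-disk inode number (ino) from the surrounding context: a
 * typical caller outside a transaction takes a shared inode lock and drops
 * both the lock and the reference with xfs_iput() when it is done.
 *
 *	xfs_inode_t	*ip;
 *	int		error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
 *	if (error)
 *		return error;
 *	... read inode fields under the shared inode lock ...
 *	xfs_iput(ip, XFS_ILOCK_SHARED);
 *
 * Note that xfs_iget() retries internally on EAGAIN, so callers only see
 * hard failures such as EINVAL or ENOENT.
 */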


/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_inode_t	*ip;
	xfs_perag_t	*pag;

	pag = xfs_get_perag(mp, ino);
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/* the returned inode must match the transaction */
	if (ip && (ip->i_transp != tp))
		return NULL;
	return ip;
}
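
/*
 * A minimal sketch of how this lookup is typically used (mp, ino and an
 * active transaction tp are assumed from the surrounding context): check
 * whether the inode is already in core and attached to this transaction
 * before paying for a full xfs_iget().
 *
 *	xfs_inode_t	*ip;
 *
 *	ip = xfs_inode_incore(mp, ino, tp);
 *	if (ip != NULL) {
 *		... inode is already joined to tp, reuse it ...
 *	}
 */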

/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be released.
 *               See the comment on xfs_iunlock() for a list of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if ((ip->i_d.di_mode == 0)) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}


/*
 * This routine embodies the part of the reclaim code that pulls
 * the inode from the inode hash table and the mount structure's
 * inode list.
 * This should only be called from xfs_reclaim().
 */
void
xfs_ireclaim(xfs_inode_t *ip)
{
	/*
	 * Remove from old hash list and mount list.
	 */
	XFS_STATS_INC(xs_ig_reclaims);

	xfs_iextract(ip);

	/*
	 * Here we do a spurious inode lock in order to coordinate with inode
	 * cache radix tree lookups.  This is because the lookup can reference
	 * the inodes in the cache without taking references.  We make that OK
	 * here by ensuring that we wait until the inode is unlocked after the
	 * lookup before we go ahead and free it.  We get both the ilock and
	 * the iolock because the code may need to drop the ilock but will
	 * still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Release dquots (and their references) if any. An inode may escape
	 * xfs_inactive and get here via the vn_alloc->vn_reclaim path.
	 */
	XFS_QM_DQDETACH(ip->i_mount, ip);

	/*
	 * Free all memory associated with the inode.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_idestroy(ip);
}

/*
 * This routine removes an about-to-be-destroyed inode from
 * all of the lists in which it is located with the exception
 * of the behavior chain.
 */
void
xfs_iextract(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	mp->m_ireclaims++;
}

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
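
/*
 * A minimal sketch of the intended pairing (ip is assumed from the
 * surrounding context): the mode returned by xfs_ilock_map_shared() must be
 * handed back to xfs_iunlock_map_shared(), because the lock may have been
 * taken exclusively if the extent list still had to be read in.
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent map ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */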

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
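
/*
 * A minimal sketch of a combined lock/unlock cycle (ip is assumed from the
 * surrounding context): xfs_ilock() itself acquires the IO lock before the
 * inode lock, so a single call with both flags respects the ordering rule
 * described above, and the same flags are passed back to xfs_iunlock().
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */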

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be locked.
 *       See the comment for xfs_ilock() for a list of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}
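
/*
 * A minimal sketch of the non-blocking pattern (ip is assumed from the
 * surrounding context): the return value is 1 on success and 0 on failure,
 * and nothing is left held on failure, so the caller can simply fall back
 * to another strategy.
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *		... could not get the lock without sleeping, back off ...
 *		return EAGAIN;
 *	}
 *	... do the work ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */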

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be unlocked.
 *       See the comment for xfs_ilock() for a list of valid values
 *       for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
}
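
/*
 * A minimal sketch of a demotion (ip is assumed from the surrounding
 * context): take the inode lock exclusively for the update, demote it to
 * shared once the exclusive work is done, and release it as a shared lock
 * afterwards.
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... exclusive setup ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... continue under the shared lock ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */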

#ifdef DEBUG
/*
 * Debug-only routine: without additional rw_semaphore APIs, we can now only
 * answer requests regarding whether we hold the lock for write (reader state
 * is outside our visibility, we only track writer state).
 *
 * Note: this means !xfs_isilocked would give false positives, so don't do that.
 */
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
			XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;
	}

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
			XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
	}

	return 1;
}
#endif