/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trans_priv.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_error.h"
#include "xfs_trace.h"


kmem_zone_t	*xfs_ili_zone;		/* inode log item zone */

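/*
 * Pull the embedding inode log item back out of a generic log item pointer
 * (the ili_item member is embedded in struct xfs_inode_log_item).
 */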
static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_inode_log_item, ili_item);
}


/*
 * This returns the number of iovecs needed to log the given inode item.
 *
 * We need one iovec for the inode log format structure, one for the
 * inode core, and possibly one for the inode data/extents/b-tree root
 * and one for the inode attribute data/extents/b-tree root.
 */
STATIC uint
xfs_inode_item_size(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	uint			nvecs = 2;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_format.ilf_fields & XFS_ILOG_DEXT) &&
		    ip->i_d.di_nextents > 0 &&
		    ip->i_df.if_bytes > 0)
			nvecs++;
		break;

	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0)
			nvecs++;
		break;

	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_format.ilf_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0)
			nvecs++;
		break;

	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_UUID:
		break;

	default:
		ASSERT(0);
		break;
	}

	if (!XFS_IFORK_Q(ip))
		return nvecs;

	/*
	 * Log any necessary attribute data.
	 */
	switch (ip->i_d.di_aformat) {
	case XFS_DINODE_FMT_EXTENTS:
		if ((iip->ili_format.ilf_fields & XFS_ILOG_AEXT) &&
		    ip->i_d.di_anextents > 0 &&
		    ip->i_afp->if_bytes > 0)
			nvecs++;
		break;

	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0)
			nvecs++;
		break;

	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_format.ilf_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0)
			nvecs++;
		break;

	default:
		ASSERT(0);
		break;
	}

	return nvecs;
}

/*
 * xfs_inode_item_format_extents - convert in-core extents to on-disk form
 *
 * For either the data or attr fork in extent format, we need to endian
 * convert the in-core extents as we place them into the on-disk inode. In
 * this case, we need to do this conversion before we write the extents into
 * the log. Because we don't have the disk inode to write into here, we
 * allocate a buffer and format the extents into it via xfs_iextents_copy().
 * We free the buffer in the unlock routine after the copy for the log has
 * been made.
 *
 * In the case of the data fork, the in-core and on-disk fork sizes can be
 * different due to delayed allocation extents. We only log on-disk extents
 * here, so always use the physical fork size to determine the size of the
 * buffer we need to allocate.
 */
STATIC void
xfs_inode_item_format_extents(
	struct xfs_inode	*ip,
	struct xfs_log_iovec	*vecp,
	int			whichfork,
	int			type)
{
	xfs_bmbt_rec_t		*ext_buffer;

	ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP);
	if (whichfork == XFS_DATA_FORK)
		ip->i_itemp->ili_extents_buf = ext_buffer;
	else
		ip->i_itemp->ili_aextents_buf = ext_buffer;

	vecp->i_addr = ext_buffer;
	vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork);
	vecp->i_type = type;
}

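/*
 * Note: on XFS_NATIVE_HOST (big-endian) builds the in-core extent format
 * already matches the on-disk format, so the format routine below can point
 * an iovec straight at the in-core extent array when there are no delayed
 * allocation extents; otherwise it goes through the helper above.
 */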
/*
 * This is called to fill in the vector of log iovecs for the
 * given inode log item.  It fills the first item with an inode
 * log format structure, the second with the on-disk inode structure,
 * and a possible third and/or fourth with the inode data/extents/b-tree
 * root and inode attribute data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*vecp)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	uint			nvecs;
	size_t			data_bytes;
	xfs_mount_t		*mp;

	vecp->i_addr = &iip->ili_format;
	vecp->i_len  = sizeof(xfs_inode_log_format_t);
	vecp->i_type = XLOG_REG_TYPE_IFORMAT;
	vecp++;
	nvecs = 1;

	vecp->i_addr = &ip->i_d;
	vecp->i_len  = sizeof(struct xfs_icdinode);
	vecp->i_type = XLOG_REG_TYPE_ICORE;
	vecp++;
	nvecs++;
	iip->ili_format.ilf_fields |= XFS_ILOG_CORE;

	/*
	 * If this is really an old format inode, then we need to
	 * log it as such.  This means that we have to copy the link
	 * count from the new field to the old.  We don't have to worry
	 * about the new fields, because nothing trusts them as long as
	 * the old inode version number is there.  If the superblock already
	 * has a new version number, then we don't bother converting back.
	 */
	mp = ip->i_mount;
	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
	if (ip->i_d.di_version == 1) {
		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
			/*
			 * Convert it back.
			 */
			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
			ip->i_d.di_onlink = ip->i_d.di_nlink;
		} else {
			/*
			 * The superblock version has already been bumped,
			 * so just make the conversion to the new inode
			 * format permanent.
			 */
			ip->i_d.di_version = 2;
			ip->i_d.di_onlink = 0;
			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
		}
	}

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_format.ilf_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
			  XFS_ILOG_DEV | XFS_ILOG_UUID);

		if ((iip->ili_format.ilf_fields & XFS_ILOG_DEXT) &&
		    ip->i_d.di_nextents > 0 &&
		    ip->i_df.if_bytes > 0) {
			ASSERT(ip->i_df.if_u1.if_extents != NULL);
			ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0);
			ASSERT(iip->ili_extents_buf == NULL);

#ifdef XFS_NATIVE_HOST
			if (ip->i_d.di_nextents == ip->i_df.if_bytes /
					(uint)sizeof(xfs_bmbt_rec_t)) {
				/*
				 * There are no delayed allocation
				 * extents, so just point to the
				 * real extents array.
				 */
				vecp->i_addr = ip->i_df.if_u1.if_extents;
				vecp->i_len = ip->i_df.if_bytes;
				vecp->i_type = XLOG_REG_TYPE_IEXT;
			} else
#endif
			{
				xfs_inode_item_format_extents(ip, vecp,
					XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
			}
			ASSERT(vecp->i_len <= ip->i_df.if_bytes);
			iip->ili_format.ilf_dsize = vecp->i_len;
			vecp++;
			nvecs++;
		} else {
			iip->ili_format.ilf_fields &= ~XFS_ILOG_DEXT;
		}
		break;

	case XFS_DINODE_FMT_BTREE:
		iip->ili_format.ilf_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DEXT |
			  XFS_ILOG_DEV | XFS_ILOG_UUID);

		if ((iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) &&
		    ip->i_df.if_broot_bytes > 0) {
			ASSERT(ip->i_df.if_broot != NULL);
			vecp->i_addr = ip->i_df.if_broot;
			vecp->i_len = ip->i_df.if_broot_bytes;
			vecp->i_type = XLOG_REG_TYPE_IBROOT;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;
		} else {
			ASSERT(!(iip->ili_format.ilf_fields &
				 XFS_ILOG_DBROOT));
#ifdef XFS_TRANS_DEBUG
			if (iip->ili_root_size > 0) {
				ASSERT(iip->ili_root_size ==
				       ip->i_df.if_broot_bytes);
				ASSERT(memcmp(iip->ili_orig_root,
					      ip->i_df.if_broot,
					      iip->ili_root_size) == 0);
			} else {
				ASSERT(ip->i_df.if_broot_bytes == 0);
			}
#endif
			iip->ili_format.ilf_fields &= ~XFS_ILOG_DBROOT;
		}
		break;

	case XFS_DINODE_FMT_LOCAL:
		iip->ili_format.ilf_fields &=
			~(XFS_ILOG_DEXT | XFS_ILOG_DBROOT |
			  XFS_ILOG_DEV | XFS_ILOG_UUID);
		if ((iip->ili_format.ilf_fields & XFS_ILOG_DDATA) &&
		    ip->i_df.if_bytes > 0) {
			ASSERT(ip->i_df.if_u1.if_data != NULL);
			ASSERT(ip->i_d.di_size > 0);

			vecp->i_addr = ip->i_df.if_u1.if_data;
			/*
			 * Round i_bytes up to a word boundary.
			 * The underlying memory is guaranteed to
			 * be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_df.if_bytes, 4);
			ASSERT((ip->i_df.if_real_bytes == 0) ||
			       (ip->i_df.if_real_bytes == data_bytes));
			vecp->i_len = (int)data_bytes;
			vecp->i_type = XLOG_REG_TYPE_ILOCAL;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_dsize = (unsigned)data_bytes;
		} else {
			iip->ili_format.ilf_fields &= ~XFS_ILOG_DDATA;
		}
		break;

	case XFS_DINODE_FMT_DEV:
		iip->ili_format.ilf_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
			  XFS_ILOG_DEXT | XFS_ILOG_UUID);
		if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
			iip->ili_format.ilf_u.ilfu_rdev =
				ip->i_df.if_u2.if_rdev;
		}
		break;

	case XFS_DINODE_FMT_UUID:
		iip->ili_format.ilf_fields &=
			~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
			  XFS_ILOG_DEXT | XFS_ILOG_DEV);
		if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
			iip->ili_format.ilf_u.ilfu_uuid =
				ip->i_df.if_u2.if_uuid;
		}
		break;

	default:
		ASSERT(0);
		break;
	}

	/*
	 * If there are no attributes associated with the file, then we're done.
	 */
	if (!XFS_IFORK_Q(ip)) {
		iip->ili_format.ilf_size = nvecs;
		iip->ili_format.ilf_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT);
		return;
	}

	switch (ip->i_d.di_aformat) {
	case XFS_DINODE_FMT_EXTENTS:
		iip->ili_format.ilf_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);

		if ((iip->ili_format.ilf_fields & XFS_ILOG_AEXT) &&
		    ip->i_d.di_anextents > 0 &&
		    ip->i_afp->if_bytes > 0) {
			ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) ==
				ip->i_d.di_anextents);
			ASSERT(ip->i_afp->if_u1.if_extents != NULL);
#ifdef XFS_NATIVE_HOST
			/*
			 * There are no delayed allocation extents
			 * for attributes, so just point at the array.
			 */
			vecp->i_addr = ip->i_afp->if_u1.if_extents;
			vecp->i_len = ip->i_afp->if_bytes;
			vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
#else
			ASSERT(iip->ili_aextents_buf == NULL);
			xfs_inode_item_format_extents(ip, vecp,
					XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);
#endif
			iip->ili_format.ilf_asize = vecp->i_len;
			vecp++;
			nvecs++;
		} else {
			iip->ili_format.ilf_fields &= ~XFS_ILOG_AEXT;
		}
		break;

	case XFS_DINODE_FMT_BTREE:
		iip->ili_format.ilf_fields &=
			~(XFS_ILOG_ADATA | XFS_ILOG_AEXT);

		if ((iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) &&
		    ip->i_afp->if_broot_bytes > 0) {
			ASSERT(ip->i_afp->if_broot != NULL);

			vecp->i_addr = ip->i_afp->if_broot;
			vecp->i_len = ip->i_afp->if_broot_bytes;
			vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;
		} else {
			iip->ili_format.ilf_fields &= ~XFS_ILOG_ABROOT;
		}
		break;

	case XFS_DINODE_FMT_LOCAL:
		iip->ili_format.ilf_fields &=
			~(XFS_ILOG_AEXT | XFS_ILOG_ABROOT);

		if ((iip->ili_format.ilf_fields & XFS_ILOG_ADATA) &&
		    ip->i_afp->if_bytes > 0) {
			ASSERT(ip->i_afp->if_u1.if_data != NULL);

			vecp->i_addr = ip->i_afp->if_u1.if_data;
			/*
			 * Round i_bytes up to a word boundary.
			 * The underlying memory is guaranteed to
			 * be there by xfs_idata_realloc().
			 */
			data_bytes = roundup(ip->i_afp->if_bytes, 4);
			ASSERT((ip->i_afp->if_real_bytes == 0) ||
			       (ip->i_afp->if_real_bytes == data_bytes));
			vecp->i_len = (int)data_bytes;
			vecp->i_type = XLOG_REG_TYPE_IATTR_LOCAL;
			vecp++;
			nvecs++;
			iip->ili_format.ilf_asize = (unsigned)data_bytes;
		} else {
			iip->ili_format.ilf_fields &= ~XFS_ILOG_ADATA;
		}
		break;

	default:
		ASSERT(0);
		break;
	}

	iip->ili_format.ilf_size = nvecs;
}

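/*
 * Note that, besides filling in the iovecs, the format routine above also
 * clears any XFS_ILOG_* flags in ilf_fields that do not apply to the fork's
 * current format or whose fork is empty, so the on-disk log only ever
 * describes regions that were actually copied into the vectors.
 */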
/*
 * This is called to pin the inode associated with the inode log
 * item in memory so it cannot be written out.
 */
STATIC void
xfs_inode_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	trace_xfs_inode_pin(ip, _RET_IP_);
	atomic_inc(&ip->i_pincount);
}

/*
 * This is called to unpin the inode associated with the inode log
 * item which was previously pinned with a call to xfs_inode_item_pin().
 *
 * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0.
 */
STATIC void
xfs_inode_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	trace_xfs_inode_unpin(ip, _RET_IP_);
	ASSERT(atomic_read(&ip->i_pincount) > 0);
	if (atomic_dec_and_test(&ip->i_pincount))
		wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
}

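/*
 * The @remove argument above is part of the common ->iop_unpin() interface
 * and is not needed by inode items, so it is simply ignored here.
 */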
/*
 * This is called to attempt to lock the inode associated with this
 * inode log item, in preparation for the push routine which does the actual
 * iflush.  Don't sleep on the inode lock or the flush lock.
 *
 * If the flush lock is already held, indicating that the inode has
 * been or is in the process of being flushed, then (ideally) we'd like to
 * see if the inode's buffer is still incore, and if so give it a nudge.
 * We delay doing so until the pushbuf routine, though, to avoid holding
 * the AIL lock across a call to the blackhole which is the buffer cache.
 * Also we don't want to sleep in any device strategy routines, which can
 * happen if we do the subsequent bawrite in here.
 */
STATIC uint
xfs_inode_item_trylock(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	if (xfs_ipincount(ip) > 0)
		return XFS_ITEM_PINNED;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
		return XFS_ITEM_LOCKED;

	if (!xfs_iflock_nowait(ip)) {
		/*
		 * inode has already been flushed to the backing buffer,
		 * leave it locked in shared mode, pushbuf routine will
		 * unlock it.
		 */
		return XFS_ITEM_PUSHBUF;
	}

	/* Stale items should force out the iclog */
	if (ip->i_flags & XFS_ISTALE) {
		xfs_ifunlock(ip);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return XFS_ITEM_PINNED;
	}

#ifdef DEBUG
	if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ASSERT(iip->ili_format.ilf_fields != 0);
		ASSERT(iip->ili_logged == 0);
		ASSERT(lip->li_flags & XFS_LI_IN_AIL);
	}
#endif
	return XFS_ITEM_SUCCESS;
}

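/*
 * Rough summary of the return values above, as the AIL push code is expected
 * to use them: PINNED means back off (and possibly force the log), LOCKED
 * means someone else holds the inode lock so skip the item for now, PUSHBUF
 * asks for the pushbuf routine to nudge the backing buffer, and SUCCESS means
 * the inode is locked and flush locked, ready for the push routine.
 */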
/*
 * Unlock the inode associated with the inode log item.
 * Clear the fields of the inode and inode log item that
 * are specific to the current transaction.  If the
 * hold flag is set, do not unlock the inode.
 */
STATIC void
xfs_inode_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	unsigned short		lock_flags;

	ASSERT(ip->i_itemp != NULL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * If the inode needed a separate buffer with which to log
	 * its extents, then free it now.
	 */
	if (iip->ili_extents_buf != NULL) {
		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS);
		ASSERT(ip->i_d.di_nextents > 0);
		ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_DEXT);
		ASSERT(ip->i_df.if_bytes > 0);
		kmem_free(iip->ili_extents_buf);
		iip->ili_extents_buf = NULL;
	}
	if (iip->ili_aextents_buf != NULL) {
		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS);
		ASSERT(ip->i_d.di_anextents > 0);
		ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_AEXT);
		ASSERT(ip->i_afp->if_bytes > 0);
		kmem_free(iip->ili_aextents_buf);
		iip->ili_aextents_buf = NULL;
	}

	lock_flags = iip->ili_lock_flags;
	iip->ili_lock_flags = 0;
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
}

/*
 * This is called to find out where the oldest active copy of the inode log
 * item in the on disk log resides now that the last log write of it completed
 * at the given lsn.  Since we always re-log all dirty data in an inode, the
 * latest copy in the on disk log is the only one that matters.  Therefore,
 * simply return the given lsn.
 *
 * If the inode has been marked stale because the cluster is being freed, we
 * don't want to (re-)insert this inode into the AIL. There is a race condition
 * where the cluster buffer may be unpinned before the inode is inserted into
 * the AIL during transaction committed processing. If the buffer is unpinned
 * before the inode item has been committed and inserted, then it is possible
 * for the buffer to be written and IO completes before the inode is inserted
 * into the AIL. In that case, we'd be inserting a clean, stale inode into the
 * AIL which will never get removed. It will, however, get reclaimed, which
 * triggers an assert in xfs_inode_free() complaining about freeing an inode
 * still in the AIL.
 *
 * To avoid this, just unpin the inode directly and return an LSN of -1 so the
 * transaction committed code knows that it does not need to do any further
 * processing on the item.
 */
STATIC xfs_lsn_t
xfs_inode_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_inode_item_unpin(lip, 0);
		return -1;
	}
	return lsn;
}

/*
 * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
 * failed to get the inode flush lock but did get the inode locked SHARED.
 * Here we're trying to see if the inode buffer is incore, and if so whether
 * it's marked delayed write.  If that's the case, we'll promote it and that
 * will allow the caller to write the buffer by triggering the xfsbufd to run.
 */
STATIC bool
xfs_inode_item_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp;
	bool			ret = true;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));

	/*
	 * If a flush is not in progress anymore, chances are that the
	 * inode was taken off the AIL. So, just get out.
	 */
	if (!xfs_isiflocked(ip) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return true;
	}

	bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
			iip->ili_format.ilf_len, XBF_TRYLOCK);

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (!bp)
		return true;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	if (xfs_buf_ispinned(bp))
		ret = false;
	xfs_buf_relse(bp);
	return ret;
}

/*
 * This is called to asynchronously write the inode associated with this
 * inode log item out to disk.  The inode will already have been locked by
 * a successful call to xfs_inode_item_trylock().
 */
STATIC void
xfs_inode_item_push(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));

	/*
	 * Since we were able to lock the inode's flush lock and
	 * we found it on the AIL, the inode must be dirty.  This
	 * is because the inode is removed from the AIL while still
	 * holding the flush lock in xfs_iflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the inode.
	 */
	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) ||
	       iip->ili_format.ilf_fields != 0);

	/*
	 * Push the inode to its backing buffer. This will not remove the
	 * inode from the AIL - a further push will be required to trigger a
	 * buffer push. However, this allows all the dirty inodes to be pushed
	 * to the buffer before it is pushed to disk. The buffer IO completion
	 * will pull the inode from the AIL, mark it clean and unlock the flush
	 * lock.
	 */
	(void) xfs_iflush(ip, SYNC_TRYLOCK);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
}

/*
 * XXX rcc - this one really has to do something.  Probably needs
 * to stamp in a new field in the incore inode.
 */
STATIC void
xfs_inode_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	INODE_ITEM(lip)->ili_last_lsn = lsn;
}

/*
 * This is the ops vector shared by all inode log items.
 */
static const struct xfs_item_ops xfs_inode_item_ops = {
	.iop_size	= xfs_inode_item_size,
	.iop_format	= xfs_inode_item_format,
	.iop_pin	= xfs_inode_item_pin,
	.iop_unpin	= xfs_inode_item_unpin,
	.iop_trylock	= xfs_inode_item_trylock,
	.iop_unlock	= xfs_inode_item_unlock,
	.iop_committed	= xfs_inode_item_committed,
	.iop_push	= xfs_inode_item_push,
	.iop_pushbuf	= xfs_inode_item_pushbuf,
	.iop_committing	= xfs_inode_item_committing
};

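/*
 * Rough usage sketch (not code from this file): a transaction that has the
 * inode joined to it dirties the item with something like
 *
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *
 * and the XFS_ILOG_* bits eventually end up in ili_format.ilf_fields, which
 * is what the size/format routines above consult when the transaction is
 * committed to the log.
 */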
/*
 * Initialize the inode log item for a newly allocated (in-core) inode.
 */
void
xfs_inode_item_init(
	struct xfs_inode	*ip,
	struct xfs_mount	*mp)
{
	struct xfs_inode_log_item *iip;

	ASSERT(ip->i_itemp == NULL);
	iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, KM_SLEEP);

	iip->ili_inode = ip;
	xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE,
						&xfs_inode_item_ops);
	iip->ili_format.ilf_type = XFS_LI_INODE;
	iip->ili_format.ilf_ino = ip->i_ino;
	iip->ili_format.ilf_blkno = ip->i_imap.im_blkno;
	iip->ili_format.ilf_len = ip->i_imap.im_len;
	iip->ili_format.ilf_boffset = ip->i_imap.im_boffset;
}

/*
 * Free the inode log item and any memory hanging off of it.
 */
void
xfs_inode_item_destroy(
	xfs_inode_t	*ip)
{
#ifdef XFS_TRANS_DEBUG
	if (ip->i_itemp->ili_root_size != 0) {
		kmem_free(ip->i_itemp->ili_orig_root);
	}
#endif
	kmem_zone_free(xfs_ili_zone, ip->i_itemp);
}


/*
 * This is the inode flushing I/O completion routine.  It is called
 * from interrupt level when the buffer containing the inode is
 * flushed to disk.  It is responsible for removing the inode item
 * from the AIL if it has not been re-logged, and unlocking the inode's
 * flush lock.
 *
 * To reduce AIL lock traffic as much as possible, we scan the buffer log item
 * list for other inodes that will run this function. We remove them from the
 * buffer list so we can process all the inode IO completions in one AIL lock
 * traversal.
 */
void
xfs_iflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip;
	struct xfs_log_item	*blip;
	struct xfs_log_item	*next;
	struct xfs_log_item	*prev;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			need_ail = 0;

	/*
	 * Scan the buffer IO completions for other inodes being completed and
	 * attach them to the current inode log item.
	 */
	blip = bp->b_fspriv;
	prev = NULL;
	while (blip != NULL) {
		if (lip->li_cb != xfs_iflush_done) {
			prev = blip;
			blip = blip->li_bio_list;
			continue;
		}

		/* remove from list */
		next = blip->li_bio_list;
		if (!prev) {
			bp->b_fspriv = next;
		} else {
			prev->li_bio_list = next;
		}

		/* add to current list */
		blip->li_bio_list = lip->li_bio_list;
		lip->li_bio_list = blip;

		/*
		 * while we have the item, do the unlocked check for needing
		 * the AIL lock.
		 */
		iip = INODE_ITEM(blip);
		if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn)
			need_ail++;

		blip = next;
	}

	/* make sure we capture the state of the initial inode. */
	iip = INODE_ITEM(lip);
	if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn)
		need_ail++;

	/*
	 * We only want to pull the item from the AIL if it is
	 * actually there and its location in the log has not
	 * changed since we started the flush.  Thus, we only bother
	 * if the ili_logged flag is set and the inode's lsn has not
	 * changed.  First we check the lsn outside
	 * the lock since it's cheaper, and then we recheck while
	 * holding the lock before removing the inode from the AIL.
	 */
	if (need_ail) {
		struct xfs_log_item *log_items[need_ail];
		int i = 0;

		spin_lock(&ailp->xa_lock);
		for (blip = lip; blip; blip = blip->li_bio_list) {
			iip = INODE_ITEM(blip);
			if (iip->ili_logged &&
			    blip->li_lsn == iip->ili_flush_lsn) {
				log_items[i++] = blip;
			}
			ASSERT(i <= need_ail);
		}
		/* xfs_trans_ail_delete_bulk() drops the AIL lock. */
		xfs_trans_ail_delete_bulk(ailp, log_items, i);
	}


	/*
	 * clean up and unlock the flush lock now we are done. We can clear the
	 * ili_last_fields bits now that we know that the data corresponding to
	 * them is safely on disk.
	 */
	for (blip = lip; blip; blip = next) {
		next = blip->li_bio_list;
		blip->li_bio_list = NULL;

		iip = INODE_ITEM(blip);
		iip->ili_logged = 0;
		iip->ili_last_fields = 0;
		xfs_ifunlock(iip->ili_inode);
	}
}

/*
 * This is the inode flushing abort routine.  It is called
 * from xfs_iflush when the filesystem is shutting down to clean
 * up the inode state.  It is responsible for removing the inode item
 * from the AIL if it has not been re-logged, and unlocking the inode's
 * flush lock.
 */
void
xfs_iflush_abort(
	xfs_inode_t		*ip)
{
	xfs_inode_log_item_t	*iip = ip->i_itemp;

	if (iip) {
		struct xfs_ail	*ailp = iip->ili_item.li_ailp;
		if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (iip->ili_item.li_flags & XFS_LI_IN_AIL) {
				/* xfs_trans_ail_delete() drops the AIL lock. */
				xfs_trans_ail_delete(ailp, (xfs_log_item_t *)iip);
			} else
				spin_unlock(&ailp->xa_lock);
		}
		iip->ili_logged = 0;
		/*
		 * Clear the ili_last_fields bits now that we know that the
		 * data corresponding to them is safely on disk.
		 */
		iip->ili_last_fields = 0;
		/*
		 * Clear the inode logging fields so no more flushes are
		 * attempted.
		 */
		iip->ili_format.ilf_fields = 0;
	}
	/*
	 * Release the inode's flush lock since we're done with it.
	 */
	xfs_ifunlock(ip);
}

void
xfs_istale_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_iflush_abort(INODE_ITEM(lip)->ili_inode);
}

/*
 * Convert an xfs_inode_log_format struct from either 32 or 64 bit versions
 * (which can have different field alignments) to the native version.
 */
int
xfs_inode_item_format_convert(
	xfs_log_iovec_t		*buf,
	xfs_inode_log_format_t	*in_f)
{
	if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) {
		xfs_inode_log_format_32_t *in_f32 = buf->i_addr;

		in_f->ilf_type = in_f32->ilf_type;
		in_f->ilf_size = in_f32->ilf_size;
		in_f->ilf_fields = in_f32->ilf_fields;
		in_f->ilf_asize = in_f32->ilf_asize;
		in_f->ilf_dsize = in_f32->ilf_dsize;
		in_f->ilf_ino = in_f32->ilf_ino;
		/* copy biggest field of ilf_u */
		memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
		       in_f32->ilf_u.ilfu_uuid.__u_bits,
		       sizeof(uuid_t));
		in_f->ilf_blkno = in_f32->ilf_blkno;
		in_f->ilf_len = in_f32->ilf_len;
		in_f->ilf_boffset = in_f32->ilf_boffset;
		return 0;
	} else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)) {
		xfs_inode_log_format_64_t *in_f64 = buf->i_addr;

		in_f->ilf_type = in_f64->ilf_type;
		in_f->ilf_size = in_f64->ilf_size;
		in_f->ilf_fields = in_f64->ilf_fields;
		in_f->ilf_asize = in_f64->ilf_asize;
		in_f->ilf_dsize = in_f64->ilf_dsize;
		in_f->ilf_ino = in_f64->ilf_ino;
		/* copy biggest field of ilf_u */
		memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
		       in_f64->ilf_u.ilfu_uuid.__u_bits,
		       sizeof(uuid_t));
		in_f->ilf_blkno = in_f64->ilf_blkno;
		in_f->ilf_len = in_f64->ilf_len;
		in_f->ilf_boffset = in_f64->ilf_boffset;
		return 0;
	}
	return EFSCORRUPTED;
}