/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps)
{
	struct xfs_log_item_desc *lidp;
	struct xfs_buf_log_item	*blip;
	int			len = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		len += map[i].bm_len;

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		blip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (blip->bli_item.li_type == XFS_LI_BUF &&
		    blip->bli_buf->b_target == target &&
		    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
		    blip->bli_buf->b_length == len) {
			ASSERT(blip->bli_buf->b_map_count == nmaps);
			return blip->bli_buf;
		}
	}

	return NULL;
}

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	int			reset_recur)
{
	struct xfs_buf_log_item	*bip;

	ASSERT(bp->b_transp == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_fspriv.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = bp->b_fspriv;
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	if (reset_recur)
		bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &bip->bli_item);

	/*
	 * Initialize b_transp so we can find the buffer with
	 * xfs_trans_buf_item_match() in xfs_trans_get_buf() and
	 * friends below.
	 */
	bp->b_transp = tp;
}

void
xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	_xfs_trans_bjoin(tp, bp, 0);
	trace_xfs_trans_bjoin(bp->b_fspriv);
}

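/*
 * Illustrative sketch, not part of the original source: a caller that has
 * already locked a buffer outside of transaction context (for example via
 * xfs_buf_get()) can attach it to a transaction with xfs_trans_bjoin().
 * The names "tp" and "bp" below are assumed to be a valid transaction and
 * a locked buffer not yet joined to any transaction:
 *
 *	xfs_trans_bjoin(tp, bp);
 *	... modify the buffer contents ...
 *	xfs_trans_log_buf(tp, bp, first, last);
 */
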
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
struct xfs_buf *
xfs_trans_get_buf_map(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	if (!tp)
		return xfs_buf_get_map(target, map, nmaps, flags);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buf_stale(bp);
			XFS_BUF_DONE(bp);
		}

		ASSERT(bp->b_transp == tp);
		bip = bp->b_fspriv;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		return bp;
	}

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp == NULL)
		return NULL;

	ASSERT(!bp->b_error);

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_fspriv);
	return bp;
}

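/*
 * Illustrative sketch, not part of the original source: most callers go
 * through the single-block wrapper xfs_trans_get_buf() (a static inline
 * that builds a one-entry map and calls the _map variant above).  The
 * names "tp", "mp", "daddr" and "numblks" below are assumed to be a
 * transaction, its mount, and a disk address with a length in basic
 * blocks:
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, daddr, numblks, 0);
 *	if (!bp)
 *		return ENOMEM;
 *	... initialize the new buffer contents ...
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */
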
/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use xfs_trans_buf_item_match() here, because the
 * superblock buffer is a private buffer which we keep a pointer to in
 * the mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(xfs_trans_t	*tp,
		struct xfs_mount *mp,
		int		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to just trying to lock the superblock buffer
	 * if tp is NULL.
	 */
	if (tp == NULL) {
		return (xfs_getsb(mp, flags));
	}

	/*
	 * If the superblock buffer already has this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = mp->m_sb_bp;
	if (bp->b_transp == tp) {
		bip = bp->b_fspriv;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_getsb_recur(bip);
		return bp;
	}

	bp = xfs_getsb(mp, flags);
	if (bp == NULL)
		return NULL;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_getsb(bp->b_fspriv);
	return bp;
}

#ifdef DEBUG
xfs_buftarg_t *xfs_error_target;
int	xfs_do_error;
int	xfs_req_num;
int	xfs_error_mod = 33;
#endif

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk.  If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;
	int			error;

	*bpp = NULL;
	if (!tp) {
		bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
		if (!bp)
			return (flags & XBF_TRYLOCK) ?
					EAGAIN : XFS_ERROR(ENOMEM);

		if (bp->b_error) {
			error = bp->b_error;
			xfs_buf_ioerror_alert(bp, __func__);
			XFS_BUF_UNDONE(bp);
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
			return error;
		}
#ifdef DEBUG
		if (xfs_do_error) {
			if (xfs_error_target == target) {
				if (((xfs_req_num++) % xfs_error_mod) == 0) {
					xfs_buf_relse(bp);
					xfs_debug(mp, "Returning error!");
					return XFS_ERROR(EIO);
				}
			}
		}
#endif
		if (XFS_FORCED_SHUTDOWN(mp))
			goto shutdown_abort;
		*bpp = bp;
		return 0;
	}

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_fspriv != NULL);
		ASSERT(!bp->b_error);
		if (!(XFS_BUF_ISDONE(bp))) {
			trace_xfs_trans_read_buf_io(bp, _RET_IP_);
			ASSERT(!XFS_BUF_ISASYNC(bp));
			ASSERT(bp->b_iodone == NULL);
			XFS_BUF_READ(bp);
			bp->b_ops = ops;
			xfsbdstrat(tp->t_mountp, bp);
			error = xfs_buf_iowait(bp);
			if (error) {
				xfs_buf_ioerror_alert(bp, __func__);
				xfs_buf_relse(bp);
				/*
				 * We can gracefully recover from most read
				 * errors. Ones we can't are those that happen
				 * after the transaction's already dirty.
				 */
				if (tp->t_flags & XFS_TRANS_DIRTY)
					xfs_force_shutdown(tp->t_mountp,
							SHUTDOWN_META_IO_ERROR);
				return error;
			}
		}
		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either. Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			*bpp = NULL;
			return XFS_ERROR(EIO);
		}

		bip = bp->b_fspriv;
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		*bpp = bp;
		return 0;
	}

	bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
	if (bp == NULL) {
		*bpp = NULL;
		return (flags & XBF_TRYLOCK) ?
					0 : XFS_ERROR(ENOMEM);
	}
	if (bp->b_error) {
		error = bp->b_error;
		xfs_buf_stale(bp);
		XFS_BUF_DONE(bp);
		xfs_buf_ioerror_alert(bp, __func__);
		if (tp->t_flags & XFS_TRANS_DIRTY)
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);
		return error;
	}
#ifdef DEBUG
	if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
		if (xfs_error_target == target) {
			if (((xfs_req_num++) % xfs_error_mod) == 0) {
				xfs_force_shutdown(tp->t_mountp,
						   SHUTDOWN_META_IO_ERROR);
				xfs_buf_relse(bp);
				xfs_debug(mp, "Returning trans error!");
				return XFS_ERROR(EIO);
			}
		}
	}
#endif
	if (XFS_FORCED_SHUTDOWN(mp))
		goto shutdown_abort;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_read_buf(bp->b_fspriv);

	*bpp = bp;
	return 0;

shutdown_abort:
	trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
	xfs_buf_relse(bp);
	*bpp = NULL;
	return XFS_ERROR(EIO);
}

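/*
 * Illustrative sketch, not part of the original source: metadata readers
 * normally use the single-block wrapper xfs_trans_read_buf(), passing the
 * verifier ops for the structure being read.  The names "mp", "tp",
 * "daddr", "numblks" and "ops" below are assumptions standing in for a
 * real caller's context:
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, daddr,
 *				   numblks, 0, &bp, ops);
 *	if (error)
 *		return error;
 *	... use bp, then xfs_trans_log_buf() or xfs_trans_brelse() it ...
 */
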
/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction.  If the buffer is modified
 * within this transaction, do decrement the recursion count but do
 * not release the buffer even if the count goes to 0.  If the buffer is not
 * modified within the transaction, decrement the recursion count and
 * release the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(xfs_trans_t	*tp,
		 xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(bp->b_transp == NULL);
		xfs_buf_relse(bp);
		return;
	}

	ASSERT(bp->b_transp == tp);
	bip = bp->b_fspriv;
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_brelse(bip);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * release it until we commit.
	 */
	if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
		return;

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction.  This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_del_item(&bip->bli_item);

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	atomic_dec(&bip->bli_refcount);

	/*
	 * If the buf item is not tracking data in the log, then
	 * we must free it before releasing the buffer back to the
	 * free pool.  Before releasing the buffer to the free pool,
	 * clear the transaction pointer in b_transp to dissolve
	 * its relation to this transaction.
	 */
	if (!xfs_buf_item_dirty(bip)) {
		/***
		ASSERT(bp->b_pincount == 0);
		***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
	}

	bp->b_transp = NULL;
	xfs_buf_relse(bp);
}

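/*
 * Illustrative sketch, not part of the original source: a read-only lookup
 * can hand the buffer back as soon as it is done with it, while a modified
 * buffer stays attached until the transaction commits.  "tp" and "bp" are
 * assumed to come from xfs_trans_read_buf() above:
 *
 *	... examine the buffer contents, no modification ...
 *	xfs_trans_brelse(tp, bp);
 */
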
/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * IOP_UNLOCK() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(xfs_trans_t	*tp,
		xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_HOLD;
	trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(xfs_trans_t	*tp,
			xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	trace_xfs_trans_bhold_release(bip);
}

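/*
 * Illustrative sketch, not part of the original source: the hold interfaces
 * are typically used to keep a buffer locked across a transaction roll.
 * "tp" is assumed to be about to commit with "bp" joined to it, and "ntp"
 * is a hypothetical follow-on transaction:
 *
 *	xfs_trans_bhold(tp, bp);
 *	error = xfs_trans_commit(tp, 0);
 *	... allocate and reserve the next transaction as "ntp" ...
 *	xfs_trans_bjoin(ntp, bp);
 */
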
/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(xfs_trans_t	*tp,
		  xfs_buf_t	*bp,
		  uint		first,
		  uint		last)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(first <= last && last < BBTOB(bp->b_length));
	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.  See xfs_buf_attach_iodone() for more details
	 * on li_cb and xfs_buf_iodone_callbacks().
	 * If we end up aborting this transaction, we trap this buffer
	 * inside the b_bdstrat callback so that this won't get written to
	 * disk.
	 */
	XFS_BUF_DONE(bp);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	bp->b_iodone = xfs_buf_iodone_callbacks;
	bip->bli_item.li_cb = xfs_buf_iodone;

	trace_xfs_trans_log_buf(bip);

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again. There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(XFS_BUF_ISSTALE(bp));
		XFS_BUF_UNSTALE(bp);
		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
	}

	tp->t_flags |= XFS_TRANS_DIRTY;
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
	bip->bli_flags |= XFS_BLI_LOGGED;
	xfs_buf_item_log(bip, first, last);
}

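/*
 * Illustrative sketch, not part of the original source: callers log only
 * the byte range they touched; offsets are relative to the start of the
 * buffer.  For example, after rewriting the whole of a buffer one might
 * log it all (matching the range check asserted above):
 *
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */
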
/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;
	int			i;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_binval(bip);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_TYPE_MASK));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		return;
	}

	xfs_buf_stale(bp);

	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
	bip->__bli_format.blf_flags &= ~XFS_BLF_TYPE_MASK;
	for (i = 0; i < bip->bli_format_count; i++) {
		memset(bip->bli_formats[i].blf_data_map, 0,
		       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
	}
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
	tp->t_flags |= XFS_TRANS_DIRTY;
}

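/*
 * Illustrative sketch, not part of the original source: when a transaction
 * frees the blocks backing a metadata buffer, it invalidates the buffer so
 * that a stale copy is never written over the (possibly reallocated)
 * blocks.  "tp", "mp", "daddr" and "numblks" are assumed caller context:
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, daddr, numblks, 0);
 *	xfs_trans_binval(tp, bp);
 */
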
/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can
 * be transferred to the buffer's log format structure so that we'll know what
 * to do at recovery time.
 */
void
xfs_trans_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLF_DINO_BUF);
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer.  This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery: any replay
 * of the inodes in the buffer needs to be prevented as the
 * buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bip->bli_item.li_cb = xfs_buf_iodone;
	xfs_trans_buf_set_type(tp, bp, XFS_BLF_DINO_BUF);
}

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLF_DINO_BUF);
}

/*
 * Set the type of the buffer for log recovery so that recovery can
 * correctly identify the buffer and hence attach the correct buffer ops
 * to it after replay.
 */
void
xfs_trans_buf_set_type(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	uint			type)
{
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((type & XFS_BLF_TYPE_MASK) != 0);

	bip->__bli_format.blf_flags &= ~XFS_BLF_TYPE_MASK;
	bip->__bli_format.blf_flags |= type;
}

/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots. However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened. We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp,
	uint		type)
{
	ASSERT(type == XFS_BLF_UDQUOT_BUF ||
	       type == XFS_BLF_PDQUOT_BUF ||
	       type == XFS_BLF_GDQUOT_BUF);

	xfs_trans_buf_set_type(tp, bp, type);
}