// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_log.h"

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 *
 * Called with the ail lock held, but we don't want to assert fail with it
 * held otherwise we'll lock everything up and won't be able to debug the
 * cause. Hence we sample and check the state under the AIL lock and return if
 * everything is fine, otherwise we drop the lock and run the ASSERT checks.
 * Asserts may not be fatal, so pick the lock back up and continue onwards.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
	__must_hold(&ailp->ail_lock)
{
	struct xfs_log_item	*prev_lip;
	struct xfs_log_item	*next_lip;
	xfs_lsn_t		prev_lsn = NULLCOMMITLSN;
	xfs_lsn_t		next_lsn = NULLCOMMITLSN;
	xfs_lsn_t		lsn;
	bool			in_ail;

	if (list_empty(&ailp->ail_head))
		return;

	/*
	 * Sample then check the next and previous entries are valid.
	 */
	in_ail = test_bit(XFS_LI_IN_AIL, &lip->li_flags);
	prev_lip = list_entry(lip->li_ail.prev, struct xfs_log_item, li_ail);
	if (&prev_lip->li_ail != &ailp->ail_head)
		prev_lsn = prev_lip->li_lsn;
	next_lip = list_entry(lip->li_ail.next, struct xfs_log_item, li_ail);
	if (&next_lip->li_ail != &ailp->ail_head)
		next_lsn = next_lip->li_lsn;
	lsn = lip->li_lsn;

	if (in_ail &&
	    (prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0) &&
	    (next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0))
		return;

	spin_unlock(&ailp->ail_lock);
	ASSERT(in_ail);
	ASSERT(prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0);
	ASSERT(next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0);
	spin_lock(&ailp->ail_lock);
}
#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the last item in the AIL. If the AIL is empty, then
 * return NULL.
 */
static struct xfs_log_item *
xfs_ail_max(
	struct xfs_ail		*ailp)
{
	if (list_empty(&ailp->ail_head))
		return NULL;

	return list_entry(ailp->ail_head.prev, struct xfs_log_item, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL. If
 * the given item is the last item in the list, then return NULL.
 */
static struct xfs_log_item *
xfs_ail_next(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	if (lip->li_ail.next == &ailp->ail_head)
		return NULL;

	return list_first_entry(&lip->li_ail, struct xfs_log_item, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log. This is exactly the LSN of the first item in the AIL. If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the first
 * item in the AIL.
 */
xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn = 0;
	struct xfs_log_item	*lip;

	spin_lock(&ailp->ail_lock);
	lip = xfs_ail_min(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn = 0;
	struct xfs_log_item	*lip;

	spin_lock(&ailp->ail_lock);
	lip = xfs_ail_max(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it. Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->ail_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor. If the cursor
 * was invalidated (indicated by a lip of 1), restart the traversal.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((uintptr_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}

/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	list_for_each_entry(cur, &ailp->ail_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((uintptr_t)cur->item | 1);
	}
}

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal. Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL. Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	list_for_each_entry(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

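/*
 * Illustrative sketch only, not part of the kernel source: the cursor API
 * above is meant to be used as a first/next/done loop under the AIL lock,
 * much like xfsaild_push() below does. The walk_ail() helper and its @lsn
 * starting point here are hypothetical.
 *
 *	static void
 *	walk_ail(
 *		struct xfs_ail		*ailp,
 *		xfs_lsn_t		lsn)
 *	{
 *		struct xfs_ail_cursor	cur;
 *		struct xfs_log_item	*lip;
 *
 *		spin_lock(&ailp->ail_lock);
 *		for (lip = xfs_trans_ail_cursor_first(ailp, &cur, lsn);
 *		     lip != NULL;
 *		     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
 *			// items are returned in ascending LSN order; the
 *			// cursor remains valid even if an item is removed
 *			// from the AIL while the lock is dropped, because
 *			// removal invalidates any cursor pointing at it
 *		}
 *		xfs_trans_ail_cursor_done(&cur);
 *		spin_unlock(&ailp->ail_lock);
 *	}
 */
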
static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	list_for_each_entry_reverse(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item. If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with
 * an LSN lower than @lsn. Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}

/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals. This should not be called with an empty list.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	ASSERT(!list_empty(list));

	/*
	 * Use the cursor to determine the insertion point if one is
	 * provided. If not, or if the one we got is not valid,
	 * find the place in the AIL where the items belong.
	 */
	lip = cur ? cur->item : NULL;
	if (!lip || (uintptr_t)lip & 1)
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

	/*
	 * If a cursor is provided, we know we're processing the AIL
	 * in lsn order, and future items to be spliced in will
	 * follow the last one being inserted now. Update the
	 * cursor to point to that last item, now while we have a
	 * reliable pointer to it.
	 */
	if (cur)
		cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

	/*
	 * Finally perform the splice. Unless the AIL was empty,
	 * lip points to the item in the AIL _after_ which the new
	 * items should go. If lip is null the AIL was empty, so
	 * the new items go at the head of the AIL.
	 */
	if (lip)
		list_splice(list, &lip->li_ail);
	else
		list_splice(list, &ailp->ail_head);
}

/*
 * Delete the given item from the AIL.
 */
static void
xfs_ail_delete(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

static inline uint
xfsaild_push_item(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	/*
	 * If log item pinning is enabled, skip the push and track the item as
	 * pinned. This can help induce head-behind-tail conditions.
	 */
	if (XFS_TEST_ERROR(false, ailp->ail_mount, XFS_ERRTAG_LOG_ITEM_PIN))
		return XFS_ITEM_PINNED;

	/*
	 * Consider the item pinned if a push callback is not defined so the
	 * caller will force the log. This should only happen for intent items
	 * as they are unpinned once the associated done item is committed to
	 * the on-disk log.
	 */
	if (!lip->li_ops->iop_push)
		return XFS_ITEM_PINNED;
	return lip->li_ops->iop_push(lip, &ailp->ail_buf_list);
}

static long
xfsaild_push(
	struct xfs_ail		*ailp)
{
	xfs_mount_t		*mp = ailp->ail_mount;
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	xfs_lsn_t		lsn;
	xfs_lsn_t		target;
	long			tout;
	int			stuck = 0;
	int			flushing = 0;
	int			count = 0;

	/*
	 * If we encountered pinned items or did not finish writing out all
	 * buffers the last time we ran, force the log first and wait for it
	 * before pushing again.
	 */
	if (ailp->ail_log_flush && ailp->ail_last_pushed_lsn == 0 &&
	    (!list_empty_careful(&ailp->ail_buf_list) ||
	     xfs_ail_min_lsn(ailp))) {
		ailp->ail_log_flush = 0;

		XFS_STATS_INC(mp, xs_push_ail_flush);
		xfs_log_force(mp, XFS_LOG_SYNC);
	}

	spin_lock(&ailp->ail_lock);

	/* barrier matches the ail_target update in xfs_ail_push() */
	smp_rmb();
	target = ailp->ail_target;
	ailp->ail_target_prev = target;

	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
	if (!lip) {
		/*
		 * If the AIL is empty or our push has reached the end we are
		 * done now.
		 */
		xfs_trans_ail_cursor_done(&cur);
		spin_unlock(&ailp->ail_lock);
		goto out_done;
	}

	XFS_STATS_INC(mp, xs_push_ail);

	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;

		/*
		 * Note that iop_push may unlock and reacquire the AIL lock. We
		 * rely on the AIL cursor implementation to be able to deal with
		 * the dropped lock.
		 */
		lock_result = xfsaild_push_item(ailp, lip);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(mp, xs_push_ail_success);
			trace_xfs_ail_push(lip);

			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_FLUSHING:
			/*
			 * The item or its backing buffer is already being
			 * flushed. The typical reason for that is that an
			 * inode buffer is locked because we already pushed the
			 * updates to it as part of inode clustering.
			 *
			 * We do not want to stop flushing just because lots
			 * of items are already being flushed, but we need to
			 * re-try the flushing relatively soon if most of the
			 * AIL is being flushed.
			 */
			XFS_STATS_INC(mp, xs_push_ail_flushing);
			trace_xfs_ail_flushing(lip);

			flushing++;
			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(mp, xs_push_ail_pinned);
			trace_xfs_ail_pinned(lip);

			stuck++;
			ailp->ail_log_flush++;
			break;
		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(mp, xs_push_ail_locked);
			trace_xfs_ail_locked(lip);

			stuck++;
			break;
		default:
			ASSERT(0);
			break;
		}

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 *
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done. i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);

	if (xfs_buf_delwri_submit_nowait(&ailp->ail_buf_list))
		ailp->ail_log_flush++;

	if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
out_done:
		/*
		 * We reached the target or the AIL is empty, so wait a bit
		 * longer for I/O to complete and remove pushed items from the
		 * AIL before we start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->ail_last_pushed_lsn = 0;
	} else if (((stuck + flushing) * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we are
		 * stuck due to operations in progress. "Stuck" in this case
		 * is defined as >90% of the items we tried to push were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * restarting from the start of the AIL. This prevents us from
		 * spinning on the same items, and if they are pinned will
		 * allow the restart to issue a log force to unpin the stuck
		 * items.
		 */
		tout = 20;
		ailp->ail_last_pushed_lsn = 0;
	} else {
		/*
		 * Assume we have more work to do in a short while.
		 */
		tout = 10;
	}

	return tout;
}

static int
xfsaild(
	void		*data)
{
	struct xfs_ail	*ailp = data;
	long		tout = 0;	/* milliseconds */
	unsigned int	noreclaim_flag;

	noreclaim_flag = memalloc_noreclaim_save();
	set_freezable();

	while (1) {
		if (tout && tout <= 20)
			set_current_state(TASK_KILLABLE);
		else
			set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * Check kthread_should_stop() after we set the task state to
		 * guarantee that we either see the stop bit and exit or the
		 * task state is reset to runnable such that it's not scheduled
		 * out indefinitely and detects the stop bit at next iteration.
		 * A memory barrier is included in above task state set to
		 * serialize against kthread_stop().
		 */
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);

			/*
			 * The caller forces out the AIL before stopping the
			 * thread in the common case, which means the delwri
			 * queue is drained. In the shutdown case, the queue may
			 * still hold relogged buffers that haven't been
			 * submitted because they were pinned since added to the
			 * queue.
			 *
			 * Log I/O error processing stales the underlying buffer
			 * and clears the delwri state, expecting the buf to be
			 * removed on the next submission attempt. That won't
			 * happen if we're shutting down, so this is the last
			 * opportunity to release such buffers from the queue.
			 */
			ASSERT(list_empty(&ailp->ail_buf_list) ||
			       XFS_FORCED_SHUTDOWN(ailp->ail_mount));
			xfs_buf_delwri_cancel(&ailp->ail_buf_list);
			break;
		}

		spin_lock(&ailp->ail_lock);

		/*
		 * Idle if the AIL is empty and we are not racing with a target
		 * update. We check the AIL after we set the task to a sleep
		 * state to guarantee that we either catch an ail_target update
		 * or that a wake_up resets the state to TASK_RUNNING.
		 * Otherwise, we run the risk of sleeping indefinitely.
		 *
		 * The barrier matches the ail_target update in xfs_ail_push().
		 */
		smp_rmb();
		if (!xfs_ail_min(ailp) &&
		    ailp->ail_target == ailp->ail_target_prev) {
			spin_unlock(&ailp->ail_lock);
			freezable_schedule();
			tout = 0;
			continue;
		}
		spin_unlock(&ailp->ail_lock);

		if (tout)
			freezable_schedule_timeout(msecs_to_jiffies(tout));

		__set_current_state(TASK_RUNNING);

		try_to_freeze();

		tout = xfsaild_push(ailp);
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return 0;
}

/*
 * This routine is called to move the tail of the AIL forward. It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously by the xfsaild task, which means the caller
 * needs to handle waiting on the async flush for space to become available.
 * We don't want to interrupt any push that is in progress, hence we only
 * update the target and wake the task if the new threshold moves the target
 * forward.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called. We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail		*ailp,
	xfs_lsn_t		threshold_lsn)
{
	struct xfs_log_item	*lip;

	lip = xfs_ail_min(ailp);
	if (!lip || XFS_FORCED_SHUTDOWN(ailp->ail_mount) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->ail_target) <= 0)
		return;

	/*
	 * Ensure that the new target is noticed in push code before we wake
	 * the xfsaild task.
	 */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->ail_target, &threshold_lsn);
	smp_wmb();

	wake_up_process(ailp->ail_task);
}
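
/*
 * Illustrative sketch only, not part of the kernel source: a caller that
 * needs log space freed typically computes a threshold LSN and nudges the
 * AIL like this (the flow below is a hypothetical simplification; compare
 * xlog_grant_push_ail() in fs/xfs/xfs_log.c for the real caller):
 *
 *	xfs_lsn_t	threshold_lsn;
 *
 *	threshold_lsn = xfs_ail_min_lsn(mp->m_ail);
 *	// ... advance threshold_lsn by the amount of log space that
 *	// needs to become free ...
 *	xfs_ail_push(mp->m_ail, threshold_lsn);
 *
 * The call only records the new target and wakes the xfsaild; waiting for
 * the space to actually become available is the caller's problem.
 */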

/*
 * Push out all items in the AIL immediately
 */
void
xfs_ail_push_all(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t	threshold_lsn = xfs_ail_max_lsn(ailp);

	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}

/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip;
	DEFINE_WAIT(wait);

	spin_lock(&ailp->ail_lock);
	while ((lip = xfs_ail_max(ailp)) != NULL) {
		prepare_to_wait(&ailp->ail_empty, &wait, TASK_UNINTERRUPTIBLE);
		ailp->ail_target = lip->li_lsn;
		wake_up_process(ailp->ail_task);
		spin_unlock(&ailp->ail_lock);
		schedule();
		spin_lock(&ailp->ail_lock);
	}
	spin_unlock(&ailp->ail_lock);

	finish_wait(&ailp->ail_empty, &wait);
}

/*
 * xfs_trans_ail_update - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added. Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function updates all the items in the array under a single acquisition
 * of the AIL lock. Items may already be at or beyond the target LSN, so once
 * we have the AIL lock we need to check each log item LSN to confirm it
 * actually needs to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->ail_lock)
{
	struct xfs_log_item	*mlip;
	int			mlip_changed = 0;
	int			i;
	LIST_HEAD(tmp);

	ASSERT(nr_items > 0);		/* Not required, but true. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (test_and_set_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			trace_xfs_ail_move(lip, lip->li_lsn, lsn);
			xfs_ail_delete(ailp, lip);
			if (mlip == lip)
				mlip_changed = 1;
		} else {
			trace_xfs_ail_insert(lip, 0, lsn);
		}
		lip->li_lsn = lsn;
		list_add(&lip->li_ail, &tmp);
	}

	if (!list_empty(&tmp))
		xfs_ail_splice(ailp, cur, &tmp, lsn);

	if (mlip_changed) {
		if (!XFS_FORCED_SHUTDOWN(ailp->ail_mount))
			xlog_assign_tail_lsn_locked(ailp->ail_mount);
		spin_unlock(&ailp->ail_lock);

		xfs_log_space_wake(ailp->ail_mount);
	} else {
		spin_unlock(&ailp->ail_lock);
	}
}

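/*
 * Illustrative sketch only, not part of the kernel source: the common
 * single-item case boils down to the pattern below (cf. the
 * xfs_trans_ail_update() wrapper in xfs_trans_priv.h). The AIL lock is taken
 * by the caller and released by the update, so the caller must not touch the
 * lock afterwards:
 *
 *	spin_lock(&ailp->ail_lock);
 *	// ... decide the target lsn for this log item ...
 *	xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
 *	// returns with ailp->ail_lock dropped
 */
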
bool
xfs_ail_delete_one(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item	*mlip = xfs_ail_min(ailp);

	trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
	xfs_ail_delete(ailp, lip);
	xfs_clear_li_failed(lip);
	clear_bit(XFS_LI_IN_AIL, &lip->li_flags);
	lip->li_lsn = 0;

	return mlip == lip;
}

/**
 * Remove a log item from the AIL
 *
 * @xfs_trans_ail_delete takes a log item that needs to be removed from the
 * AIL. The caller is already holding the AIL lock, and has done all the checks
 * necessary to ensure the item passed in is ready for deletion. This includes
 * checking that the item is in the AIL.
 *
 * Unlink the item from the AIL, clear the IN_AIL flag from the item and reset
 * the item's lsn to 0. If we remove the first item in the AIL, update the log
 * tail to match the new minimum LSN in the AIL.
 *
 * This function must be called with the AIL lock held. The lock is dropped
 * before returning.
 */
void
xfs_trans_ail_delete(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	int			shutdown_type) __releases(ailp->ail_lock)
{
	struct xfs_mount	*mp = ailp->ail_mount;
	bool			mlip_changed;

	if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
		spin_unlock(&ailp->ail_lock);
		if (!XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
				"%s: attempting to delete a log item that is not in the AIL",
				__func__);
			xfs_force_shutdown(mp, shutdown_type);
		}
		return;
	}

	mlip_changed = xfs_ail_delete_one(ailp, lip);
	if (mlip_changed) {
		if (!XFS_FORCED_SHUTDOWN(mp))
			xlog_assign_tail_lsn_locked(mp);
		if (list_empty(&ailp->ail_head))
			wake_up_all(&ailp->ail_empty);
	}

	spin_unlock(&ailp->ail_lock);
	if (mlip_changed)
		xfs_log_space_wake(ailp->ail_mount);
}

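/*
 * Illustrative sketch only, not part of the kernel source: as with the
 * update path, deletion is entered with the AIL lock held and returns with
 * it dropped, e.g. from an item's I/O completion handler. The surrounding
 * logic here is hypothetical:
 *
 *	spin_lock(&ailp->ail_lock);
 *	if (test_bit(XFS_LI_IN_AIL, &lip->li_flags))
 *		xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
 *	else
 *		spin_unlock(&ailp->ail_lock);
 */
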
int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
	if (!ailp)
		return -ENOMEM;

	ailp->ail_mount = mp;
	INIT_LIST_HEAD(&ailp->ail_head);
	INIT_LIST_HEAD(&ailp->ail_cursors);
	spin_lock_init(&ailp->ail_lock);
	INIT_LIST_HEAD(&ailp->ail_buf_list);
	init_waitqueue_head(&ailp->ail_empty);

	ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
				ailp->ail_mount->m_super->s_id);
	if (IS_ERR(ailp->ail_task))
		goto out_free_ailp;

	mp->m_ail = ailp;
	return 0;

out_free_ailp:
	kmem_free(ailp);
	return -ENOMEM;
}

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	kthread_stop(ailp->ail_task);
	kmem_free(ailp);
}