]>
Commit | Line | Data |
---|---|---|
b3b94faa DT |
1 | /* |
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | |
da6dd40d | 3 | * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. |
b3b94faa DT |
4 | * |
5 | * This copyrighted material is made available to anyone wishing to use, | |
6 | * modify, copy, or redistribute it subject to the terms and conditions | |
e9fc2aa0 | 7 | * of the GNU General Public License version 2. |
b3b94faa DT |
8 | */ |
9 | ||
10 | #include <linux/sched.h> | |
11 | #include <linux/slab.h> | |
12 | #include <linux/spinlock.h> | |
13 | #include <linux/completion.h> | |
14 | #include <linux/buffer_head.h> | |
5c676f6d | 15 | #include <linux/gfs2_ondisk.h> |
71b86f56 | 16 | #include <linux/crc32.h> |
a25311c8 | 17 | #include <linux/delay.h> |
ec69b188 SW |
18 | #include <linux/kthread.h> |
19 | #include <linux/freezer.h> | |
254db57f | 20 | #include <linux/bio.h> |
885bceca | 21 | #include <linux/blkdev.h> |
4667a0ec | 22 | #include <linux/writeback.h> |
4a36d08d | 23 | #include <linux/list_sort.h> |
b3b94faa DT |
24 | |
25 | #include "gfs2.h" | |
5c676f6d | 26 | #include "incore.h" |
b3b94faa DT |
27 | #include "bmap.h" |
28 | #include "glock.h" | |
29 | #include "log.h" | |
30 | #include "lops.h" | |
31 | #include "meta_io.h" | |
5c676f6d | 32 | #include "util.h" |
71b86f56 | 33 | #include "dir.h" |
63997775 | 34 | #include "trace_gfs2.h" |
b3b94faa | 35 | |
b3b94faa DT |
/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks;
	unsigned int first, second;

	blks = 1;
	/* The first block also carries a log descriptor, so it holds fewer
	 * structures than the continuation blocks. */
	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

	if (nstruct > first) {
		/* Continuation blocks carry only a meta header. */
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_ROUND_UP(nstruct - first, second);
	}

	return blks;
}
65 | ||
1e1a3d03 SW |
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	/* Detach from the owning transaction and both AIL lists. */
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	/* This entry no longer pins its glock's AIL count. */
	atomic_dec(&bd->bd_gl->gl_ail_count);
	/* Drop the reference the AIL held on the buffer head. */
	brelse(bd->bd_bh);
}
83 | ||
ddacfaf7 SW |
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose ail1 list to write
 *
 * Walks one transaction's ail1 list, moving clean buffers to the ail2 list
 * and starting writeback on dirty ones.  May temporarily drop the ail lock
 * around generic_writepages(); returns 1 if it did so and the caller should
 * restart its scan, 0 when the list has been fully traversed.
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		/* I/O already complete: demote to the ail2 list. */
		if (!buffer_busy(bh)) {
			if (!buffer_uptodate(bh))
				gfs2_io_error_bh(sdp, bh);
			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
			continue;
		}

		if (!buffer_dirty(bh))
			continue;
		/* Skip further buffers under a glock we already wrote;
		 * generic_writepages() below covers the whole mapping. */
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		/* Drop the ail lock while submitting writeback. */
		spin_unlock(&sdp->sd_ail_lock);
		generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (wbc->nr_to_write <= 0)
			break;
		/* Lock was dropped; tell the caller to rescan. */
		return 1;
	}

	return 0;
}
ddacfaf7 | 134 | |
ddacfaf7 | 135 | |
4667a0ec SW |
/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	/* Plug the block layer so the I/O submitted below is batched. */
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	/* Oldest transactions first (reverse order). */
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		/* Non-zero return means the ail lock was dropped, so the
		 * list may have changed under us: start over. */
		if (gfs2_ail1_start_one(sdp, wbc, tr))
			goto restart;
	}
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	trace_gfs2_ail_flush(sdp, wbc, 0);
}
165 | ||
166 | /** | |
167 | * gfs2_ail1_start - start writeback of all ail1 entries | |
168 | * @sdp: The superblock | |
169 | */ | |
170 | ||
171 | static void gfs2_ail1_start(struct gfs2_sbd *sdp) | |
172 | { | |
173 | struct writeback_control wbc = { | |
174 | .sync_mode = WB_SYNC_NONE, | |
175 | .nr_to_write = LONG_MAX, | |
176 | .range_start = 0, | |
177 | .range_end = LLONG_MAX, | |
178 | }; | |
179 | ||
180 | return gfs2_ail1_flush(sdp, &wbc); | |
ddacfaf7 SW |
181 | } |
182 | ||
/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the AIL transaction to check
 *
 * Moves buffers whose I/O has completed from the transaction's ail1
 * list to its ail2 list.  The ail lock must be held by the caller.
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		/* Still under I/O (or dirty/locked): leave it on ail1. */
		if (buffer_busy(bh))
			continue;
		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}

}
207 | ||
4667a0ec SW |
/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 *
 * Returns: 1 if the ail1 list is now empty, 0 otherwise
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr);
		/* Only retire transactions from the oldest end; once one
		 * still has ail1 buffers, newer ones must stay put. */
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	return ret;
}
234 | ||
26b06a69 SW |
/**
 * gfs2_ail1_wait - wait for I/O on one locked ail1 buffer
 * @sdp: The superblock
 *
 * Scans the ail1 lists, oldest transaction first, for a locked buffer
 * and waits for its I/O to complete.  Returns immediately if no buffer
 * is currently locked.
 */

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			/* Pin the buffer so it can't be freed while we
			 * sleep without the ail lock. */
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}
ddacfaf7 SW |
256 | |
/**
 * gfs2_ail2_empty_one - Remove all entries from a transaction's ail2 list
 * @sdp: the filesystem
 * @tr: the AIL transaction to empty
 *
 * The ail lock must be held by the caller (gfs2_remove_from_ail()
 * requires it).
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_ail2_list;
	struct gfs2_bufdata *bd;

	/* Pop from the tail (oldest entries) until the list is empty. */
	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_tr == tr);
		gfs2_remove_from_ail(bd);
	}
}
276 | ||
b3b94faa DT |
/* Free all ail2 transactions whose first log block lies in the (circular)
 * range [old tail, new_tail), i.e. the journal region being freed as the
 * log tail advances to new_tail. */
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		/* If the range wraps past the journal end, a transaction is
		 * inside it when either bound matches; otherwise both must. */
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, tr);
		list_del(&tr->tr_list);
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		kfree(tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}
302 | ||
24972557 BM |
/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 * Returns @blks to the free-block count and drops the read side of
 * the log flush lock taken by gfs2_log_reserve().
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{

	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	/* Free count may never exceed the journal size. */
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	up_read(&sdp->sd_log_flush_lock);
}
319 | ||
b3b94faa DT |
/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. Thats
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	/* Headroom kept back for flush-time header blocks. */
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		/* Not enough space: wake logd and sleep (exclusive wait,
		 * so only one waiter is woken per space release). */
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	/* Claim the blocks; retry from the top if we raced with another
	 * reserver and the free count changed under us. */
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);
	/* Journal may have been shut down while we slept. */
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}
393 | ||
b3b94faa DT |
394 | /** |
395 | * log_distance - Compute distance between two journal blocks | |
396 | * @sdp: The GFS2 superblock | |
397 | * @newer: The most recent journal block of the pair | |
398 | * @older: The older journal block of the pair | |
399 | * | |
400 | * Compute the distance (in the journal direction) between two | |
401 | * blocks in the journal | |
402 | * | |
403 | * Returns: the distance in blocks | |
404 | */ | |
405 | ||
faa31ce8 | 406 | static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer, |
b3b94faa DT |
407 | unsigned int older) |
408 | { | |
409 | int dist; | |
410 | ||
411 | dist = newer - older; | |
412 | if (dist < 0) | |
413 | dist += sdp->sd_jdesc->jd_blocks; | |
414 | ||
415 | return dist; | |
416 | } | |
417 | ||
2332c443 RP |
/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex. We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal. So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		/* Net new buffers of each type (new minus removed). */
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	if (sdp->sd_log_commited_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					    sizeof(u64));
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}
467 | ||
b3b94faa DT |
/* Return the current log tail: the first block of the oldest transaction
 * still on the ail1 list, or the log head if ail1 is empty. */
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		/* Oldest transaction is at the list tail. */
		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}
487 | ||
2332c443 | 488 | static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) |
b3b94faa DT |
489 | { |
490 | unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail); | |
491 | ||
492 | ail2_empty(sdp, new_tail); | |
493 | ||
fd041f0b | 494 | atomic_add(dist, &sdp->sd_log_blks_free); |
63997775 | 495 | trace_gfs2_log_blocks(sdp, dist); |
5e687eac BM |
496 | gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= |
497 | sdp->sd_jdesc->jd_blocks); | |
b3b94faa DT |
498 | |
499 | sdp->sd_log_tail = new_tail; | |
500 | } | |
501 | ||
b3b94faa | 502 | |
34cc1781 | 503 | static void log_flush_wait(struct gfs2_sbd *sdp) |
b3b94faa | 504 | { |
16615be1 SW |
505 | DEFINE_WAIT(wait); |
506 | ||
507 | if (atomic_read(&sdp->sd_log_in_flight)) { | |
508 | do { | |
509 | prepare_to_wait(&sdp->sd_log_flush_wait, &wait, | |
510 | TASK_UNINTERRUPTIBLE); | |
511 | if (atomic_read(&sdp->sd_log_in_flight)) | |
512 | io_schedule(); | |
513 | } while(atomic_read(&sdp->sd_log_in_flight)); | |
514 | finish_wait(&sdp->sd_log_flush_wait, &wait); | |
b3b94faa | 515 | } |
b3b94faa DT |
516 | } |
517 | ||
45138990 | 518 | static int ip_cmp(void *priv, struct list_head *a, struct list_head *b) |
4a36d08d | 519 | { |
45138990 | 520 | struct gfs2_inode *ipa, *ipb; |
4a36d08d | 521 | |
45138990 SW |
522 | ipa = list_entry(a, struct gfs2_inode, i_ordered); |
523 | ipb = list_entry(b, struct gfs2_inode, i_ordered); | |
4a36d08d | 524 | |
45138990 | 525 | if (ipa->i_no_addr < ipb->i_no_addr) |
4a36d08d | 526 | return -1; |
45138990 | 527 | if (ipa->i_no_addr > ipb->i_no_addr) |
4a36d08d BP |
528 | return 1; |
529 | return 0; | |
530 | } | |
531 | ||
d7b616e2 SW |
/* Start writeback of dirty data pages for all inodes on the ordered-write
 * list, in ascending disk-address order.  The lock is dropped around each
 * filemap_fdatawrite() call; processed inodes are parked on a local list
 * and spliced back at the end. */
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	/* Sort by i_no_addr so writeback proceeds in disk order. */
	list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_move(&ip->i_ordered, &written);
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_le_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
551 | ||
/* Wait for completion of the data writeback started by gfs2_ordered_write(),
 * removing each inode from the ordered list and clearing GIF_ORDERED. */
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		/* Drop the lock while waiting on the mapping's I/O. */
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}
569 | ||
/* Remove an inode from the ordered-write list, if it is on it. */
void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	/* GIF_ORDERED set means the inode is linked on the list. */
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
579 | ||
5d054964 BM |
/* Turn a bufdata into a revoke entry: detach it from its buffer head and
 * the AIL, and queue it on the log's revoke list. */
void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	bh->b_private = NULL;
	/* Remember the block number; bd_bh is cleared below. */
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	bd->bd_ops = &gfs2_revoke_lops;
	sdp->sd_log_num_revoke++;
	atomic_inc(&gl->gl_revokes);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}
595 | ||
/* Scan the AIL for written-back buffers that are not yet queued anywhere
 * and issue revokes for them, up to the number that fits in the log blocks
 * we can account for. */
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd, *tmp;
	int have_revokes = 0;
	/* Revokes that fit in the first (descriptor) block. */
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_ail1_empty(sdp);
	spin_lock(&sdp->sd_ail_lock);
	/* Quick pass: is there any candidate at all? */
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
			if (list_empty(&bd->bd_list)) {
				have_revokes = 1;
				goto done;
			}
		}
	}
done:
	spin_unlock(&sdp->sd_ail_lock);
	if (have_revokes == 0)
		return;
	/* Grow the budget by one continuation block at a time until it
	 * covers the revokes already queued, then keep the remainder. */
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
			if (max_revokes == 0)
				goto out_of_blocks;
			if (!list_empty(&bd->bd_list))
				continue;
			gfs2_add_revoke(sdp, bd);
			max_revokes--;
		}
	}
out_of_blocks:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	/* Nothing was added after all: give the block(s) back. */
	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}
649 | ||
34cc1781 SW |
/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: log header flags (lh_flags value)
 *
 * Builds a log header page, writes it to the journal, and pulls the
 * log tail forward if it moved.
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
	lh = page_address(page);
	clear_page(lh);

	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));

	tail = current_tail(sdp);

	/* Fill in the on-disk header (all fields big-endian). */
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	/* Without barrier support, fall back to explicit waits instead of
	 * FLUSH/FUA ordering. */
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_log_write_page(sdp, page);
	gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
	log_flush_wait(sdp);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}
698 | ||
b3b94faa | 699 | /** |
b09e593d | 700 | * gfs2_log_flush - flush incore transaction(s) |
b3b94faa DT |
701 | * @sdp: the filesystem |
702 | * @gl: The glock structure to flush. If NULL, flush the whole incore log | |
703 | * | |
704 | */ | |
705 | ||
24972557 BM |
706 | void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, |
707 | enum gfs2_flush_type type) | |
b3b94faa | 708 | { |
16ca9412 | 709 | struct gfs2_trans *tr; |
2e60d768 | 710 | enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); |
b3b94faa | 711 | |
484adff8 | 712 | down_write(&sdp->sd_log_flush_lock); |
f55ab26a | 713 | |
2bcd610d SW |
714 | /* Log might have been flushed while we waited for the flush lock */ |
715 | if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) { | |
716 | up_write(&sdp->sd_log_flush_lock); | |
717 | return; | |
f55ab26a | 718 | } |
63997775 | 719 | trace_gfs2_log_flush(sdp, 1); |
f55ab26a | 720 | |
400ac52e BM |
721 | if (type == SHUTDOWN_FLUSH) |
722 | clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); | |
723 | ||
b1ab1e44 | 724 | sdp->sd_log_flush_head = sdp->sd_log_head; |
16ca9412 BM |
725 | tr = sdp->sd_log_tr; |
726 | if (tr) { | |
727 | sdp->sd_log_tr = NULL; | |
728 | INIT_LIST_HEAD(&tr->tr_ail1_list); | |
729 | INIT_LIST_HEAD(&tr->tr_ail2_list); | |
b1ab1e44 | 730 | tr->tr_first = sdp->sd_log_flush_head; |
2e60d768 BM |
731 | if (unlikely (state == SFS_FROZEN)) |
732 | gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new); | |
16ca9412 | 733 | } |
b3b94faa | 734 | |
2e60d768 BM |
735 | if (unlikely(state == SFS_FROZEN)) |
736 | gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke); | |
b3b94faa DT |
737 | gfs2_assert_withdraw(sdp, |
738 | sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke); | |
739 | ||
d7b616e2 | 740 | gfs2_ordered_write(sdp); |
d69a3c65 | 741 | lops_before_commit(sdp, tr); |
e1b1afa6 | 742 | gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0); |
d7b616e2 | 743 | |
34cc1781 | 744 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { |
428fd95d | 745 | log_flush_wait(sdp); |
fdb76a42 | 746 | log_write_header(sdp, 0); |
34cc1781 | 747 | } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){ |
fd041f0b | 748 | atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ |
63997775 | 749 | trace_gfs2_log_blocks(sdp, -1); |
fdb76a42 | 750 | log_write_header(sdp, 0); |
2332c443 | 751 | } |
16ca9412 | 752 | lops_after_commit(sdp, tr); |
b09e593d | 753 | |
fe1a698f SW |
754 | gfs2_log_lock(sdp); |
755 | sdp->sd_log_head = sdp->sd_log_flush_head; | |
faa31ce8 | 756 | sdp->sd_log_blks_reserved = 0; |
faa31ce8 | 757 | sdp->sd_log_commited_revoke = 0; |
b3b94faa | 758 | |
d6a079e8 | 759 | spin_lock(&sdp->sd_ail_lock); |
16ca9412 BM |
760 | if (tr && !list_empty(&tr->tr_ail1_list)) { |
761 | list_add(&tr->tr_list, &sdp->sd_ail1_list); | |
762 | tr = NULL; | |
b3b94faa | 763 | } |
d6a079e8 | 764 | spin_unlock(&sdp->sd_ail_lock); |
b3b94faa | 765 | gfs2_log_unlock(sdp); |
24972557 | 766 | |
24972557 BM |
767 | if (type != NORMAL_FLUSH) { |
768 | if (!sdp->sd_log_idle) { | |
769 | for (;;) { | |
770 | gfs2_ail1_start(sdp); | |
771 | gfs2_ail1_wait(sdp); | |
772 | if (gfs2_ail1_empty(sdp)) | |
773 | break; | |
774 | } | |
775 | atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ | |
776 | trace_gfs2_log_blocks(sdp, -1); | |
24972557 BM |
777 | log_write_header(sdp, 0); |
778 | sdp->sd_log_head = sdp->sd_log_flush_head; | |
779 | } | |
780 | if (type == SHUTDOWN_FLUSH || type == FREEZE_FLUSH) | |
781 | gfs2_log_shutdown(sdp); | |
2e60d768 BM |
782 | if (type == FREEZE_FLUSH) |
783 | atomic_set(&sdp->sd_freeze_state, SFS_FROZEN); | |
24972557 BM |
784 | } |
785 | ||
63997775 | 786 | trace_gfs2_log_flush(sdp, 0); |
484adff8 | 787 | up_write(&sdp->sd_log_flush_lock); |
b3b94faa | 788 | |
16ca9412 | 789 | kfree(tr); |
b3b94faa DT |
790 | } |
791 | ||
d69a3c65 SW |
/**
 * gfs2_merge_trans - Merge a new transaction into a cached transaction
 * @old: Original transaction to be expanded
 * @new: New transaction to be merged
 */

static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
{
	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	/* Accumulate the counters ... */
	old->tr_num_buf_new += new->tr_num_buf_new;
	old->tr_num_databuf_new += new->tr_num_databuf_new;
	old->tr_num_buf_rm += new->tr_num_buf_rm;
	old->tr_num_databuf_rm += new->tr_num_databuf_rm;
	old->tr_num_revoke += new->tr_num_revoke;
	old->tr_num_revoke_rm += new->tr_num_revoke_rm;

	/* ... and take over the new transaction's buffer lists. */
	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);
}
812 | ||
b3b94faa DT |
/**
 * log_refund - attach a transaction to the log and return surplus reservation
 * @sdp: the filesystem
 * @tr: the transaction being committed
 *
 * Under the log lock: merge @tr into the currently cached transaction (or
 * make @tr the cached one), then recompute how many reserved journal blocks
 * are actually needed and hand any surplus back to the free-block pool.
 */

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;
	unsigned int maxres;

	gfs2_log_lock(sdp);

	if (sdp->sd_log_tr) {
		/* A transaction is already cached; fold this one into it */
		gfs2_merge_trans(sdp->sd_log_tr, tr);
	} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
		/* No cached transaction and this one has new buffers:
		   make it the cached transaction */
		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
		sdp->sd_log_tr = tr;
		set_bit(TR_ATTACHED, &tr->tr_flags);
	}

	/* Net revokes added by this transaction */
	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	/* maxres: blocks reserved before this commit plus this tr's own
	   reservation; must cover the recomputed requirement */
	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
	gfs2_assert_withdraw(sdp, maxres >= reserved);
	unused = maxres - reserved;
	/* Return over-reserved blocks to the free pool */
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	/* Sanity: free count can never exceed the journal size */
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}
842 | ||
843 | /** | |
844 | * gfs2_log_commit - Commit a transaction to the log | |
845 | * @sdp: the filesystem | |
846 | * @tr: the transaction | |
847 | * | |
5e687eac BM |
848 | * We wake up gfs2_logd if the number of pinned blocks exceed thresh1 |
849 | * or the total number of used blocks (pinned blocks plus AIL blocks) | |
850 | * is greater than thresh2. | |
851 | * | |
852 | * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of | |
853 | * journal size. | |
854 | * | |
b3b94faa DT |
855 | * Returns: errno |
856 | */ | |
857 | ||
858 | void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) | |
859 | { | |
860 | log_refund(sdp, tr); | |
b3b94faa | 861 | |
5e687eac BM |
862 | if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) || |
863 | ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) > | |
864 | atomic_read(&sdp->sd_log_thresh2))) | |
865 | wake_up(&sdp->sd_logd_waitq); | |
b3b94faa DT |
866 | } |
867 | ||
/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 * Called with the log quiesced: asserts that nothing is reserved, no
 * revokes remain and the AIL1 list is empty, then writes an UNMOUNT
 * log header and advances head/tail to the flush head so the journal
 * is marked clean.
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	/* The log must be fully idle before it can be shut down */
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;

	/* Write the header that marks the journal as cleanly unmounted */
	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);

	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	/* Advance head and tail past the unmount header */
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;
}
890 | ||
5e687eac BM |
891 | static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) |
892 | { | |
f07b3520 BP |
893 | return (atomic_read(&sdp->sd_log_pinned) + |
894 | atomic_read(&sdp->sd_log_blks_needed) >= | |
895 | atomic_read(&sdp->sd_log_thresh1)); | |
5e687eac BM |
896 | } |
897 | ||
898 | static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp) | |
899 | { | |
900 | unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free); | |
f07b3520 BP |
901 | return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >= |
902 | atomic_read(&sdp->sd_log_thresh2); | |
5e687eac | 903 | } |
ec69b188 SW |
904 | |
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);
	bool did_flush;

	while (!kthread_should_stop()) {

		did_flush = false;
		/* Flush the journal when the pinned/needed threshold is hit
		   or the periodic timeout (t == 0) has expired */
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
			did_flush = true;
		}

		/* Push AIL1 items to in-place blocks when the journal is
		   getting full, then flush the log again */
		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
			did_flush = true;
		}

		/* Wake anyone waiting for log space once pressure is relieved */
		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
			wake_up(&sdp->sd_log_waitq);

		/* Reload the periodic flush interval (tunable) */
		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		/* Sleep until work is required, the timeout expires, or the
		   thread is asked to stop; conditions are rechecked after
		   prepare_to_wait() to avoid missed wakeups */
		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}
959 |