]>
Commit | Line | Data |
---|---|---|
b3b94faa DT |
1 | /* |
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | |
da6dd40d | 3 | * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. |
b3b94faa DT |
4 | * |
5 | * This copyrighted material is made available to anyone wishing to use, | |
6 | * modify, copy, or redistribute it subject to the terms and conditions | |
e9fc2aa0 | 7 | * of the GNU General Public License version 2. |
b3b94faa DT |
8 | */ |
9 | ||
10 | #include <linux/sched.h> | |
11 | #include <linux/slab.h> | |
12 | #include <linux/spinlock.h> | |
13 | #include <linux/completion.h> | |
14 | #include <linux/buffer_head.h> | |
5c676f6d | 15 | #include <linux/gfs2_ondisk.h> |
71b86f56 | 16 | #include <linux/crc32.h> |
a25311c8 | 17 | #include <linux/delay.h> |
ec69b188 SW |
18 | #include <linux/kthread.h> |
19 | #include <linux/freezer.h> | |
254db57f | 20 | #include <linux/bio.h> |
4667a0ec | 21 | #include <linux/writeback.h> |
4a36d08d | 22 | #include <linux/list_sort.h> |
b3b94faa DT |
23 | |
24 | #include "gfs2.h" | |
5c676f6d | 25 | #include "incore.h" |
b3b94faa DT |
26 | #include "bmap.h" |
27 | #include "glock.h" | |
28 | #include "log.h" | |
29 | #include "lops.h" | |
30 | #include "meta_io.h" | |
5c676f6d | 31 | #include "util.h" |
71b86f56 | 32 | #include "dir.h" |
63997775 | 33 | #include "trace_gfs2.h" |
b3b94faa | 34 | |
b3b94faa DT |
35 | /** |
36 | * gfs2_struct2blk - compute stuff | |
37 | * @sdp: the filesystem | |
38 | * @nstruct: the number of structures | |
39 | * @ssize: the size of the structures | |
40 | * | |
41 | * Compute the number of log descriptor blocks needed to hold a certain number | |
42 | * of structures of a certain size. | |
43 | * | |
44 | * Returns: the number of blocks needed (minimum is always 1) | |
45 | */ | |
46 | ||
47 | unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct, | |
48 | unsigned int ssize) | |
49 | { | |
50 | unsigned int blks; | |
51 | unsigned int first, second; | |
52 | ||
53 | blks = 1; | |
faa31ce8 | 54 | first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize; |
b3b94faa DT |
55 | |
56 | if (nstruct > first) { | |
568f4c96 SW |
57 | second = (sdp->sd_sb.sb_bsize - |
58 | sizeof(struct gfs2_meta_header)) / ssize; | |
5c676f6d | 59 | blks += DIV_ROUND_UP(nstruct - first, second); |
b3b94faa DT |
60 | } |
61 | ||
62 | return blks; | |
63 | } | |
64 | ||
1e1a3d03 SW |
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	bd->bd_ail = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	/* Glock no longer has this buffer on its AIL; drop the count and
	   release the reference the AIL held on the buffer head. */
	atomic_dec(&bd->bd_gl->gl_ail_count);
	brelse(bd->bd_bh);
}
82 | ||
ddacfaf7 SW |
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @ai: The ail structure
 *
 * Returns: 1 if writeback was started and the ail lock was dropped and
 *          reacquired (the caller must restart its list walk), 0 otherwise
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_ail *ai)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_ail == ai);

		if (!buffer_busy(bh)) {
			if (!buffer_uptodate(bh))
				gfs2_io_error_bh(sdp, bh);
			/* Write complete: move to ail2 so the log tail can
			   eventually advance past this transaction */
			list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
			continue;
		}

		if (!buffer_dirty(bh))
			continue;
		/* Skip buffers under a glock we've already written this pass */
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		/* Move to the list head so we don't revisit it after restart */
		list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		/* generic_writepages() may sleep; drop the spinlock around it */
		spin_unlock(&sdp->sd_ail_lock);
		generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (wbc->nr_to_write <= 0)
			break;
		return 1;
	}

	return 0;
}
ddacfaf7 | 133 | |
ddacfaf7 | 134 | |
4667a0ec SW |
/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_ail *ai;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	spin_lock(&sdp->sd_ail_lock);
restart:
	list_for_each_entry_reverse(ai, head, ai_list) {
		if (wbc->nr_to_write <= 0)
			break;
		/* A non-zero return means the ail lock was dropped while
		   writing, so the list may have changed; walk it again. */
		if (gfs2_ail1_start_one(sdp, wbc, ai))
			goto restart;
	}
	spin_unlock(&sdp->sd_ail_lock);
	trace_gfs2_ail_flush(sdp, wbc, 0);
}
161 | ||
162 | /** | |
163 | * gfs2_ail1_start - start writeback of all ail1 entries | |
164 | * @sdp: The superblock | |
165 | */ | |
166 | ||
167 | static void gfs2_ail1_start(struct gfs2_sbd *sdp) | |
168 | { | |
169 | struct writeback_control wbc = { | |
170 | .sync_mode = WB_SYNC_NONE, | |
171 | .nr_to_write = LONG_MAX, | |
172 | .range_start = 0, | |
173 | .range_end = LLONG_MAX, | |
174 | }; | |
175 | ||
176 | return gfs2_ail1_flush(sdp, &wbc); | |
ddacfaf7 SW |
177 | } |
178 | ||
179 | /** | |
180 | * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced | |
181 | * @sdp: the filesystem | |
182 | * @ai: the AIL entry | |
183 | * | |
184 | */ | |
185 | ||
4667a0ec | 186 | static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai) |
ddacfaf7 SW |
187 | { |
188 | struct gfs2_bufdata *bd, *s; | |
189 | struct buffer_head *bh; | |
190 | ||
191 | list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list, | |
192 | bd_ail_st_list) { | |
193 | bh = bd->bd_bh; | |
ddacfaf7 | 194 | gfs2_assert(sdp, bd->bd_ail == ai); |
4667a0ec SW |
195 | if (buffer_busy(bh)) |
196 | continue; | |
ddacfaf7 SW |
197 | if (!buffer_uptodate(bh)) |
198 | gfs2_io_error_bh(sdp, bh); | |
ddacfaf7 SW |
199 | list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list); |
200 | } | |
201 | ||
ddacfaf7 SW |
202 | } |
203 | ||
4667a0ec SW |
/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 *
 * Returns: 1 if the ail1 list is now empty, 0 otherwise
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai, *s;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
		gfs2_ail1_empty_one(sdp, ai);
		if (list_empty(&ai->ai_ail1_list))
			list_move(&ai->ai_list, &sdp->sd_ail2_list);
		else
			/* Transactions must retire oldest-first; stop at the
			   first one that still has busy buffers. */
			break;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	return ret;
}
229 | ||
26b06a69 SW |
/*
 * gfs2_ail1_wait - wait on the first locked ail1 buffer found
 * @sdp: The superblock
 *
 * Waits for at most one buffer's I/O per call; callers are expected to
 * loop (see gfs2_meta_syncfs).
 */
static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_reverse(ai, &sdp->sd_ail1_list, ai_list) {
		list_for_each_entry(bd, &ai->ai_ail1_list, bd_ail_st_list) {
			bh = bd->bd_bh;
			if (!buffer_locked(bh))
				continue;
			/* Take a reference so the buffer head can't be freed
			   once we drop the ail lock to sleep on it. */
			get_bh(bh);
			spin_unlock(&sdp->sd_ail_lock);
			wait_on_buffer(bh);
			brelse(bh);
			return;
		}
	}
	spin_unlock(&sdp->sd_ail_lock);
}
ddacfaf7 SW |
251 | |
252 | /** | |
253 | * gfs2_ail2_empty_one - Check whether or not a trans in the AIL has been synced | |
254 | * @sdp: the filesystem | |
255 | * @ai: the AIL entry | |
256 | * | |
257 | */ | |
258 | ||
259 | static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai) | |
260 | { | |
261 | struct list_head *head = &ai->ai_ail2_list; | |
262 | struct gfs2_bufdata *bd; | |
263 | ||
264 | while (!list_empty(head)) { | |
265 | bd = list_entry(head->prev, struct gfs2_bufdata, | |
266 | bd_ail_st_list); | |
267 | gfs2_assert(sdp, bd->bd_ail == ai); | |
f91a0d3e | 268 | gfs2_remove_from_ail(bd); |
ddacfaf7 SW |
269 | } |
270 | } | |
271 | ||
b3b94faa DT |
/*
 * ail2_empty - free ail2 transactions that the new log tail has passed
 * @sdp: the filesystem
 * @new_tail: the journal block number the tail is moving to
 */
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_ail *ai, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	/* The journal is circular: the tail may have wrapped past the end */
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
		a = (old_tail <= ai->ai_first);
		b = (ai->ai_first < new_tail);
		/* Remove entries whose first block lies in [old_tail,
		   new_tail); with a wrap that interval is split in two,
		   hence the OR. */
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, ai);
		list_del(&ai->ai_list);
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
		kfree(ai);
	}

	spin_unlock(&sdp->sd_ail_lock);
}
297 | ||
/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. Thats
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			/* Kick logd so it can flush and free up journal space */
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	/* Claim our blocks atomically; if another reserver raced us and
	   changed the free count, start over. */
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks)
		goto retry;
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);

	return 0;
}
359 | ||
b3b94faa DT |
360 | /** |
361 | * log_distance - Compute distance between two journal blocks | |
362 | * @sdp: The GFS2 superblock | |
363 | * @newer: The most recent journal block of the pair | |
364 | * @older: The older journal block of the pair | |
365 | * | |
366 | * Compute the distance (in the journal direction) between two | |
367 | * blocks in the journal | |
368 | * | |
369 | * Returns: the distance in blocks | |
370 | */ | |
371 | ||
faa31ce8 | 372 | static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer, |
b3b94faa DT |
373 | unsigned int older) |
374 | { | |
375 | int dist; | |
376 | ||
377 | dist = newer - older; | |
378 | if (dist < 0) | |
379 | dist += sdp->sd_jdesc->jd_blocks; | |
380 | ||
381 | return dist; | |
382 | } | |
383 | ||
2332c443 RP |
384 | /** |
385 | * calc_reserved - Calculate the number of blocks to reserve when | |
386 | * refunding a transaction's unused buffers. | |
387 | * @sdp: The GFS2 superblock | |
388 | * | |
389 | * This is complex. We need to reserve room for all our currently used | |
390 | * metadata buffers (e.g. normal file I/O rewriting file time stamps) and | |
391 | * all our journaled data buffers for journaled files (e.g. files in the | |
392 | * meta_fs like rindex, or files for which chattr +j was done.) | |
393 | * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush | |
394 | * will count it as free space (sd_log_blks_free) and corruption will follow. | |
395 | * | |
396 | * We can have metadata bufs and jdata bufs in the same journal. So each | |
397 | * type gets its own log header, for which we need to reserve a block. | |
398 | * In fact, each type has the potential for needing more than one header | |
399 | * in cases where we have more buffers than will fit on a journal page. | |
400 | * Metadata journal entries take up half the space of journaled buffer entries. | |
401 | * Thus, metadata entries have buf_limit (502) and journaled buffers have | |
402 | * databuf_limit (251) before they cause a wrap around. | |
403 | * | |
404 | * Also, we need to reserve blocks for revoke journal entries and one for an | |
405 | * overall header for the lot. | |
406 | * | |
407 | * Returns: the number of blocks reserved | |
408 | */ | |
409 | static unsigned int calc_reserved(struct gfs2_sbd *sdp) | |
410 | { | |
411 | unsigned int reserved = 0; | |
412 | unsigned int mbuf_limit, metabufhdrs_needed; | |
413 | unsigned int dbuf_limit, databufhdrs_needed; | |
414 | unsigned int revokes = 0; | |
415 | ||
416 | mbuf_limit = buf_limit(sdp); | |
417 | metabufhdrs_needed = (sdp->sd_log_commited_buf + | |
418 | (mbuf_limit - 1)) / mbuf_limit; | |
419 | dbuf_limit = databuf_limit(sdp); | |
420 | databufhdrs_needed = (sdp->sd_log_commited_databuf + | |
421 | (dbuf_limit - 1)) / dbuf_limit; | |
422 | ||
2e95e3f6 | 423 | if (sdp->sd_log_commited_revoke > 0) |
2332c443 RP |
424 | revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke, |
425 | sizeof(u64)); | |
426 | ||
427 | reserved = sdp->sd_log_commited_buf + metabufhdrs_needed + | |
428 | sdp->sd_log_commited_databuf + databufhdrs_needed + | |
429 | revokes; | |
430 | /* One for the overall header */ | |
431 | if (reserved) | |
432 | reserved++; | |
433 | return reserved; | |
434 | } | |
435 | ||
b3b94faa DT |
436 | static unsigned int current_tail(struct gfs2_sbd *sdp) |
437 | { | |
438 | struct gfs2_ail *ai; | |
439 | unsigned int tail; | |
440 | ||
d6a079e8 | 441 | spin_lock(&sdp->sd_ail_lock); |
b3b94faa | 442 | |
faa31ce8 | 443 | if (list_empty(&sdp->sd_ail1_list)) { |
b3b94faa | 444 | tail = sdp->sd_log_head; |
faa31ce8 SW |
445 | } else { |
446 | ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list); | |
b3b94faa DT |
447 | tail = ai->ai_first; |
448 | } | |
449 | ||
d6a079e8 | 450 | spin_unlock(&sdp->sd_ail_lock); |
b3b94faa DT |
451 | |
452 | return tail; | |
453 | } | |
454 | ||
2332c443 | 455 | static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) |
b3b94faa DT |
456 | { |
457 | unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail); | |
458 | ||
459 | ail2_empty(sdp, new_tail); | |
460 | ||
fd041f0b | 461 | atomic_add(dist, &sdp->sd_log_blks_free); |
63997775 | 462 | trace_gfs2_log_blocks(sdp, dist); |
5e687eac BM |
463 | gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= |
464 | sdp->sd_jdesc->jd_blocks); | |
b3b94faa DT |
465 | |
466 | sdp->sd_log_tail = new_tail; | |
467 | } | |
468 | ||
b3b94faa | 469 | |
34cc1781 | 470 | static void log_flush_wait(struct gfs2_sbd *sdp) |
b3b94faa | 471 | { |
16615be1 SW |
472 | DEFINE_WAIT(wait); |
473 | ||
474 | if (atomic_read(&sdp->sd_log_in_flight)) { | |
475 | do { | |
476 | prepare_to_wait(&sdp->sd_log_flush_wait, &wait, | |
477 | TASK_UNINTERRUPTIBLE); | |
478 | if (atomic_read(&sdp->sd_log_in_flight)) | |
479 | io_schedule(); | |
480 | } while(atomic_read(&sdp->sd_log_in_flight)); | |
481 | finish_wait(&sdp->sd_log_flush_wait, &wait); | |
b3b94faa | 482 | } |
b3b94faa DT |
483 | } |
484 | ||
45138990 | 485 | static int ip_cmp(void *priv, struct list_head *a, struct list_head *b) |
4a36d08d | 486 | { |
45138990 | 487 | struct gfs2_inode *ipa, *ipb; |
4a36d08d | 488 | |
45138990 SW |
489 | ipa = list_entry(a, struct gfs2_inode, i_ordered); |
490 | ipb = list_entry(b, struct gfs2_inode, i_ordered); | |
4a36d08d | 491 | |
45138990 | 492 | if (ipa->i_no_addr < ipb->i_no_addr) |
4a36d08d | 493 | return -1; |
45138990 | 494 | if (ipa->i_no_addr > ipb->i_no_addr) |
4a36d08d BP |
495 | return 1; |
496 | return 0; | |
497 | } | |
498 | ||
d7b616e2 SW |
/*
 * gfs2_ordered_write - start writeback of all ordered-mode inode data
 * @sdp: The GFS2 superblock
 *
 * Inodes are sorted by block address for better I/O ordering.  Since the
 * ordered lock must be dropped around filemap_fdatawrite(), processed
 * inodes are parked on a private list and spliced back at the end.
 */
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_move(&ip->i_ordered, &written);
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	list_splice(&written, &sdp->sd_log_le_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
518 | ||
/*
 * gfs2_ordered_wait - wait for ordered-mode inode data writeback to finish
 * @sdp: The GFS2 superblock
 *
 * Removes each inode from the ordered list (clearing GIF_ORDERED) and
 * waits for its outstanding data I/O, dropping the ordered lock around
 * the wait.
 */
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}
536 | ||
/*
 * gfs2_ordered_del_inode - remove an inode from the ordered-write list
 * @ip: The inode
 *
 * The GIF_ORDERED bit and list membership are kept in sync under the
 * ordered lock; only unlink if the bit was actually set.
 */
void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
546 | ||
34cc1781 SW |
/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT), stored in lh_flags
 *
 * Builds a log header for the current flush head and writes it out,
 * pulling the log tail forward afterwards if it has moved.
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;
	int rw = WRITE_FLUSH_FUA | REQ_META;
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	lh = page_address(page);
	clear_page(lh);

	gfs2_ail1_empty(sdp);
	tail = current_tail(sdp);

	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		/* No barrier support: wait for all log I/O to finish before
		   writing the header, instead of relying on FLUSH/FUA
		   ordering at the device. */
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		rw = WRITE_SYNC | REQ_META | REQ_PRIO;
	}

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_log_write_page(sdp, page);
	gfs2_log_flush_bio(sdp, rw);
	log_flush_wait(sdp);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}
593 | ||
b3b94faa | 594 | /** |
b09e593d | 595 | * gfs2_log_flush - flush incore transaction(s) |
b3b94faa DT |
596 | * @sdp: the filesystem |
597 | * @gl: The glock structure to flush. If NULL, flush the whole incore log | |
598 | * | |
599 | */ | |
600 | ||
ed4878e8 | 601 | void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl) |
b3b94faa DT |
602 | { |
603 | struct gfs2_ail *ai; | |
604 | ||
484adff8 | 605 | down_write(&sdp->sd_log_flush_lock); |
f55ab26a | 606 | |
2bcd610d SW |
607 | /* Log might have been flushed while we waited for the flush lock */ |
608 | if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) { | |
609 | up_write(&sdp->sd_log_flush_lock); | |
610 | return; | |
f55ab26a | 611 | } |
63997775 | 612 | trace_gfs2_log_flush(sdp, 1); |
f55ab26a | 613 | |
b09e593d SW |
614 | ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL); |
615 | INIT_LIST_HEAD(&ai->ai_ail1_list); | |
616 | INIT_LIST_HEAD(&ai->ai_ail2_list); | |
b3b94faa | 617 | |
16615be1 SW |
618 | if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) { |
619 | printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf, | |
620 | sdp->sd_log_commited_buf); | |
621 | gfs2_assert_withdraw(sdp, 0); | |
622 | } | |
623 | if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) { | |
624 | printk(KERN_INFO "GFS2: log databuf %u %u\n", | |
625 | sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf); | |
626 | gfs2_assert_withdraw(sdp, 0); | |
627 | } | |
b3b94faa DT |
628 | gfs2_assert_withdraw(sdp, |
629 | sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke); | |
630 | ||
b3b94faa DT |
631 | sdp->sd_log_flush_head = sdp->sd_log_head; |
632 | sdp->sd_log_flush_wrapped = 0; | |
633 | ai->ai_first = sdp->sd_log_flush_head; | |
634 | ||
d7b616e2 | 635 | gfs2_ordered_write(sdp); |
b3b94faa | 636 | lops_before_commit(sdp); |
e8c92ed7 | 637 | gfs2_log_flush_bio(sdp, WRITE); |
d7b616e2 | 638 | |
34cc1781 | 639 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { |
fdb76a42 | 640 | log_write_header(sdp, 0); |
34cc1781 | 641 | } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){ |
fd041f0b | 642 | atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ |
63997775 | 643 | trace_gfs2_log_blocks(sdp, -1); |
fdb76a42 | 644 | log_write_header(sdp, 0); |
2332c443 | 645 | } |
b3b94faa | 646 | lops_after_commit(sdp, ai); |
b09e593d | 647 | |
fe1a698f SW |
648 | gfs2_log_lock(sdp); |
649 | sdp->sd_log_head = sdp->sd_log_flush_head; | |
faa31ce8 SW |
650 | sdp->sd_log_blks_reserved = 0; |
651 | sdp->sd_log_commited_buf = 0; | |
2332c443 | 652 | sdp->sd_log_commited_databuf = 0; |
faa31ce8 | 653 | sdp->sd_log_commited_revoke = 0; |
b3b94faa | 654 | |
d6a079e8 | 655 | spin_lock(&sdp->sd_ail_lock); |
b3b94faa DT |
656 | if (!list_empty(&ai->ai_ail1_list)) { |
657 | list_add(&ai->ai_list, &sdp->sd_ail1_list); | |
658 | ai = NULL; | |
659 | } | |
d6a079e8 | 660 | spin_unlock(&sdp->sd_ail_lock); |
b3b94faa | 661 | gfs2_log_unlock(sdp); |
63997775 | 662 | trace_gfs2_log_flush(sdp, 0); |
484adff8 | 663 | up_write(&sdp->sd_log_flush_lock); |
b3b94faa DT |
664 | |
665 | kfree(ai); | |
666 | } | |
667 | ||
/*
 * log_refund - return a transaction's unused reserved blocks to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * Folds the transaction's buffer/databuf/revoke deltas into the
 * filesystem-wide committed counts, recomputes the real reservation
 * requirement, and credits any excess back to sd_log_blks_free.
 */
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved;
	unsigned int unused;

	gfs2_log_lock(sdp);

	sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
	sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
		tr->tr_num_databuf_rm;
	gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
			     (((int)sdp->sd_log_commited_databuf) >= 0));
	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	reserved = calc_reserved(sdp);
	/* The transaction must never have used more than it reserved */
	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
	unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
	atomic_add(unused, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, unused);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);
	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}
692 | ||
693 | /** | |
694 | * gfs2_log_commit - Commit a transaction to the log | |
695 | * @sdp: the filesystem | |
696 | * @tr: the transaction | |
697 | * | |
5e687eac BM |
698 | * We wake up gfs2_logd if the number of pinned blocks exceed thresh1 |
699 | * or the total number of used blocks (pinned blocks plus AIL blocks) | |
700 | * is greater than thresh2. | |
701 | * | |
702 | * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of | |
703 | * journal size. | |
704 | * | |
b3b94faa DT |
705 | * Returns: errno |
706 | */ | |
707 | ||
708 | void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) | |
709 | { | |
710 | log_refund(sdp, tr); | |
484adff8 | 711 | up_read(&sdp->sd_log_flush_lock); |
b3b94faa | 712 | |
5e687eac BM |
713 | if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) || |
714 | ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) > | |
715 | atomic_read(&sdp->sd_log_thresh2))) | |
716 | wake_up(&sdp->sd_logd_waitq); | |
b3b94faa DT |
717 | } |
718 | ||
/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	down_write(&sdp->sd_log_flush_lock);

	/* At shutdown the log must be fully flushed: nothing reserved,
	   no outstanding log elements, and an empty ail1 list. */
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);

	/* After the unmount header the whole journal should be free and
	   head and tail coincident. */
	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;

	up_write(&sdp->sd_log_flush_lock);
}
750 | ||
a25311c8 SW |
751 | |
752 | /** | |
753 | * gfs2_meta_syncfs - sync all the buffers in a filesystem | |
754 | * @sdp: the filesystem | |
755 | * | |
756 | */ | |
757 | ||
758 | void gfs2_meta_syncfs(struct gfs2_sbd *sdp) | |
759 | { | |
760 | gfs2_log_flush(sdp, NULL); | |
761 | for (;;) { | |
5e687eac | 762 | gfs2_ail1_start(sdp); |
26b06a69 | 763 | gfs2_ail1_wait(sdp); |
4667a0ec | 764 | if (gfs2_ail1_empty(sdp)) |
a25311c8 | 765 | break; |
a25311c8 | 766 | } |
380f7c65 | 767 | gfs2_log_flush(sdp, NULL); |
a25311c8 SW |
768 | } |
769 | ||
5e687eac BM |
770 | static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) |
771 | { | |
772 | return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1)); | |
773 | } | |
774 | ||
775 | static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp) | |
776 | { | |
777 | unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free); | |
778 | return used_blocks >= atomic_read(&sdp->sd_log_thresh2); | |
779 | } | |
ec69b188 SW |
780 | |
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t = 1;
	DEFINE_WAIT(wait);

	while (!kthread_should_stop()) {

		/* Flush on the periodic timeout (t == 0) too */
		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL);
		}

		if (gfs2_ail_flush_reqd(sdp)) {
			gfs2_ail1_start(sdp);
			gfs2_ail1_wait(sdp);
			gfs2_ail1_empty(sdp);
			gfs2_log_flush(sdp, NULL);
		}

		/* Journal pressure relieved: wake blocked reservers */
		if (!gfs2_ail_flush_reqd(sdp))
			wake_up(&sdp->sd_log_waitq);

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

		try_to_freeze();

		do {
			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
					TASK_INTERRUPTIBLE);
			/* Re-check after queueing to avoid missed wakeups */
			if (!gfs2_ail_flush_reqd(sdp) &&
			    !gfs2_jrnl_flush_reqd(sdp) &&
			    !kthread_should_stop())
				t = schedule_timeout(t);
		} while(t && !gfs2_ail_flush_reqd(sdp) &&
			!gfs2_jrnl_flush_reqd(sdp) &&
			!kthread_should_stop());
		finish_wait(&sdp->sd_logd_waitq, &wait);
	}

	return 0;
}
831 |