/*
 * fs/gfs2/meta_io.c — GFS2 metadata buffer I/O
 * (retrieved from the mirror_ubuntu-artful-kernel.git tree;
 *  last commit shown: "[GFS2] Update copyright date to 2006")
 */
1 /*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
8 */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/mm.h>
16 #include <linux/pagemap.h>
17 #include <linux/writeback.h>
18 #include <linux/swap.h>
19 #include <linux/delay.h>
20 #include <linux/gfs2_ondisk.h>
21
22 #include "gfs2.h"
23 #include "lm_interface.h"
24 #include "incore.h"
25 #include "glock.h"
26 #include "glops.h"
27 #include "inode.h"
28 #include "log.h"
29 #include "lops.h"
30 #include "meta_io.h"
31 #include "rgrp.h"
32 #include "trans.h"
33 #include "util.h"
34
/*
 * buffer_busy: the buffer is dirty, locked, or pinned in the journal.
 * buffer_in_io: only the dirty and locked states (I/O pending/active).
 */
#define buffer_busy(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
#define buffer_in_io(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock)))
39
/*
 * aspace_get_block - stub get_block for the metadata address space.
 *
 * Metadata buffers are mapped by hand in getbuf(), so nothing should
 * ever need to map a block through this path; warn and refuse.
 */
static int aspace_get_block(struct inode *inode, sector_t lblock,
			    struct buffer_head *bh_result, int create)
{
	gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
	return -EOPNOTSUPP;
}
46
/* Write back a metadata page; buffers are already mapped by getbuf(),
   so aspace_get_block() only acts as a should-not-happen guard. */
static int gfs2_aspace_writepage(struct page *page,
				 struct writeback_control *wbc)
{
	return block_write_full_page(page, aspace_get_block, wbc);
}
52
/**
 * stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out.
 * @bh: the buffer we're stuck on
 *
 * Diagnostic only: dumps the buffer's state, its attached gfs2_bufdata
 * (if any), the owning glock, and — for inode glocks — the inode's
 * cached indirect-block buffers, so a release stall can be debugged
 * from the logs.
 */

static void stuck_releasepage(struct buffer_head *bh)
{
	struct inode *inode = bh->b_page->mapping->host;
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_glock *gl;

	fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
	fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
		(uint64_t)bh->b_blocknr, atomic_read(&bh->b_count));
	fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
	fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");

	/* No bufdata means no journal state to report */
	if (!bd)
		return;

	gl = bd->bd_gl;

	fs_warn(sdp, "gl = (%u, %llu)\n",
		gl->gl_name.ln_type, gl->gl_name.ln_number);

	fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
		(list_empty(&bd->bd_list_tr)) ? "no" : "yes",
		(list_empty(&bd->bd_le.le_list)) ? "no" : "yes");

	if (gl->gl_ops == &gfs2_inode_glops) {
		struct gfs2_inode *ip = gl->gl_object;
		unsigned int x;

		if (!ip)
			return;

		fs_warn(sdp, "ip = %llu %llu\n",
			ip->i_num.no_formal_ino, ip->i_num.no_addr);
		fs_warn(sdp, "ip->i_count = %d, ip->i_vnode = %s\n",
			atomic_read(&ip->i_count),
			(ip->i_vnode) ? "!NULL" : "NULL");

		for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
			fs_warn(sdp, "ip->i_cache[%u] = %s\n",
				x, (ip->i_cache[x]) ? "!NULL" : "NULL");
	}
}
102
/**
 * gfs2_aspace_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

static int gfs2_aspace_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;
	unsigned long t;

	if (!page_has_buffers(page))
		goto out;

	head = bh = page_buffers(page);
	do {
		t = jiffies;

		/* Wait for the buffer's reference count to drain.  While
		   i_writecount is raised (see gfs2_meta_inval()) we may
		   not give up — keep yielding, and print a diagnostic
		   every gt_stall_secs seconds.  Otherwise refuse the
		   release by returning 0. */
		while (atomic_read(&bh->b_count)) {
			if (atomic_read(&aspace->i_writecount)) {
				if (time_after_eq(jiffies, t +
				    gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
					stuck_releasepage(bh);
					t = jiffies;
				}

				yield();
				continue;
			}

			return 0;
		}

		gfs2_assert_warn(sdp, !buffer_pinned(bh));

		/* Detach and free the bufdata; it must no longer be on
		   any transaction, log, or AIL lists at this point */
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			gfs2_assert_warn(sdp, list_empty(&bd->bd_le.le_list));
			gfs2_assert_warn(sdp, !bd->bd_ail);
			kmem_cache_free(gfs2_bufdata_cachep, bd);
			bh->b_private = NULL;
		}

		bh = bh->b_this_page;
	} while (bh != head);

out:
	return try_to_free_buffers(page);
}
163
/* The metadata address space only writes back and releases pages */
static struct address_space_operations aspace_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_aspace_releasepage,
};
168
169 /**
170 * gfs2_aspace_get - Create and initialize a struct inode structure
171 * @sdp: the filesystem the aspace is in
172 *
173 * Right now a struct inode is just a struct inode. Maybe Linux
174 * will supply a more lightweight address space construct (that works)
175 * in the future.
176 *
177 * Make sure pages/buffers in this aspace aren't in high memory.
178 *
179 * Returns: the aspace
180 */
181
182 struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
183 {
184 struct inode *aspace;
185
186 aspace = new_inode(sdp->sd_vfs);
187 if (aspace) {
188 mapping_set_gfp_mask(aspace->i_mapping, GFP_KERNEL);
189 aspace->i_mapping->a_ops = &aspace_aops;
190 aspace->i_size = ~0ULL;
191 aspace->u.generic_ip = NULL;
192 insert_inode_hash(aspace);
193 }
194 return aspace;
195 }
196
/* Tear down an address-space inode created by gfs2_aspace_get() */
void gfs2_aspace_put(struct inode *aspace)
{
	remove_inode_hash(aspace);
	iput(aspace);
}
202
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the part of the AIL
 *
 * Walk one AIL entry's ail1 list, retiring clean buffers to the ail2
 * list and writing out dirty ones.  Called and returns with the log
 * lock held; the lock is dropped around buffer I/O, so the walk is
 * restarted from scratch after every submission.
 */

void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int retry;

	BUG_ON(!spin_is_locked(&sdp->sd_log_lock));

	do {
		retry = 0;

		list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
						 bd_ail_st_list) {
			bh = bd->bd_bh;

			gfs2_assert(sdp, bd->bd_ail == ai);

			if (!buffer_busy(bh)) {
				/* I/O complete: report any error and move
				   the buffer to the ail2 list */
				if (!buffer_uptodate(bh)) {
					gfs2_log_unlock(sdp);
					gfs2_io_error_bh(sdp, bh);
					gfs2_log_lock(sdp);
				}
				list_move(&bd->bd_ail_st_list,
					  &ai->ai_ail2_list);
				continue;
			}

			if (!buffer_dirty(bh))
				continue;

			/* Move to the list head so the restarted walk
			   below doesn't revisit this buffer first */
			list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);

			/* Drop the log lock for the I/O; wait for any
			   in-flight I/O before submitting the write */
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);

			/* List may have changed while unlocked: restart */
			retry = 1;
			break;
		}
	} while (retry);
}
253
/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 * @flags: DIO_ALL keeps scanning past busy buffers; otherwise the scan
 *         stops at the first busy one
 *
 * Moves every buffer whose I/O has completed from the ail1 list to the
 * ail2 list.
 *
 * Returns: non-zero if the ail1 list is now empty
 */

int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_ail == ai);

		if (buffer_busy(bh)) {
			if (flags & DIO_ALL)
				continue;
			else
				break;
		}

		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);

		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
	}

	return list_empty(&ai->ai_ail1_list);
}
287
/**
 * gfs2_ail2_empty_one - Remove all remaining buffers from an AIL entry
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 * Detaches every bufdata on the ail2 list from its AIL and glock
 * lists, decrementing the glock's AIL count and releasing the buffer
 * as each one goes.
 */

void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &ai->ai_ail2_list;
	struct gfs2_bufdata *bd;

	/* Consume the list from the tail */
	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_ail == ai);
		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&bd->bd_gl->gl_ail_count);
		brelse(bd->bd_bh);
	}
}
311
/**
 * ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 * A revoke is logged for each block removed, so a transaction sized
 * for that many revokes is opened first.
 */

void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	uint64_t blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

	error = gfs2_trans_begin(sdp, 0, blocks);
	/* gfs2_assert_withdraw() is truthy when the assertion fails,
	   i.e. when gfs2_trans_begin() returned an error */
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&gl->gl_ail_count);
		brelse(bh);
		/* Drop the log lock to write the revoke; the list head
		   is re-checked after the lock is retaken */
		gfs2_log_unlock(sdp);

		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
362
363 /**
364 * gfs2_meta_inval - Invalidate all buffers associated with a glock
365 * @gl: the glock
366 *
367 */
368
369 void gfs2_meta_inval(struct gfs2_glock *gl)
370 {
371 struct gfs2_sbd *sdp = gl->gl_sbd;
372 struct inode *aspace = gl->gl_aspace;
373 struct address_space *mapping = gl->gl_aspace->i_mapping;
374
375 gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
376
377 atomic_inc(&aspace->i_writecount);
378 truncate_inode_pages(mapping, 0);
379 atomic_dec(&aspace->i_writecount);
380
381 gfs2_assert_withdraw(sdp, !mapping->nrpages);
382 }
383
384 /**
385 * gfs2_meta_sync - Sync all buffers associated with a glock
386 * @gl: The glock
387 * @flags: DIO_START | DIO_WAIT
388 *
389 */
390
391 void gfs2_meta_sync(struct gfs2_glock *gl, int flags)
392 {
393 struct address_space *mapping = gl->gl_aspace->i_mapping;
394 int error = 0;
395
396 if (flags & DIO_START)
397 filemap_fdatawrite(mapping);
398 if (!error && (flags & DIO_WAIT))
399 error = filemap_fdatawait(mapping);
400
401 if (error)
402 gfs2_io_error(gl->gl_sbd);
403 }
404
/**
 * getbuf - Get a buffer with a given address space
 * @sdp: the filesystem
 * @aspace: the address space
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer, or NULL when @create is 0 and the page is not
 * already cached
 */

static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
				  uint64_t blkno, int create)
{
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	/* A page cache page holds 2^shift filesystem blocks */
	shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;			/* convert block to page */
	bufnum = blkno - (index << shift);	/* block buf index within page */

	if (create) {
		/* grab_cache_page() can fail under memory pressure;
		   keep yielding and retrying until it succeeds */
		for (;;) {
			page = grab_cache_page(aspace->i_mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_lock_page(aspace->i_mapping, index);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	mark_page_accessed(page);
	page_cache_release(page);

	return bh;
}
458
459 static void meta_prep_new(struct buffer_head *bh)
460 {
461 struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
462
463 lock_buffer(bh);
464 clear_buffer_dirty(bh);
465 set_buffer_uptodate(bh);
466 unlock_buffer(bh);
467
468 mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
469 }
470
471 /**
472 * gfs2_meta_new - Get a block
473 * @gl: The glock associated with this block
474 * @blkno: The block number
475 *
476 * Returns: The buffer
477 */
478
479 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, uint64_t blkno)
480 {
481 struct buffer_head *bh;
482 bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
483 meta_prep_new(bh);
484 return bh;
485 }
486
487 /**
488 * gfs2_meta_read - Read a block from disk
489 * @gl: The glock covering the block
490 * @blkno: The block number
491 * @flags: flags to gfs2_dreread()
492 * @bhp: the place where the buffer is returned (NULL on failure)
493 *
494 * Returns: errno
495 */
496
497 int gfs2_meta_read(struct gfs2_glock *gl, uint64_t blkno, int flags,
498 struct buffer_head **bhp)
499 {
500 int error;
501
502 *bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
503 error = gfs2_meta_reread(gl->gl_sbd, *bhp, flags);
504 if (error)
505 brelse(*bhp);
506
507 return error;
508 }
509
/**
 * gfs2_meta_reread - Reread a block from disk
 * @sdp: the filesystem
 * @bh: The block to read
 * @flags: DIO_FORCE forces a re-read even if the buffer is uptodate;
 *         DIO_START submits the read; DIO_WAIT waits for completion
 *
 * Returns: errno
 */

int gfs2_meta_reread(struct gfs2_sbd *sdp, struct buffer_head *bh, int flags)
{
	/* Don't touch the disk once the filesystem has been withdrawn */
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (flags & DIO_FORCE)
		clear_buffer_uptodate(bh);

	if ((flags & DIO_START) && !buffer_uptodate(bh))
		ll_rw_block(READ, 1, &bh);

	if (flags & DIO_WAIT) {
		wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			struct gfs2_trans *tr = current->journal_info;
			/* Only log the I/O error if a touched transaction
			   is in progress — presumably to avoid noise on
			   plain read paths; TODO confirm */
			if (tr && tr->tr_touched)
				gfs2_io_error_bh(sdp, bh);
			return -EIO;
		}
		/* Re-check shutdown: a withdraw may have raced the I/O */
		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
			return -EIO;
	}

	return 0;
}
545
546 /**
547 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
548 * @gl: the glock the buffer belongs to
549 * @bh: The buffer to be attached to
550 * @meta: Flag to indicate whether its metadata or not
551 */
552
553 void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
554 int meta)
555 {
556 struct gfs2_bufdata *bd;
557
558 if (meta)
559 lock_page(bh->b_page);
560
561 if (bh->b_private) {
562 if (meta)
563 unlock_page(bh->b_page);
564 return;
565 }
566
567 bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL),
568 memset(bd, 0, sizeof(struct gfs2_bufdata));
569
570 bd->bd_bh = bh;
571 bd->bd_gl = gl;
572
573 INIT_LIST_HEAD(&bd->bd_list_tr);
574 if (meta) {
575 lops_init_le(&bd->bd_le, &gfs2_buf_lops);
576 } else {
577 lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
578 get_bh(bh);
579 }
580 bh->b_private = bd;
581
582 if (meta)
583 unlock_page(bh->b_page);
584 }
585
/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to be pinned
 *
 * Marks the buffer pinned (it must not already be), retires it from
 * the AIL if its in-place write has already completed, and takes an
 * extra buffer reference for the journal machinery.
 */

void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd = bh->b_private;

	gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

	/* Pinning an already-pinned buffer is a logic error */
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);

	wait_on_buffer(bh);

	/* If this buffer is in the AIL and it has already been written
	   to in-place disk block, remove it from the AIL. */

	gfs2_log_lock(sdp);
	if (bd->bd_ail && !buffer_in_io(bh))
		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
	gfs2_log_unlock(sdp);

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);

	/* Extra reference; released via brelse() when the buffer is
	   later retired from the AIL (see gfs2_unpin()) */
	get_bh(bh);
}
620
/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: the AIL entry the buffer should now be tracked on
 *
 * Re-dirties the buffer and moves it onto @ai's ail1 list so that
 * writeback of the in-place block can be tracked.
 */

void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd = bh->b_private;

	gfs2_assert_withdraw(sdp, buffer_uptodate(bh));

	/* Unpinning a buffer that isn't pinned is a logic error */
	if (!buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);

	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	gfs2_log_lock(sdp);
	if (bd->bd_ail) {
		/* Already in an AIL: leave the old entry's list and drop
		   one reference (gfs2_pin() took one) */
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		/* First time in the AIL: account it against the glock */
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_ail = ai;
	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
	gfs2_log_unlock(sdp);
}
655
/**
 * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 * For each cached buffer in the run: unpin it if it was pinned
 * (removing it from the log's buffer accounting), revoke it if it was
 * in the AIL, and finally clear its dirty/uptodate state so it can't
 * be written back.
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct inode *aspace = ip->i_gl->gl_aspace;
	struct buffer_head *bh;

	while (blen) {
		/* NO_CREATE: only buffers already cached matter here */
		bh = getbuf(sdp, aspace, bstart, NO_CREATE);
		if (bh) {
			struct gfs2_bufdata *bd = bh->b_private;

			if (test_clear_buffer_pinned(bh)) {
				struct gfs2_trans *tr = current->journal_info;
				/* NOTE(review): this branch assumes bd and tr
				   are non-NULL whenever a pinned buffer is
				   found — verify against the pin path */
				gfs2_log_lock(sdp);
				list_del_init(&bd->bd_le.le_list);
				gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
				sdp->sd_log_num_buf--;
				gfs2_log_unlock(sdp);
				tr->tr_num_buf_rm++;
				brelse(bh);
			}
			if (bd) {
				gfs2_log_lock(sdp);
				if (bd->bd_ail) {
					uint64_t blkno = bh->b_blocknr;
					bd->bd_ail = NULL;
					list_del(&bd->bd_ail_st_list);
					list_del(&bd->bd_ail_gl_list);
					atomic_dec(&bd->bd_gl->gl_ail_count);
					brelse(bh);
					/* Write the revoke outside the lock */
					gfs2_log_unlock(sdp);
					gfs2_trans_add_revoke(sdp, blkno);
				} else
					gfs2_log_unlock(sdp);
			}

			/* Forget the contents so nothing writes them back */
			lock_buffer(bh);
			clear_buffer_dirty(bh);
			clear_buffer_uptodate(bh);
			unlock_buffer(bh);

			brelse(bh);
		}

		bstart++;
		blen--;
	}
}
712
713 /**
714 * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
715 * @ip: The GFS2 inode
716 *
717 * This releases buffers that are in the most-recently-used array of
718 * blocks used for indirect block addressing for this inode.
719 */
720
721 void gfs2_meta_cache_flush(struct gfs2_inode *ip)
722 {
723 struct buffer_head **bh_slot;
724 unsigned int x;
725
726 spin_lock(&ip->i_spin);
727
728 for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
729 bh_slot = &ip->i_cache[x];
730 if (!*bh_slot)
731 break;
732 brelse(*bh_slot);
733 *bh_slot = NULL;
734 }
735
736 spin_unlock(&ip->i_spin);
737 }
738
/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Try to use the gfs2_inode's MRU metadata tree cache.
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, uint64_t num,
			      int new, struct buffer_head **bhp)
{
	struct buffer_head *bh, **bh_slot = ip->i_cache + height;
	int error;

	/* Fast path: the cache slot for this height already holds the
	   wanted block */
	spin_lock(&ip->i_spin);
	bh = *bh_slot;
	if (bh) {
		if (bh->b_blocknr == num)
			get_bh(bh);
		else
			bh = NULL;
	}
	spin_unlock(&ip->i_spin);

	if (bh) {
		if (new)
			meta_prep_new(bh);
		else {
			error = gfs2_meta_reread(ip->i_sbd, bh,
						 DIO_START | DIO_WAIT);
			if (error) {
				brelse(bh);
				return error;
			}
		}
	} else {
		/* Slow path: fetch the buffer, then cache it, displacing
		   whatever the slot held before (brelse(NULL) is a no-op) */
		if (new)
			bh = gfs2_meta_new(ip->i_gl, num);
		else {
			error = gfs2_meta_read(ip->i_gl, num,
					       DIO_START | DIO_WAIT, &bh);
			if (error)
				return error;
		}

		spin_lock(&ip->i_spin);
		if (*bh_slot != bh) {
			brelse(*bh_slot);
			*bh_slot = bh;
			get_bh(bh);
		}
		spin_unlock(&ip->i_spin);
	}

	if (new) {
		/* New blocks created here must be indirect blocks, so
		   height 0 (a dinode) is rejected */
		if (gfs2_assert_warn(ip->i_sbd, height)) {
			brelse(bh);
			return -EIO;
		}
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));

	} else if (gfs2_metatype_check(ip->i_sbd, bh,
		   (height) ? GFS2_METATYPE_IN : GFS2_METATYPE_DI)) {
		brelse(bh);
		return -EIO;
	}

	*bhp = bh;

	return 0;
}
817
/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Issues asynchronous reads (DIO_START without DIO_WAIT) for up to
 * gt_max_readahead bytes' worth of blocks.  The loop stops as soon as
 * the first block becomes uptodate — presumably because the caller
 * only needs that one immediately.
 */

void gfs2_meta_ra(struct gfs2_glock *gl, uint64_t dblock, uint32_t extlen)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;
	struct buffer_head *first_bh, *bh;
	uint32_t max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
			  sdp->sd_sb.sb_bsize_shift;
	int error;

	if (!extlen || !max_ra)
		return;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = getbuf(sdp, aspace, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh)) {
		error = gfs2_meta_reread(sdp, first_bh, DIO_START);
		if (error)
			goto out;
	}

	dblock++;
	extlen--;

	while (extlen) {
		bh = getbuf(sdp, aspace, dblock, CREATE);

		/* Only submit a read if nobody else already has */
		if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
			error = gfs2_meta_reread(sdp, bh, DIO_START);
			brelse(bh);
			if (error)
				goto out;
		} else
			brelse(bh);

		dblock++;
		extlen--;

		/* Stop once the block the caller wants is ready */
		if (buffer_uptodate(first_bh))
			break;
	}

out:
	brelse(first_bh);
}
874
875 /**
876 * gfs2_meta_syncfs - sync all the buffers in a filesystem
877 * @sdp: the filesystem
878 *
879 */
880
881 void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
882 {
883 gfs2_log_flush(sdp, NULL);
884 for (;;) {
885 gfs2_ail1_start(sdp, DIO_ALL);
886 if (gfs2_ail1_empty(sdp, DIO_ALL))
887 break;
888 msleep(10);
889 }
890 }
891