2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/crc32.h>
16 #include <linux/gfs2_ondisk.h>
17 #include <linux/bio.h>
20 #include "lm_interface.h"
/*
 * gfs2_tune_init - Fill a gfs2_tune structure with default values
 * @gt: the gfs2_tune structure to initialize
 *
 * Initializes the structure's spinlock and assigns a compile-time default
 * to each tunable field (timer intervals in seconds, quota scaling,
 * readahead and lockdump sizes, and HZ-based "greedy" quanta).
 *
 * NOTE(review): this copy of the source appears truncated/garbled — the
 * spin_lock_init() argument is incomplete and at least two interior lines
 * (between gt_recoverd_secs and gt_quotad_secs, and before gt_ilimit_tries)
 * are missing. Reconcile against the authoritative tree before building.
 */
43 void gfs2_tune_init(struct gfs2_tune
*gt
)
/* NOTE(review): argument mangled; presumably &gt->gt_spin — confirm */
45 spin_lock_init(>
->gt_spin
);
/* inode-limit retry policy */
48 gt
->gt_ilimit_tries
= 3;
49 gt
->gt_ilimit_min
= 1;
/* demote unused glocks after 5 minutes */
50 gt
->gt_demote_secs
= 300;
51 gt
->gt_incore_log_blocks
= 1024;
/* daemon wakeup intervals, in seconds */
52 gt
->gt_log_flush_secs
= 60;
53 gt
->gt_jindex_refresh_secs
= 60;
54 gt
->gt_scand_secs
= 15;
55 gt
->gt_recoverd_secs
= 60;
57 gt
->gt_quotad_secs
= 5;
/* quota behaviour defaults */
58 gt
->gt_quota_simul_sync
= 64;
59 gt
->gt_quota_warn_period
= 10;
60 gt
->gt_quota_scale_num
= 1;
61 gt
->gt_quota_scale_den
= 1;
62 gt
->gt_quota_cache_secs
= 300;
63 gt
->gt_quota_quantum
= 60;
/* atime updates at most once an hour */
64 gt
->gt_atime_quantum
= 3600;
/* new files: no journaled data, no direct I/O by default */
65 gt
->gt_new_files_jdata
= 0;
66 gt
->gt_new_files_directio
= 0;
/* 4 MiB max atomic write, 256 KiB max readahead */
67 gt
->gt_max_atomic_write
= 4 << 20;
68 gt
->gt_max_readahead
= 1 << 18;
69 gt
->gt_lockdump_size
= 131072;
70 gt
->gt_stall_secs
= 600;
71 gt
->gt_complain_secs
= 10;
72 gt
->gt_reclaim_limit
= 5000;
73 gt
->gt_entries_per_readdir
= 32;
74 gt
->gt_prefetch_secs
= 10;
/* "greedy" glock hold times, expressed as fractions of a second (HZ) */
75 gt
->gt_greedy_default
= HZ
/ 10;
76 gt
->gt_greedy_quantum
= HZ
/ 40;
77 gt
->gt_greedy_max
= HZ
/ 4;
78 gt
->gt_statfs_quantum
= 30;
79 gt
->gt_statfs_slow
= 0;
/*
 * gfs2_check_sb - Check superblock
 * @sdp: the filesystem
 * @sb: the superblock read from disk
 * @silent: Don't print a message if the check fails
 *
 * Checks that the superblock carries the GFS2 magic/type, then that the
 * on-disk fs and multihost format numbers match the ones this code
 * understands (GFS2_FORMAT_FS / GFS2_FORMAT_MULTI).  For mismatches it
 * scans the gfs2_old_*_formats tables for an upgradeable older format,
 * and honours the "upgrade" mount option (sdp->sd_args.ar_upgrade).
 *
 * NOTE(review): return statements, braces and several printk lines are
 * missing from this copy — error/return paths cannot be read here.
 */
93 int gfs2_check_sb(struct gfs2_sbd
*sdp
, struct gfs2_sb
*sb
, int silent
)
/* reject anything that is not a GFS2 superblock */
97 if (sb
->sb_header
.mh_magic
!= GFS2_MAGIC
||
98 sb
->sb_header
.mh_type
!= GFS2_METATYPE_SB
) {
100 printk(KERN_WARNING
"GFS2: not a GFS2 filesystem\n");
104 /* If format numbers match exactly, we're done. */
106 if (sb
->sb_fs_format
== GFS2_FORMAT_FS
&&
107 sb
->sb_multihost_format
== GFS2_FORMAT_MULTI
)
/* fs format mismatch: look for a known, upgradeable older format */
110 if (sb
->sb_fs_format
!= GFS2_FORMAT_FS
) {
111 for (x
= 0; gfs2_old_fs_formats
[x
]; x
++)
112 if (gfs2_old_fs_formats
[x
] == sb
->sb_fs_format
)
/* table exhausted: on-disk format is unknown to this code */
115 if (!gfs2_old_fs_formats
[x
]) {
117 "GFS2: code version (%u, %u) is incompatible "
118 "with ondisk format (%u, %u)\n",
119 GFS2_FORMAT_FS
, GFS2_FORMAT_MULTI
,
120 sb
->sb_fs_format
, sb
->sb_multihost_format
);
122 "GFS2: I don't know how to upgrade this FS\n");
/* same check for the multihost format */
127 if (sb
->sb_multihost_format
!= GFS2_FORMAT_MULTI
) {
128 for (x
= 0; gfs2_old_multihost_formats
[x
]; x
++)
129 if (gfs2_old_multihost_formats
[x
] ==
130 sb
->sb_multihost_format
)
133 if (!gfs2_old_multihost_formats
[x
]) {
135 "GFS2: code version (%u, %u) is incompatible "
136 "with ondisk format (%u, %u)\n",
137 GFS2_FORMAT_FS
, GFS2_FORMAT_MULTI
,
138 sb
->sb_fs_format
, sb
->sb_multihost_format
);
140 "GFS2: I don't know how to upgrade this FS\n");
/* old-but-known format: an upgrade is possible, but only if requested */
145 if (!sdp
->sd_args
.ar_upgrade
) {
147 "GFS2: code version (%u, %u) is incompatible "
148 "with ondisk format (%u, %u)\n",
149 GFS2_FORMAT_FS
, GFS2_FORMAT_MULTI
,
150 sb
->sb_fs_format
, sb
->sb_multihost_format
);
152 "GFS2: Use the \"upgrade\" mount option to upgrade "
154 printk(KERN_INFO
"GFS2: See the manual for more details\n");
/*
 * end_bio_io_page - bio completion callback for the superblock read
 * @bio: the completed bio
 * @bytes_done: bytes transferred so far
 * @error: I/O error code, 0 on success
 *
 * Recovers the target page from bio->bi_private; on success marks it
 * uptodate, otherwise logs the error.  Pairs with gfs2_read_super(),
 * which set bi_private/bi_end_io.
 *
 * NOTE(review): the success/error branching and unlock/return lines are
 * missing from this copy — presumably the page is also unlocked here.
 */
162 static int end_bio_io_page(struct bio
*bio
, unsigned int bytes_done
, int error
)
164 struct page
*page
= bio
->bi_private
;
169 SetPageUptodate(page
);
171 printk(KERN_WARNING
"gfs2: error %d reading superblock\n", error
);
/*
 * gfs2_read_super - read one device block synchronously into a fresh page
 * @sb: the VFS super block (used for s_bdev)
 * @sector: the device sector to read
 *
 * Allocates a page and a single-segment bio, points the bio at @sector on
 * sb->s_bdev, submits it READ_SYNC with end_bio_io_page() as completion,
 * and waits for the page to be unlocked.  Returns the page on success.
 *
 * NOTE(review): allocation-failure paths, page locking, and the final
 * return/free lines are missing from this copy; the !PageUptodate branch
 * body (presumably freeing the page and returning NULL) is not visible.
 */
176 static struct page
*gfs2_read_super(struct super_block
*sb
, sector_t sector
)
181 page
= alloc_page(GFP_KERNEL
);
185 ClearPageUptodate(page
);
186 ClearPageDirty(page
);
/* one-segment bio aimed at the requested sector on this device */
189 bio
= bio_alloc(GFP_KERNEL
, 1);
190 if (unlikely(!bio
)) {
195 bio
->bi_sector
= sector
;
196 bio
->bi_bdev
= sb
->s_bdev
;
197 bio_add_page(bio
, page
, PAGE_SIZE
, 0);
/* completion handler marks the page uptodate (see end_bio_io_page) */
199 bio
->bi_end_io
= end_bio_io_page
;
200 bio
->bi_private
= page
;
201 submit_bio(READ_SYNC
, bio
);
202 wait_on_page_locked(page
);
/* I/O failed or was short: page never became uptodate */
204 if (!PageUptodate(page
)) {
/*
 * gfs2_read_sb - Read super block
 * @sdp: The GFS2 superblock
 * @gl: the glock for the superblock (assumed to be held)
 * @silent: Don't print message if mount fails
 *
 * Reads the on-disk superblock via gfs2_read_super(), validates it with
 * gfs2_check_sb(), then derives all the block-size-dependent geometry in
 * @sdp: fs-block/basic-block shift, pointers per dinode/indirect block,
 * journaled block size, directory hash sizes, the worst-case reservation
 * for a directory insert, and the per-height metadata size tables
 * (sd_heightsize / sd_jheightsize) capped at GFS2_MAX_META_HEIGHT.
 *
 * NOTE(review): loop headers, the hash_blocks divisor, ind_blocks
 * initialization, and the height-table for-loops are missing from this
 * copy — the arithmetic below cannot be fully verified here.
 */
219 int gfs2_read_sb(struct gfs2_sbd
*sdp
, struct gfs2_glock
*gl
, int silent
)
221 uint32_t hash_blocks
, ind_blocks
, leaf_blocks
;
/* superblock lives at a fixed basic-block address, scaled to fs blocks */
228 page
= gfs2_read_super(sdp
->sd_vfs
, GFS2_SB_ADDR
>> sdp
->sd_fsb2bb_shift
);
231 fs_err(sdp
, "can't read superblock\n");
/* convert on-disk endianness into sdp->sd_sb */
235 gfs2_sb_in(&sdp
->sd_sb
, sb
);
239 error
= gfs2_check_sb(sdp
, &sdp
->sd_sb
, silent
);
/* derive block-size geometry from the now-validated superblock */
243 sdp
->sd_fsb2bb_shift
= sdp
->sd_sb
.sb_bsize_shift
-
244 GFS2_BASIC_BLOCK_SHIFT
;
245 sdp
->sd_fsb2bb
= 1 << sdp
->sd_fsb2bb_shift
;
/* 64-bit block pointers that fit after a dinode header ... */
246 sdp
->sd_diptrs
= (sdp
->sd_sb
.sb_bsize
-
247 sizeof(struct gfs2_dinode
)) / sizeof(uint64_t);
/* ... and after an indirect-block metadata header */
248 sdp
->sd_inptrs
= (sdp
->sd_sb
.sb_bsize
-
249 sizeof(struct gfs2_meta_header
)) / sizeof(uint64_t);
250 sdp
->sd_jbsize
= sdp
->sd_sb
.sb_bsize
- sizeof(struct gfs2_meta_header
);
/* directory hash table occupies half a block */
251 sdp
->sd_hash_bsize
= sdp
->sd_sb
.sb_bsize
/ 2;
252 sdp
->sd_hash_bsize_shift
= sdp
->sd_sb
.sb_bsize_shift
- 1;
253 sdp
->sd_hash_ptrs
= sdp
->sd_hash_bsize
/ sizeof(uint64_t);
254 sdp
->sd_qc_per_block
= (sdp
->sd_sb
.sb_bsize
-
255 sizeof(struct gfs2_meta_header
)) /
256 sizeof(struct gfs2_quota_change
);
258 /* Compute maximum reservation required to add a entry to a directory */
260 hash_blocks
= DIV_ROUND_UP(sizeof(uint64_t) * (1 << GFS2_DIR_MAX_DEPTH
),
/* accumulate indirect blocks needed to address the hash blocks */
264 for (tmp_blocks
= hash_blocks
; tmp_blocks
> sdp
->sd_diptrs
;) {
265 tmp_blocks
= DIV_ROUND_UP(tmp_blocks
, sdp
->sd_inptrs
);
266 ind_blocks
+= tmp_blocks
;
269 leaf_blocks
= 2 + GFS2_DIR_MAX_DEPTH
;
271 sdp
->sd_max_dirres
= hash_blocks
+ ind_blocks
+ leaf_blocks
;
/* bytes addressable at each metadata tree height (regular files) */
273 sdp
->sd_heightsize
[0] = sdp
->sd_sb
.sb_bsize
-
274 sizeof(struct gfs2_dinode
);
275 sdp
->sd_heightsize
[1] = sdp
->sd_sb
.sb_bsize
* sdp
->sd_diptrs
;
280 space
= sdp
->sd_heightsize
[x
- 1] * sdp
->sd_inptrs
;
/* stop growing the table once the multiply overflows (do_div check) */
282 m
= do_div(d
, sdp
->sd_inptrs
);
284 if (d
!= sdp
->sd_heightsize
[x
- 1] || m
)
286 sdp
->sd_heightsize
[x
] = space
;
288 sdp
->sd_max_height
= x
;
289 gfs2_assert(sdp
, sdp
->sd_max_height
<= GFS2_MAX_META_HEIGHT
);
/* same table, but for journaled files (sd_jbsize per block) */
291 sdp
->sd_jheightsize
[0] = sdp
->sd_sb
.sb_bsize
-
292 sizeof(struct gfs2_dinode
);
293 sdp
->sd_jheightsize
[1] = sdp
->sd_jbsize
* sdp
->sd_diptrs
;
298 space
= sdp
->sd_jheightsize
[x
- 1] * sdp
->sd_inptrs
;
300 m
= do_div(d
, sdp
->sd_inptrs
);
302 if (d
!= sdp
->sd_jheightsize
[x
- 1] || m
)
304 sdp
->sd_jheightsize
[x
] = space
;
306 sdp
->sd_max_jheight
= x
;
307 gfs2_assert(sdp
, sdp
->sd_max_jheight
<= GFS2_MAX_META_HEIGHT
);
/*
 * gfs2_jindex_hold - Grab a lock on the jindex
 * @sdp: The GFS2 superblock
 * @ji_gh: the holder for the jindex glock
 *
 * Takes the jindex mutex and the jindex inode glock (shared, local-excl),
 * then looks up "journal%u" entries beyond sd_journals, allocating a
 * gfs2_jdesc for each newly discovered journal and appending it to
 * sd_jindex_list under sd_jindex_spin.  -ENOENT from the directory search
 * terminates the scan.
 *
 * This is very similar to the gfs2_rindex_hold() function, except that
 * in general we hold the jindex lock for longer periods of time and
 * we grab it far less frequently (in general) then the rgrp lock.
 *
 * NOTE(review): the enclosing loop, error paths, kfree on failure, and
 * the function's return are missing from this copy.
 */
324 int gfs2_jindex_hold(struct gfs2_sbd
*sdp
, struct gfs2_holder
*ji_gh
)
326 struct gfs2_inode
*dip
= GFS2_I(sdp
->sd_jindex
);
329 struct gfs2_jdesc
*jd
;
/* serialize jindex scans */
334 mutex_lock(&sdp
->sd_jindex_mutex
);
337 error
= gfs2_glock_nq_init(dip
->i_gl
, LM_ST_SHARED
,
338 GL_LOCAL_EXCL
, ji_gh
);
/* build the name of the next journal to look for */
342 name
.len
= sprintf(buf
, "journal%u", sdp
->sd_journals
);
343 name
.hash
= gfs2_disk_hash(name
.name
, name
.len
);
345 error
= gfs2_dir_search(sdp
->sd_jindex
, &name
, NULL
, NULL
);
/* no more journals: scan is complete */
346 if (error
== -ENOENT
) {
351 gfs2_glock_dq_uninit(ji_gh
);
/* found one: allocate and populate its descriptor */
357 jd
= kzalloc(sizeof(struct gfs2_jdesc
), GFP_KERNEL
);
361 jd
->jd_inode
= gfs2_lookupi(sdp
->sd_jindex
, &name
, 1, NULL
);
362 if (!jd
->jd_inode
|| IS_ERR(jd
->jd_inode
)) {
366 error
= PTR_ERR(jd
->jd_inode
);
/* publish the new journal under the jindex spinlock */
371 spin_lock(&sdp
->sd_jindex_spin
);
372 jd
->jd_jid
= sdp
->sd_journals
++;
373 list_add_tail(&jd
->jd_list
, &sdp
->sd_jindex_list
);
374 spin_unlock(&sdp
->sd_jindex_spin
);
377 mutex_unlock(&sdp
->sd_jindex_mutex
);
/*
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 * Atomically detaches the whole sd_jindex_list onto a local list head
 * (and resets sd_journals to 0) under sd_jindex_spin, then walks the
 * local list tearing down each gfs2_jdesc.
 *
 * NOTE(review): the per-descriptor teardown inside the while loop
 * (presumably iput + kfree) is missing from this copy.
 */
388 void gfs2_jindex_free(struct gfs2_sbd
*sdp
)
390 struct list_head list
;
391 struct gfs2_jdesc
*jd
;
/* splice the live list onto a private head so the lock can be dropped */
393 spin_lock(&sdp
->sd_jindex_spin
);
394 list_add(&list
, &sdp
->sd_jindex_list
);
395 list_del_init(&sdp
->sd_jindex_list
);
396 sdp
->sd_journals
= 0;
397 spin_unlock(&sdp
->sd_jindex_spin
);
/* free descriptors off the private list, no lock needed */
399 while (!list_empty(&list
)) {
400 jd
= list_entry(list
.next
, struct gfs2_jdesc
, jd_list
);
401 list_del(&jd
->jd_list
);
/*
 * jdesc_find_i - find a journal descriptor by journal id on a given list
 * @head: the list of gfs2_jdesc to search (caller holds the protecting lock)
 * @jid: the journal id to match
 *
 * Linear scan comparing jd_jid.  NOTE(review): the return statements
 * (found descriptor vs. not-found) are missing from this copy.
 */
407 static struct gfs2_jdesc
*jdesc_find_i(struct list_head
*head
, unsigned int jid
)
409 struct gfs2_jdesc
*jd
;
412 list_for_each_entry(jd
, head
, jd_list
) {
413 if (jd
->jd_jid
== jid
) {
/*
 * gfs2_jdesc_find - look up a journal descriptor by id
 * @sdp: the filesystem
 * @jid: the journal id
 *
 * Thin locked wrapper around jdesc_find_i(): searches sd_jindex_list
 * under sd_jindex_spin.  NOTE(review): the final return is missing from
 * this copy — presumably returns the found jd (or NULL).
 */
425 struct gfs2_jdesc
*gfs2_jdesc_find(struct gfs2_sbd
*sdp
, unsigned int jid
)
427 struct gfs2_jdesc
*jd
;
429 spin_lock(&sdp
->sd_jindex_spin
);
430 jd
= jdesc_find_i(&sdp
->sd_jindex_list
, jid
);
431 spin_unlock(&sdp
->sd_jindex_spin
);
/*
 * gfs2_jdesc_make_dirty - mark a journal (by id) as needing attention
 * @sdp: the filesystem
 * @jid: the journal id
 *
 * Looks the descriptor up under sd_jindex_spin.  NOTE(review): the line
 * that actually sets the dirty state on the found jd is missing from
 * this copy — only the lookup and unlock are visible.
 */
436 void gfs2_jdesc_make_dirty(struct gfs2_sbd
*sdp
, unsigned int jid
)
438 struct gfs2_jdesc
*jd
;
440 spin_lock(&sdp
->sd_jindex_spin
);
441 jd
= jdesc_find_i(&sdp
->sd_jindex_list
, jid
);
444 spin_unlock(&sdp
->sd_jindex_spin
);
/*
 * gfs2_jdesc_find_dirty - find the first journal descriptor flagged dirty
 * @sdp: the filesystem
 *
 * Walks sd_jindex_list under sd_jindex_spin.  NOTE(review): the loop body
 * (the dirty test) and the return statement are missing from this copy.
 */
447 struct gfs2_jdesc
*gfs2_jdesc_find_dirty(struct gfs2_sbd
*sdp
)
449 struct gfs2_jdesc
*jd
;
452 spin_lock(&sdp
->sd_jindex_spin
);
454 list_for_each_entry(jd
, &sdp
->sd_jindex_list
, jd_list
) {
461 spin_unlock(&sdp
->sd_jindex_spin
);
/*
 * gfs2_jdesc_check - sanity-check a journal's size and alignment
 * @jd: the journal descriptor (jd_inode must be set)
 *
 * Rejects journals smaller than 8 MiB, larger than 1 GiB, or whose size
 * is not a multiple of the filesystem block size (gfs2_consist_inode is
 * raised in those cases).  On success computes jd_blocks from di_size and
 * verifies via gfs2_write_alloc_required() that the journal is fully
 * allocated — a journal needing allocation is also a consistency error.
 *
 * NOTE(review): the returns after each gfs2_consist_inode() call and the
 * final return are missing from this copy.
 */
469 int gfs2_jdesc_check(struct gfs2_jdesc
*jd
)
471 struct gfs2_inode
*ip
= GFS2_I(jd
->jd_inode
);
472 struct gfs2_sbd
*sdp
= GFS2_SB(jd
->jd_inode
);
/* journal must be 8MiB..1GiB and block-aligned */
476 if (ip
->i_di
.di_size
< (8 << 20) || ip
->i_di
.di_size
> (1 << 30) ||
477 (ip
->i_di
.di_size
& (sdp
->sd_sb
.sb_bsize
- 1))) {
478 gfs2_consist_inode(ip
);
481 jd
->jd_blocks
= ip
->i_di
.di_size
>> sdp
->sd_sb
.sb_bsize_shift
;
/* the journal must already be fully allocated on disk */
483 error
= gfs2_write_alloc_required(ip
, 0, ip
->i_di
.di_size
, &ar
);
485 gfs2_consist_inode(ip
);
/*
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Takes the transaction glock shared (local-exclusive), flushes and
 * invalidates this node's journal metadata, finds the journal head, and
 * requires the journal to be cleanly unmounted (GFS2_LOG_HEAD_UNMOUNT
 * set in the log header flags).  Then seeds the log sequence/pointers
 * from the head, initializes quotas, sets SDF_JOURNAL_LIVE, and drops
 * the transaction glock.
 *
 * NOTE(review): error checks/returns between steps and the fail-path
 * labels are missing from this copy; the trailing t_gh lines suggest a
 * GL_NOCACHE error path that cannot be fully read here.
 */
499 int gfs2_make_fs_rw(struct gfs2_sbd
*sdp
)
501 struct gfs2_inode
*ip
= GFS2_I(sdp
->sd_jdesc
->jd_inode
);
502 struct gfs2_glock
*j_gl
= ip
->i_gl
;
503 struct gfs2_holder t_gh
;
504 struct gfs2_log_header head
;
507 error
= gfs2_glock_nq_init(sdp
->sd_trans_gl
, LM_ST_SHARED
,
508 GL_LOCAL_EXCL
, &t_gh
);
/* drop any stale cached journal metadata before reading the head */
512 gfs2_meta_cache_flush(ip
);
513 j_gl
->gl_ops
->go_inval(j_gl
, DIO_METADATA
| DIO_DATA
);
515 error
= gfs2_find_jhead(sdp
->sd_jdesc
, &head
);
/* journal must have been cleanly unmounted */
519 if (!(head
.lh_flags
& GFS2_LOG_HEAD_UNMOUNT
)) {
525 /* Initialize some head of the log stuff */
526 sdp
->sd_log_sequence
= head
.lh_sequence
+ 1;
527 gfs2_log_pointers_init(sdp
, head
.lh_blkno
);
529 error
= gfs2_quota_init(sdp
);
533 set_bit(SDF_JOURNAL_LIVE
, &sdp
->sd_flags
);
535 gfs2_glock_dq_uninit(&t_gh
);
/* failure path: drop the lock without caching it */
542 t_gh
.gh_flags
|= GL_NOCACHE
;
543 gfs2_glock_dq_uninit(&t_gh
);
/*
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Syncs quotas and statfs data, takes the transaction glock shared with
 * GL_LOCAL_EXCL|GL_NOCACHE, flushes all metadata and shuts down the log,
 * clears SDF_JOURNAL_LIVE, then drops the glock and cleans up quotas.
 *
 * NOTE(review): the glock holder argument, error returns, and the
 * SDF_SHUTDOWN branch body are missing from this copy.
 */
555 int gfs2_make_fs_ro(struct gfs2_sbd
*sdp
)
557 struct gfs2_holder t_gh
;
/* push pending quota/statfs changes before going read-only */
560 gfs2_quota_sync(sdp
);
561 gfs2_statfs_sync(sdp
);
563 error
= gfs2_glock_nq_init(sdp
->sd_trans_gl
, LM_ST_SHARED
,
564 GL_LOCAL_EXCL
| GL_NOCACHE
,
/* a lock failure is tolerated if the fs is already shut down */
566 if (error
&& !test_bit(SDF_SHUTDOWN
, &sdp
->sd_flags
))
569 gfs2_meta_syncfs(sdp
);
570 gfs2_log_shutdown(sdp
);
572 clear_bit(SDF_JOURNAL_LIVE
, &sdp
->sd_flags
);
575 gfs2_glock_dq_uninit(&t_gh
);
577 gfs2_quota_cleanup(sdp
);
/*
 * gfs2_statfs_init - load the master (and local) statfs change records
 * @sdp: the filesystem
 *
 * Under an exclusive, uncached glock on the master statfs inode, reads
 * the master gfs2_statfs_change from its dinode buffer into
 * sd_statfs_master.  Spectator mounts stop there; otherwise the node's
 * local statfs change file is read into sd_statfs_local as well.  All
 * in-core copies are updated under sd_statfs_spin.
 *
 * NOTE(review): error returns, brelse() calls, and the glock holder
 * argument continuation are missing from this copy.
 */
582 int gfs2_statfs_init(struct gfs2_sbd
*sdp
)
584 struct gfs2_inode
*m_ip
= GFS2_I(sdp
->sd_statfs_inode
);
585 struct gfs2_statfs_change
*m_sc
= &sdp
->sd_statfs_master
;
586 struct gfs2_inode
*l_ip
= GFS2_I(sdp
->sd_sc_inode
);
587 struct gfs2_statfs_change
*l_sc
= &sdp
->sd_statfs_local
;
588 struct buffer_head
*m_bh
, *l_bh
;
589 struct gfs2_holder gh
;
592 error
= gfs2_glock_nq_init(m_ip
->i_gl
, LM_ST_EXCLUSIVE
, GL_NOCACHE
,
597 error
= gfs2_meta_inode_buffer(m_ip
, &m_bh
);
/* spectator mounts have no local statfs file: master copy only */
601 if (sdp
->sd_args
.ar_spectator
) {
602 spin_lock(&sdp
->sd_statfs_spin
);
603 gfs2_statfs_change_in(m_sc
, m_bh
->b_data
+
604 sizeof(struct gfs2_dinode
));
605 spin_unlock(&sdp
->sd_statfs_spin
);
607 error
= gfs2_meta_inode_buffer(l_ip
, &l_bh
);
/* read both master and this node's local change record */
611 spin_lock(&sdp
->sd_statfs_spin
);
612 gfs2_statfs_change_in(m_sc
, m_bh
->b_data
+
613 sizeof(struct gfs2_dinode
));
614 gfs2_statfs_change_in(l_sc
, l_bh
->b_data
+
615 sizeof(struct gfs2_dinode
));
616 spin_unlock(&sdp
->sd_statfs_spin
);
625 gfs2_glock_dq_uninit(&gh
);
/*
 * gfs2_statfs_change - apply a delta to this node's local statfs record
 * @sdp: the filesystem
 * @total: delta to total blocks
 * @free: delta to free blocks
 * (a dinodes delta parameter is also used below; its declaration line is
 *  missing from this copy)
 *
 * Reads the local statfs inode buffer, adds it to the current transaction
 * (serialized by sd_statfs_mutex), then under sd_statfs_spin applies the
 * deltas to sd_statfs_local and writes the record back into the buffer
 * after the dinode header.
 *
 * NOTE(review): the error check after gfs2_meta_inode_buffer and the
 * final brelse are missing from this copy.
 */
630 void gfs2_statfs_change(struct gfs2_sbd
*sdp
, int64_t total
, int64_t free
,
633 struct gfs2_inode
*l_ip
= GFS2_I(sdp
->sd_sc_inode
);
634 struct gfs2_statfs_change
*l_sc
= &sdp
->sd_statfs_local
;
635 struct buffer_head
*l_bh
;
638 error
= gfs2_meta_inode_buffer(l_ip
, &l_bh
);
/* attach the buffer to the transaction under the statfs mutex */
642 mutex_lock(&sdp
->sd_statfs_mutex
);
643 gfs2_trans_add_bh(l_ip
->i_gl
, l_bh
, 1);
644 mutex_unlock(&sdp
->sd_statfs_mutex
);
/* apply deltas in-core and on the buffer atomically w.r.t. readers */
646 spin_lock(&sdp
->sd_statfs_spin
);
647 l_sc
->sc_total
+= total
;
648 l_sc
->sc_free
+= free
;
649 l_sc
->sc_dinodes
+= dinodes
;
650 gfs2_statfs_change_out(l_sc
, l_bh
->b_data
+
651 sizeof(struct gfs2_dinode
));
652 spin_unlock(&sdp
->sd_statfs_spin
);
/*
 * gfs2_statfs_sync - fold this node's local statfs deltas into the master
 * @sdp: the filesystem
 *
 * Under an exclusive, uncached glock on the master statfs inode: re-reads
 * the master record; if the local record is all zero there is nothing to
 * do.  Otherwise, in a 2*RES_DINODE transaction, adds the local deltas to
 * the master record, zeroes the local record (both in-core and in its
 * buffer), and writes the updated master record out.
 *
 * NOTE(review): error returns, brelse()/trans-end calls, and the glock
 * holder argument continuation are missing from this copy.
 */
657 int gfs2_statfs_sync(struct gfs2_sbd
*sdp
)
659 struct gfs2_inode
*m_ip
= GFS2_I(sdp
->sd_statfs_inode
);
660 struct gfs2_inode
*l_ip
= GFS2_I(sdp
->sd_sc_inode
);
661 struct gfs2_statfs_change
*m_sc
= &sdp
->sd_statfs_master
;
662 struct gfs2_statfs_change
*l_sc
= &sdp
->sd_statfs_local
;
663 struct gfs2_holder gh
;
664 struct buffer_head
*m_bh
, *l_bh
;
667 error
= gfs2_glock_nq_init(m_ip
->i_gl
, LM_ST_EXCLUSIVE
, GL_NOCACHE
,
672 error
= gfs2_meta_inode_buffer(m_ip
, &m_bh
);
/* refresh the in-core master record from disk */
676 spin_lock(&sdp
->sd_statfs_spin
);
677 gfs2_statfs_change_in(m_sc
, m_bh
->b_data
+
678 sizeof(struct gfs2_dinode
));
/* nothing accumulated locally: sync is a no-op */
679 if (!l_sc
->sc_total
&& !l_sc
->sc_free
&& !l_sc
->sc_dinodes
) {
680 spin_unlock(&sdp
->sd_statfs_spin
);
683 spin_unlock(&sdp
->sd_statfs_spin
);
685 error
= gfs2_meta_inode_buffer(l_ip
, &l_bh
);
/* both dinode buffers will be modified in one transaction */
689 error
= gfs2_trans_begin(sdp
, 2 * RES_DINODE
, 0);
693 mutex_lock(&sdp
->sd_statfs_mutex
);
694 gfs2_trans_add_bh(l_ip
->i_gl
, l_bh
, 1);
695 mutex_unlock(&sdp
->sd_statfs_mutex
);
/* fold local deltas into the master and zero the local record */
697 spin_lock(&sdp
->sd_statfs_spin
);
698 m_sc
->sc_total
+= l_sc
->sc_total
;
699 m_sc
->sc_free
+= l_sc
->sc_free
;
700 m_sc
->sc_dinodes
+= l_sc
->sc_dinodes
;
701 memset(l_sc
, 0, sizeof(struct gfs2_statfs_change
));
702 memset(l_bh
->b_data
+ sizeof(struct gfs2_dinode
),
703 0, sizeof(struct gfs2_statfs_change
));
704 spin_unlock(&sdp
->sd_statfs_spin
);
706 gfs2_trans_add_bh(m_ip
->i_gl
, m_bh
, 1);
707 gfs2_statfs_change_out(m_sc
, m_bh
->b_data
+ sizeof(struct gfs2_dinode
));
718 gfs2_glock_dq_uninit(&gh
);
/*
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the gfs2_statfs_change to fill in (master + local deltas)
 *
 * Under sd_statfs_spin, combines the master statfs record with this
 * node's unsynced local deltas into @sc, then clamps the result so that
 * free never exceeds total and dinodes is non-negative (the local deltas
 * can transiently push the sums out of range).
 *
 * NOTE(review): the initial copy of *m_sc into *sc, the dinodes clamp
 * body, and the return are missing from this copy.
 */
731 int gfs2_statfs_i(struct gfs2_sbd
*sdp
, struct gfs2_statfs_change
*sc
)
733 struct gfs2_statfs_change
*m_sc
= &sdp
->sd_statfs_master
;
734 struct gfs2_statfs_change
*l_sc
= &sdp
->sd_statfs_local
;
736 spin_lock(&sdp
->sd_statfs_spin
);
/* add the unsynced local deltas on top of the master record */
739 sc
->sc_total
+= l_sc
->sc_total
;
740 sc
->sc_free
+= l_sc
->sc_free
;
741 sc
->sc_dinodes
+= l_sc
->sc_dinodes
;
743 spin_unlock(&sdp
->sd_statfs_spin
);
/* clamp transiently out-of-range sums */
747 if (sc
->sc_free
> sc
->sc_total
)
748 sc
->sc_free
= sc
->sc_total
;
749 if (sc
->sc_dinodes
< 0)
/*
 * statfs_slow_fill - accumulate one resource group's counts into an sc
 * @rgd: the resource group (its glock is held by the caller)
 * @sc: the running gfs2_statfs_change accumulator
 *
 * Verifies the rgrp then adds its data-block, free-block and dinode
 * counts into @sc.
 *
 * Returns: 0 on success, -ESTALE if the LVB is invalid
 * NOTE(review): the return statement itself is missing from this copy.
 */
763 static int statfs_slow_fill(struct gfs2_rgrpd
*rgd
,
764 struct gfs2_statfs_change
*sc
)
766 gfs2_rgrp_verify(rgd
);
767 sc
->sc_total
+= rgd
->rd_ri
.ri_data
;
768 sc
->sc_free
+= rgd
->rd_rg
.rg_free
;
769 sc
->sc_dinodes
+= rgd
->rd_rg
.rg_dinodes
;
/*
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Walks every resource group, keeping up to 64 asynchronous glock
 * requests in flight via a kcalloc'd holder array: each slot is either
 * polled for completion (then statfs_slow_fill'd and released) or
 * refilled with the next rgrp's lock request.  A pending signal aborts
 * with -ERESTARTSYS.
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * NOTE(review): the outer progress loop, the async lock flags, error
 * propagation, the kfree of gha, and the return are missing from this
 * copy — the control flow here is only partially visible.
 */
786 int gfs2_statfs_slow(struct gfs2_sbd
*sdp
, struct gfs2_statfs_change
*sc
)
788 struct gfs2_holder ri_gh
;
789 struct gfs2_rgrpd
*rgd_next
;
790 struct gfs2_holder
*gha
, *gh
;
/* up to 64 async lock requests in flight at once */
791 unsigned int slots
= 64;
796 memset(sc
, 0, sizeof(struct gfs2_statfs_change
));
797 gha
= kcalloc(slots
, sizeof(struct gfs2_holder
), GFP_KERNEL
);
/* the rindex must be stable while we iterate rgrps */
801 error
= gfs2_rindex_hold(sdp
, &ri_gh
);
805 rgd_next
= gfs2_rgrpd_get_first(sdp
);
810 for (x
= 0; x
< slots
; x
++) {
/* slot busy: harvest it if its lock has been granted */
813 if (gh
->gh_gl
&& gfs2_glock_poll(gh
)) {
814 err
= gfs2_glock_wait(gh
);
816 gfs2_holder_uninit(gh
);
820 error
= statfs_slow_fill(
821 gh
->gh_gl
->gl_object
, sc
);
822 gfs2_glock_dq_uninit(gh
);
/* slot free: launch the next rgrp's lock request */
828 else if (rgd_next
&& !error
) {
829 error
= gfs2_glock_nq_init(rgd_next
->rd_gl
,
833 rgd_next
= gfs2_rgrpd_get_next(rgd_next
);
837 if (signal_pending(current
))
838 error
= -ERESTARTSYS
;
847 gfs2_glock_dq_uninit(&ri_gh
);
/* Fields of struct lfcc: one entry per journal, linking a glock holder
 * onto the local list built by gfs2_lock_fs_check_clean() below.
 * NOTE(review): the opening "struct lfcc {" line appears lost in this
 * copy of the source. */
856 struct list_head list
;
857 struct gfs2_holder gh
;
/*
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 * journals are clean
 * @sdp: the file system
 * @t_gh: the hold on the transaction lock
 *
 * Holds the jindex, takes a shared glock on every journal inode (tracked
 * via kmalloc'd struct lfcc entries on a local list), then acquires the
 * transaction glock deferred with LM_FLAG_PRIORITY|GL_NOCACHE.  Each
 * journal is then checked and its head found; any journal whose log
 * header lacks GFS2_LOG_HEAD_UNMOUNT is not clean, and the transaction
 * glock is dropped.  Finally all per-journal holders are released and
 * freed, and the jindex hold is dropped.
 *
 * NOTE(review): error checks/returns between steps, the kfree of lfcc
 * entries, and the function's return are missing from this copy.
 */
870 static int gfs2_lock_fs_check_clean(struct gfs2_sbd
*sdp
,
871 struct gfs2_holder
*t_gh
)
873 struct gfs2_inode
*ip
;
874 struct gfs2_holder ji_gh
;
875 struct gfs2_jdesc
*jd
;
878 struct gfs2_log_header lh
;
881 error
= gfs2_jindex_hold(sdp
, &ji_gh
);
/* take a shared hold on every journal's glock */
885 list_for_each_entry(jd
, &sdp
->sd_jindex_list
, jd_list
) {
886 lfcc
= kmalloc(sizeof(struct lfcc
), GFP_KERNEL
);
891 ip
= GFS2_I(jd
->jd_inode
);
892 error
= gfs2_glock_nq_init(ip
->i_gl
, LM_ST_SHARED
, 0, &lfcc
->gh
);
897 list_add(&lfcc
->list
, &list
);
/* now freeze the fs via the transaction glock */
900 error
= gfs2_glock_nq_init(sdp
->sd_trans_gl
, LM_ST_DEFERRED
,
901 LM_FLAG_PRIORITY
| GL_NOCACHE
,
/* verify every journal is valid and cleanly unmounted */
904 list_for_each_entry(jd
, &sdp
->sd_jindex_list
, jd_list
) {
905 error
= gfs2_jdesc_check(jd
);
908 error
= gfs2_find_jhead(jd
, &lh
);
911 if (!(lh
.lh_flags
& GFS2_LOG_HEAD_UNMOUNT
)) {
/* a dirty journal: release the freeze */
918 gfs2_glock_dq_uninit(t_gh
);
/* tear down the per-journal holders */
921 while (!list_empty(&list
)) {
922 lfcc
= list_entry(list
.next
, struct lfcc
, list
);
923 list_del(&lfcc
->list
);
924 gfs2_glock_dq_uninit(&lfcc
->gh
);
927 gfs2_glock_dq_uninit(&ji_gh
);
/*
 * gfs2_freeze_fs - freezes the file system
 * @sdp: the file system
 *
 * This function flushes data and meta data for all machines by
 * acquiring the transaction log exclusively. All journals are
 * ensured to be in a clean state as well.
 *
 * Freezes are counted (sd_freeze_count under sd_freeze_lock); only the
 * first freeze actually calls gfs2_lock_fs_check_clean(), and a failure
 * there rolls the count back.
 *
 * NOTE(review): the function's return (presumably the error code) is
 * missing from this copy.
 */
943 int gfs2_freeze_fs(struct gfs2_sbd
*sdp
)
947 mutex_lock(&sdp
->sd_freeze_lock
);
/* only the first freezer takes the transaction lock */
949 if (!sdp
->sd_freeze_count
++) {
950 error
= gfs2_lock_fs_check_clean(sdp
, &sdp
->sd_freeze_gh
);
/* roll back the count if the freeze failed */
952 sdp
->sd_freeze_count
--;
955 mutex_unlock(&sdp
->sd_freeze_lock
);
/*
 * gfs2_unfreeze_fs - unfreezes the file system
 * @sdp: the file system
 *
 * This function allows the file system to proceed by unlocking
 * the exclusively held transaction lock. Other GFS2 nodes are
 * now free to acquire the lock shared and go on with their lives.
 *
 * Counterpart of gfs2_freeze_fs(): decrements sd_freeze_count under
 * sd_freeze_lock and releases sd_freeze_gh only when the count reaches
 * zero (the count is left untouched if it was already zero).
 */
970 void gfs2_unfreeze_fs(struct gfs2_sbd
*sdp
)
972 mutex_lock(&sdp
->sd_freeze_lock
);
/* drop the transaction glock only on the last unfreeze */
974 if (sdp
->sd_freeze_count
&& !--sdp
->sd_freeze_count
)
975 gfs2_glock_dq_uninit(&sdp
->sd_freeze_gh
);
977 mutex_unlock(&sdp
->sd_freeze_lock
);