/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/crc32.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>

#include "lm_interface.h"
/**
 * gfs2_tune_init - Fill a gfs2_tune structure with default values
 * @gt: the structure to initialize
 *
 */

void gfs2_tune_init(struct gfs2_tune *gt)
{
        spin_lock_init(&gt->gt_spin);

        gt->gt_ilimit_tries = 3;
        gt->gt_ilimit_min = 1;
        gt->gt_demote_secs = 300;
        gt->gt_incore_log_blocks = 1024;
        gt->gt_log_flush_secs = 60;
        gt->gt_jindex_refresh_secs = 60;
        gt->gt_scand_secs = 15;
        gt->gt_recoverd_secs = 60;
        gt->gt_quotad_secs = 5;
        gt->gt_inoded_secs = 15;
        gt->gt_quota_simul_sync = 64;
        gt->gt_quota_warn_period = 10;
        gt->gt_quota_scale_num = 1;
        gt->gt_quota_scale_den = 1;
        gt->gt_quota_cache_secs = 300;
        gt->gt_quota_quantum = 60;
        gt->gt_atime_quantum = 3600;
        gt->gt_new_files_jdata = 0;
        gt->gt_new_files_directio = 0;
        gt->gt_max_atomic_write = 4 << 20;
        gt->gt_max_readahead = 1 << 18;
        gt->gt_lockdump_size = 131072;
        gt->gt_stall_secs = 600;
        gt->gt_complain_secs = 10;
        gt->gt_reclaim_limit = 5000;
        gt->gt_entries_per_readdir = 32;
        gt->gt_prefetch_secs = 10;
        gt->gt_greedy_default = HZ / 10;
        gt->gt_greedy_quantum = HZ / 40;
        gt->gt_greedy_max = HZ / 4;
        gt->gt_statfs_quantum = 30;
        gt->gt_statfs_slow = 0;
}
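
/*
 * The shifted defaults above are byte counts: gt_max_atomic_write
 * (4 << 20) is 4 MiB and gt_max_readahead (1 << 18) is 256 KiB.
 * The gt_greedy_* values are expressed in jiffies, i.e. fractions
 * of a second scaled by HZ.
 */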
/**
 * gfs2_check_sb - Check superblock
 * @sdp: the filesystem
 * @sb: the superblock
 * @silent: Don't print a message if the check fails
 *
 * Checks that the version code of the FS is one that we understand how to
 * read and that the sizes of the various on-disk structures have not
 * changed.
 */

int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent)
{
        unsigned int x;

        if (sb->sb_header.mh_magic != GFS2_MAGIC ||
            sb->sb_header.mh_type != GFS2_METATYPE_SB) {
                if (!silent)
                        printk(KERN_WARNING "GFS2: not a GFS2 filesystem\n");
                return -EINVAL;
        }

        /* If format numbers match exactly, we're done. */

        if (sb->sb_fs_format == GFS2_FORMAT_FS &&
            sb->sb_multihost_format == GFS2_FORMAT_MULTI)
                return 0;

        if (sb->sb_fs_format != GFS2_FORMAT_FS) {
                for (x = 0; gfs2_old_fs_formats[x]; x++)
                        if (gfs2_old_fs_formats[x] == sb->sb_fs_format)
                                break;

                if (!gfs2_old_fs_formats[x]) {
                        printk(KERN_WARNING
                               "GFS2: code version (%u, %u) is incompatible "
                               "with ondisk format (%u, %u)\n",
                               GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
                               sb->sb_fs_format, sb->sb_multihost_format);
                        printk(KERN_WARNING
                               "GFS2: I don't know how to upgrade this FS\n");
                        return -EINVAL;
                }
        }

        if (sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
                for (x = 0; gfs2_old_multihost_formats[x]; x++)
                        if (gfs2_old_multihost_formats[x] ==
                            sb->sb_multihost_format)
                                break;

                if (!gfs2_old_multihost_formats[x]) {
                        printk(KERN_WARNING
                               "GFS2: code version (%u, %u) is incompatible "
                               "with ondisk format (%u, %u)\n",
                               GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
                               sb->sb_fs_format, sb->sb_multihost_format);
                        printk(KERN_WARNING
                               "GFS2: I don't know how to upgrade this FS\n");
                        return -EINVAL;
                }
        }

        if (!sdp->sd_args.ar_upgrade) {
                printk(KERN_WARNING
                       "GFS2: code version (%u, %u) is incompatible "
                       "with ondisk format (%u, %u)\n",
                       GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
                       sb->sb_fs_format, sb->sb_multihost_format);
                printk(KERN_INFO
                       "GFS2: Use the \"upgrade\" mount option to upgrade "
                       "the FS\n");
                printk(KERN_INFO "GFS2: See the manual for more details\n");
                return -EINVAL;
        }

        return 0;
}
/**
 * gfs2_read_sb - Read super block
 * @sdp: The GFS2 superblock
 * @gl: the glock for the superblock (assumed to be held)
 * @silent: Don't print message if mount fails
 *
 */

int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent)
{
        struct buffer_head *bh;
        uint32_t hash_blocks, ind_blocks, leaf_blocks;
        uint32_t tmp_blocks;
        unsigned int x;
        int error;

        error = gfs2_meta_read(gl, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift,
                               DIO_FORCE | DIO_START | DIO_WAIT, &bh);
        if (error) {
                if (!silent)
                        fs_err(sdp, "can't read superblock\n");
                return error;
        }

        gfs2_assert(sdp, sizeof(struct gfs2_sb) <= bh->b_size);
        gfs2_sb_in(&sdp->sd_sb, bh->b_data);
        brelse(bh);

        error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
        if (error)
                return error;

        sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
                               GFS2_BASIC_BLOCK_SHIFT;
        sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
        sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_dinode)) / sizeof(uint64_t);
        sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / sizeof(uint64_t);
        sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
        sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
        sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
        sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(uint64_t);
        sdp->sd_ut_per_block = (sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_meta_header)) /
                               sizeof(struct gfs2_unlinked_tag);
        sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_meta_header)) /
                               sizeof(struct gfs2_quota_change);
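
        /*
         * Illustrative example (the exact figures depend on the on-disk
         * structure sizes): with a 4096-byte block, subtracting the dinode
         * header leaves room for a few hundred 64-bit block pointers per
         * dinode (sd_diptrs), while subtracting only the metadata header
         * leaves roughly 500 pointers per indirect block (sd_inptrs).
         * sd_jbsize is the usable payload of a journaled-data block for
         * the same reason: each such block begins with a gfs2_meta_header.
         */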
        /* Compute maximum reservation required to add an entry to a directory */

        hash_blocks = DIV_ROUND_UP(sizeof(uint64_t) * (1 << GFS2_DIR_MAX_DEPTH),
                                   sdp->sd_jbsize);

        ind_blocks = 0;
        for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
                tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
                ind_blocks += tmp_blocks;
        }

        leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;

        sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;

        sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_dinode);
        sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
        for (x = 2;; x++) {
                uint64_t space, d;
                uint32_t m;

                space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
                d = space;
                m = do_div(d, sdp->sd_inptrs);

                if (d != sdp->sd_heightsize[x - 1] || m)
                        break;
                sdp->sd_heightsize[x] = space;
        }
        sdp->sd_max_height = x;
        gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
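
        /*
         * sd_heightsize[h] is the largest file size (in bytes) that a
         * metadata tree of height h can address: height 0 is data stuffed
         * directly in the dinode, height 1 goes through the dinode's
         * direct pointers, and each further level multiplies the reach by
         * sd_inptrs. The loop stops at the first height whose size no
         * longer fits in 64 bits, which the do_div() round-trip above
         * detects as an overflow.
         */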
        sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
                                 sizeof(struct gfs2_dinode);
        sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;
        for (x = 2;; x++) {
                uint64_t space, d;
                uint32_t m;

                space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
                d = space;
                m = do_div(d, sdp->sd_inptrs);

                if (d != sdp->sd_jheightsize[x - 1] || m)
                        break;
                sdp->sd_jheightsize[x] = space;
        }
        sdp->sd_max_jheight = x;
        gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);

        return 0;
}
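
/*
 * sd_jheightsize mirrors sd_heightsize but is computed with sd_jbsize at
 * height 1, because journaled-data blocks lose sizeof(struct
 * gfs2_meta_header) bytes of payload to their block header. Both tables
 * are later consulted when choosing the metadata tree height needed for
 * a given file size.
 */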
int gfs2_do_upgrade(struct gfs2_sbd *sdp, struct gfs2_glock *sb_gl)
/**
 * gfs2_jindex_hold - Grab a lock on the jindex
 * @sdp: The GFS2 superblock
 * @ji_gh: the holder for the jindex glock
 *
 * This is very similar to the gfs2_rindex_hold() function, except that
 * in general we hold the jindex lock for longer periods of time and
 * we grab it far less frequently (in general) than the rgrp lock.
 *
 * Returns: errno
 */

int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
{
        struct gfs2_inode *dip = sdp->sd_jindex->u.generic_ip;
        struct qstr name;
        char buf[20];
        struct gfs2_jdesc *jd;
        int error;

        name.name = buf;

        mutex_lock(&sdp->sd_jindex_mutex);

        for (;;) {
                error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED,
                                           GL_LOCAL_EXCL, ji_gh);
                if (error)
                        break;

                name.len = sprintf(buf, "journal%u", sdp->sd_journals);
                name.hash = gfs2_disk_hash(name.name, name.len);

                error = gfs2_dir_search(sdp->sd_jindex, &name, NULL, NULL);
                if (error == -ENOENT) {
                        error = 0;
                        break;
                }

                gfs2_glock_dq_uninit(ji_gh);

                if (error)
                        break;

                error = -ENOMEM;
                jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
                if (!jd)
                        break;

                jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1, NULL);
                if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
                        if (!jd->jd_inode)
                                error = -ENOENT;
                        else
                                error = PTR_ERR(jd->jd_inode);
                        kfree(jd);
                        break;
                }

                spin_lock(&sdp->sd_jindex_spin);
                jd->jd_jid = sdp->sd_journals++;
                list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
                spin_unlock(&sdp->sd_jindex_spin);
        }

        mutex_unlock(&sdp->sd_jindex_mutex);

        return error;
}
/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
        struct list_head list;
        struct gfs2_jdesc *jd;

        spin_lock(&sdp->sd_jindex_spin);
        list_add(&list, &sdp->sd_jindex_list);
        list_del_init(&sdp->sd_jindex_list);
        sdp->sd_journals = 0;
        spin_unlock(&sdp->sd_jindex_spin);

        while (!list_empty(&list)) {
                jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
                list_del(&jd->jd_list);
                kfree(jd);
        }
}
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head,
                                       unsigned int jid)
{
        struct gfs2_jdesc *jd;
        int found = 0;

        list_for_each_entry(jd, head, jd_list) {
                if (jd->jd_jid == jid) {
                        found = 1;
                        break;
                }
        }

        if (!found)
                jd = NULL;

        return jd;
}
struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
        struct gfs2_jdesc *jd;

        spin_lock(&sdp->sd_jindex_spin);
        jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
        spin_unlock(&sdp->sd_jindex_spin);

        return jd;
}
void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid)
{
        struct gfs2_jdesc *jd;

        spin_lock(&sdp->sd_jindex_spin);
        jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
        if (jd)
                jd->jd_dirty = 1;
        spin_unlock(&sdp->sd_jindex_spin);
}
struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp)
{
        struct gfs2_jdesc *jd;
        int found = 0;

        spin_lock(&sdp->sd_jindex_spin);

        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                if (jd->jd_dirty) {
                        jd->jd_dirty = 0;
                        found = 1;
                        break;
                }
        }

        spin_unlock(&sdp->sd_jindex_spin);

        if (!found)
                jd = NULL;

        return jd;
}
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
        struct gfs2_inode *ip = jd->jd_inode->u.generic_ip;
        struct gfs2_sbd *sdp = ip->i_sbd;
        int ar;
        int error;

        if (ip->i_di.di_size < (8 << 20) ||
            ip->i_di.di_size > (1 << 30) ||
            (ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1))) {
                gfs2_consist_inode(ip);
                return -EIO;
        }
        jd->jd_blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;

        error = gfs2_write_alloc_required(ip, 0, ip->i_di.di_size, &ar);
        if (!error && ar) {
                gfs2_consist_inode(ip);
                error = -EIO;
        }

        return error;
}
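
/*
 * The checks above require a journal to be at least 8 MiB (8 << 20),
 * at most 1 GiB (1 << 30), a multiple of the filesystem block size,
 * and fully pre-allocated: if gfs2_write_alloc_required() reports that
 * writing the journal would still need block allocation, the journal
 * inode is flagged as inconsistent.
 */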
/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = sdp->sd_jdesc->jd_inode->u.generic_ip;
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_holder t_gh;
        struct gfs2_log_header head;
        int error;

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
                                   GL_LOCAL_EXCL, &t_gh);
        if (error)
                return error;

        gfs2_meta_cache_flush(ip);
        j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);

        error = gfs2_find_jhead(sdp->sd_jdesc, &head);
        if (error)
                goto fail;

        if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
                gfs2_consist(sdp);
                error = -EIO;
                goto fail;
        }

        /* Initialize some head of the log stuff */
        sdp->sd_log_sequence = head.lh_sequence + 1;
        gfs2_log_pointers_init(sdp, head.lh_blkno);

        error = gfs2_unlinked_init(sdp);
        if (error)
                goto fail;
        error = gfs2_quota_init(sdp);
        if (error)
                goto fail_unlinked;

        set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        gfs2_glock_dq_uninit(&t_gh);

        return 0;

fail_unlinked:
        gfs2_unlinked_cleanup(sdp);

fail:
        t_gh.gh_flags |= GL_NOCACHE;
        gfs2_glock_dq_uninit(&t_gh);

        return error;
}
/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
        struct gfs2_holder t_gh;
        int error;

        gfs2_unlinked_dealloc(sdp);
        gfs2_quota_sync(sdp);
        gfs2_statfs_sync(sdp);

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
                                   GL_LOCAL_EXCL | GL_NOCACHE,
                                   &t_gh);
        if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                return error;

        gfs2_meta_syncfs(sdp);
        gfs2_log_shutdown(sdp);

        clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        if (t_gh.gh_gl)
                gfs2_glock_dq_uninit(&t_gh);

        gfs2_unlinked_cleanup(sdp);
        gfs2_quota_cleanup(sdp);

        return error;
}
int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *m_ip = sdp->sd_statfs_inode->u.generic_ip;
        struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
        struct gfs2_inode *l_ip = sdp->sd_sc_inode->u.generic_ip;
        struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        struct gfs2_holder gh;
        int error;

        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
                                   &gh);
        if (error)
                return error;

        error = gfs2_meta_inode_buffer(m_ip, &m_bh);
        if (error)
                goto out;

        if (sdp->sd_args.ar_spectator) {
                spin_lock(&sdp->sd_statfs_spin);
                gfs2_statfs_change_in(m_sc, m_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                spin_unlock(&sdp->sd_statfs_spin);
        } else {
                error = gfs2_meta_inode_buffer(l_ip, &l_bh);
                if (error)
                        goto out_m_bh;

                spin_lock(&sdp->sd_statfs_spin);
                gfs2_statfs_change_in(m_sc, m_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                gfs2_statfs_change_in(l_sc, l_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                spin_unlock(&sdp->sd_statfs_spin);

                brelse(l_bh);
        }

out_m_bh:
        brelse(m_bh);

out:
        gfs2_glock_dq_uninit(&gh);

        return error;
}
void gfs2_statfs_change(struct gfs2_sbd *sdp, int64_t total, int64_t free,
                        int64_t dinodes)
{
        struct gfs2_inode *l_ip = sdp->sd_sc_inode->u.generic_ip;
        struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *l_bh;
        int error;

        error = gfs2_meta_inode_buffer(l_ip, &l_bh);
        if (error)
                return;

        mutex_lock(&sdp->sd_statfs_mutex);
        gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
        mutex_unlock(&sdp->sd_statfs_mutex);

        spin_lock(&sdp->sd_statfs_spin);
        l_sc->sc_total += total;
        l_sc->sc_free += free;
        l_sc->sc_dinodes += dinodes;
        gfs2_statfs_change_out(l_sc, l_bh->b_data +
                               sizeof(struct gfs2_dinode));
        spin_unlock(&sdp->sd_statfs_spin);

        brelse(l_bh);
}
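
/*
 * gfs2_statfs_change() only updates this node's local counters
 * (sd_statfs_local, backed by the per-node statfs change inode), so
 * allocation and deallocation paths can record their deltas cheaply.
 * gfs2_statfs_sync() below periodically folds those deltas into the
 * master statfs file under an exclusive glock, and gfs2_statfs_i()
 * reports master plus local so a node always sees its own recent
 * changes.
 */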
int gfs2_statfs_sync(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *m_ip = sdp->sd_statfs_inode->u.generic_ip;
        struct gfs2_inode *l_ip = sdp->sd_sc_inode->u.generic_ip;
        struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
        struct gfs2_holder gh;
        struct buffer_head *m_bh, *l_bh;
        int error;

        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
                                   &gh);
        if (error)
                return error;

        error = gfs2_meta_inode_buffer(m_ip, &m_bh);
        if (error)
                goto out;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
                spin_unlock(&sdp->sd_statfs_spin);
                goto out_bh;
        }
        spin_unlock(&sdp->sd_statfs_spin);

        error = gfs2_meta_inode_buffer(l_ip, &l_bh);
        if (error)
                goto out_bh;

        error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
        if (error)
                goto out_bh2;

        mutex_lock(&sdp->sd_statfs_mutex);
        gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
        mutex_unlock(&sdp->sd_statfs_mutex);

        spin_lock(&sdp->sd_statfs_spin);
        m_sc->sc_total += l_sc->sc_total;
        m_sc->sc_free += l_sc->sc_free;
        m_sc->sc_dinodes += l_sc->sc_dinodes;
        memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
        memset(l_bh->b_data + sizeof(struct gfs2_dinode),
               0, sizeof(struct gfs2_statfs_change));
        spin_unlock(&sdp->sd_statfs_spin);

        gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
        gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));

        gfs2_trans_end(sdp);

out_bh2:
        brelse(l_bh);

out_bh:
        brelse(m_bh);

out:
        gfs2_glock_dq_uninit(&gh);

        return error;
}
/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the statfs structure to fill in
 *
 * Returns: errno
 */

int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
{
        struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;

        spin_lock(&sdp->sd_statfs_spin);

        *sc = *m_sc;
        sc->sc_total += l_sc->sc_total;
        sc->sc_free += l_sc->sc_free;
        sc->sc_dinodes += l_sc->sc_dinodes;

        spin_unlock(&sdp->sd_statfs_spin);

        if (sc->sc_free < 0)
                sc->sc_free = 0;
        if (sc->sc_free > sc->sc_total)
                sc->sc_free = sc->sc_total;
        if (sc->sc_dinodes < 0)
                sc->sc_dinodes = 0;

        return 0;
}
/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success, -ESTALE if the LVB is invalid
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
                            struct gfs2_statfs_change *sc)
{
        gfs2_rgrp_verify(rgd);
        sc->sc_total += rgd->rd_ri.ri_data;
        sc->sc_free += rgd->rd_rg.rg_free;
        sc->sc_dinodes += rgd->rd_rg.rg_dinodes;
        return 0;
}
/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
{
        struct gfs2_holder ri_gh;
        struct gfs2_rgrpd *rgd_next;
        struct gfs2_holder *gha, *gh;
        unsigned int slots = 64;
        unsigned int x;
        int done;
        int error = 0, err;

        memset(sc, 0, sizeof(struct gfs2_statfs_change));
        gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
        if (!gha)
                return -ENOMEM;

        error = gfs2_rindex_hold(sdp, &ri_gh);
        if (error)
                goto out;

        rgd_next = gfs2_rgrpd_get_first(sdp);

        for (;;) {
                done = 1;

                for (x = 0; x < slots; x++) {
                        gh = gha + x;

                        if (gh->gh_gl && gfs2_glock_poll(gh)) {
                                err = gfs2_glock_wait(gh);
                                if (err) {
                                        gfs2_holder_uninit(gh);
                                        error = err;
                                } else {
                                        if (!error)
                                                error = statfs_slow_fill(
                                                        gh->gh_gl->gl_object, sc);
                                        gfs2_glock_dq_uninit(gh);
                                }
                        }

                        if (gh->gh_gl)
                                done = 0;
                        else if (rgd_next && !error) {
                                error = gfs2_glock_nq_init(rgd_next->rd_gl,
                                                           LM_ST_SHARED,
                                                           GL_ASYNC,
                                                           gh);
                                rgd_next = gfs2_rgrpd_get_next(rgd_next);
                                done = 0;
                        }

                        if (signal_pending(current))
                                error = -ERESTARTSYS;
                }

                if (done)
                        break;

                yield();
        }

        gfs2_glock_dq_uninit(&ri_gh);

out:
        kfree(gha);

        return error;
}
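
/*
 * The slow statfs path keeps up to "slots" (64) resource-group glocks
 * in flight at once: each pass over the holder array harvests the locks
 * whose acquisition has completed, adds their counts via
 * statfs_slow_fill(), and queues the next resource groups in the freed
 * slots. As the FIXME above notes, completion is detected by polling
 * rather than by sleeping on the locks.
 */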
struct lfcc {
        struct list_head list;
        struct gfs2_holder gh;
};
/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @t_gh: the hold on the transaction lock
 *
 * Returns: errno
 */

int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp, struct gfs2_holder *t_gh)
{
        struct gfs2_inode *ip;
        struct gfs2_holder ji_gh;
        struct gfs2_jdesc *jd;
        struct lfcc *lfcc;
        LIST_HEAD(list);
        struct gfs2_log_header lh;
        int error;

        error = gfs2_jindex_hold(sdp, &ji_gh);
        if (error)
                return error;

        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
                if (!lfcc) {
                        error = -ENOMEM;
                        goto out;
                }
                ip = jd->jd_inode->u.generic_ip;
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0,
                                           &lfcc->gh);
                if (error) {
                        kfree(lfcc);
                        goto out;
                }
                list_add(&lfcc->list, &list);
        }

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
                                   LM_FLAG_PRIORITY | GL_NOCACHE,
                                   t_gh);

        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                error = gfs2_jdesc_check(jd);
                if (error)
                        break;
                error = gfs2_find_jhead(jd, &lh);
                if (error)
                        break;
                if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
                        error = -EBUSY;
                        break;
                }
        }

        if (error)
                gfs2_glock_dq_uninit(t_gh);

out:
        while (!list_empty(&list)) {
                lfcc = list_entry(list.next, struct lfcc, list);
                list_del(&lfcc->list);
                gfs2_glock_dq_uninit(&lfcc->gh);
                kfree(lfcc);
        }

        gfs2_glock_dq_uninit(&ji_gh);

        return error;
}
/**
 * gfs2_freeze_fs - freezes the file system
 * @sdp: the file system
 *
 * This function flushes data and meta data for all machines by
 * acquiring the transaction log exclusively. All journals are
 * ensured to be in a clean state as well.
 *
 * Returns: errno
 */

int gfs2_freeze_fs(struct gfs2_sbd *sdp)
{
        int error = 0;

        mutex_lock(&sdp->sd_freeze_lock);

        if (!sdp->sd_freeze_count++) {
                error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
                if (error)
                        sdp->sd_freeze_count--;
        }

        mutex_unlock(&sdp->sd_freeze_lock);

        return error;
}
/**
 * gfs2_unfreeze_fs - unfreezes the file system
 * @sdp: the file system
 *
 * This function allows the file system to proceed by unlocking
 * the exclusively held transaction lock. Other GFS2 nodes are
 * now free to acquire the lock shared and go on with their lives.
 *
 */

void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
{
        mutex_lock(&sdp->sd_freeze_lock);

        if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
                gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

        mutex_unlock(&sdp->sd_freeze_lock);
}
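
/*
 * sd_freeze_count makes freeze/unfreeze nestable: only the first
 * gfs2_freeze_fs() call actually takes the transaction lock (via
 * gfs2_lock_fs_check_clean()), and only the matching final
 * gfs2_unfreeze_fs() call drops it. Intermediate calls just adjust the
 * count under sd_freeze_lock.
 */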