2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/gfs2_ondisk.h>
18 #include "lm_interface.h"
32 * meta_go_sync - sync out the metadata for this glock
36 * Called when demoting or unlocking an EX glock. We must flush
37 * to disk all dirty buffers/pages relating to this glock, and must not
38 * return to caller to demote/unlock the glock until I/O is complete.
/*
 * meta_go_sync - flush this glock's dirty metadata to disk.
 * @gl: the glock being demoted/unlocked
 * @flags: DIO_* flags; a no-op unless DIO_METADATA is set.
 *
 * If the glock was dirty (GLF_DIRTY test-and-cleared atomically), flush the
 * journal for gl->gl_sbd, then sync the metadata with DIO_START|DIO_WAIT
 * forced on so the writeback is both initiated and waited for.  With
 * DIO_RELEASE the AIL list for the glock is emptied as well.  GLF_SYNC is
 * cleared at the end regardless.
 * NOTE(review): the extraction dropped original lines here (numbering jumps
 * 43->46, 50->53) — braces/early return are missing; verify against upstream.
 */
41 static void meta_go_sync(struct gfs2_glock
*gl
, int flags
)
/* Nothing to do unless the caller asked for metadata sync. */
43 if (!(flags
& DIO_METADATA
))
/* Dirty glock: flush the log, then start and wait on the metadata sync. */
46 if (test_and_clear_bit(GLF_DIRTY
, &gl
->gl_flags
)) {
47 gfs2_log_flush(gl
->gl_sbd
, gl
);
48 gfs2_meta_sync(gl
, flags
| DIO_START
| DIO_WAIT
);
/* DIO_RELEASE means the lock is going away: drain the AIL too. */
49 if (flags
& DIO_RELEASE
)
50 gfs2_ail_empty_gl(gl
);
/* Sync request satisfied (or not needed) — clear the pending-sync flag. */
53 clear_bit(GLF_SYNC
, &gl
->gl_flags
);
57 * meta_go_inval - invalidate the metadata for this glock
/*
 * meta_go_inval - invalidate cached metadata for this glock.
 * @gl: the glock
 * @flags: DIO_* flags; bails out unless DIO_METADATA is set.
 *
 * NOTE(review): the body after the flag check is missing from this
 * extraction (original lines 66+ absent) — the actual invalidation work is
 * not visible here; confirm against the upstream file.
 */
63 static void meta_go_inval(struct gfs2_glock
*gl
, int flags
)
65 if (!(flags
& DIO_METADATA
))
73 * meta_go_demote_ok - Check to see if it's ok to unlock a glock
76 * Returns: 1 if we have no cached data; ok to demote meta glock
/*
 * meta_go_demote_ok - ok to demote a meta glock iff nothing is cached.
 * @gl: the glock
 *
 * Returns: 1 (demote ok) when the glock's address space holds no pages.
 */
79 static int meta_go_demote_ok(struct gfs2_glock
*gl
)
81 return !gl
->gl_aspace
->i_mapping
->nrpages
;
85 * inode_go_xmote_th - promote/demote a glock
87 * @state: the requested state
/*
 * inode_go_xmote_th - promote/demote an inode glock.
 * @gl: the glock
 * @state: the requested lock state
 *
 * When the glock is currently held (state != LM_ST_UNLOCKED) some
 * pre-transition work is done first, then the generic state change is
 * delegated to gfs2_glock_xmote_th().
 * NOTE(review): the signature is truncated (dangling "," — the `int flags`
 * parameter line was dropped by the extraction), and the statement guarded
 * by the LM_ST_UNLOCKED check (original line 96) is missing; verify upstream.
 */
92 static void inode_go_xmote_th(struct gfs2_glock
*gl
, unsigned int state
,
95 if (gl
->gl_state
!= LM_ST_UNLOCKED
)
/* Hand the actual promote/demote off to the generic implementation. */
97 gfs2_glock_xmote_th(gl
, state
, flags
);
101 * inode_go_xmote_bh - After promoting/demoting a glock
/*
 * inode_go_xmote_bh - bottom half run after an inode glock state change.
 * @gl: the glock
 *
 * If the glock ended up in a real lock state and the requesting holder (if
 * any) did not pass GL_SKIP, the inode block (gl->gl_name.ln_number) is read
 * back in via gfs2_meta_read().
 * NOTE(review): this function is truncated mid-call in the extraction — the
 * remaining gfs2_meta_read() arguments, error handling, and use of `bh` are
 * not visible; `error` is also declared on a dropped line.  Verify upstream.
 */
106 static void inode_go_xmote_bh(struct gfs2_glock
*gl
)
108 struct gfs2_holder
*gh
= gl
->gl_req_gh
;
109 struct buffer_head
*bh
;
/* Re-read the inode unless unlocked, or the holder asked to skip it. */
112 if (gl
->gl_state
!= LM_ST_UNLOCKED
&&
113 (!gh
|| !(gh
->gh_flags
& GL_SKIP
))) {
114 error
= gfs2_meta_read(gl
, gl
->gl_name
.ln_number
, DIO_START
,
122 * inode_go_drop_th - unlock a glock
125 * Invoked from rq_demote().
126 * Another node needs the lock in EXCLUSIVE mode, or lock (unused for too long)
127 * is being purged from our node's glock cache; we're dropping lock.
/*
 * inode_go_drop_th - drop (unlock) an inode glock.
 * @gl: the glock
 *
 * Invoked from rq_demote() when another node wants the lock or it is being
 * purged from the cache; delegates the drop to gfs2_glock_drop_th().
 * NOTE(review): a statement before the delegation (original lines 131-132)
 * appears to have been dropped by the extraction — verify upstream.
 */
130 static void inode_go_drop_th(struct gfs2_glock
*gl
)
133 gfs2_glock_drop_th(gl
);
137 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
138 * @gl: the glock protecting the inode
/*
 * inode_go_sync - sync dirty data and/or metadata for an inode glock.
 * @gl: the glock protecting the inode
 * @flags: DIO_METADATA and/or DIO_DATA select what to sync
 *
 * When GLF_DIRTY is set: pages are synced (started, then waited on around a
 * log flush and a metadata sync with DIO_START|DIO_WAIT), after which
 * GLF_DIRTY is cleared.  Further log-flush/meta-sync/page-sync calls follow,
 * with DIO_RELEASE additionally emptying the glock's AIL list.  GLF_SYNC is
 * cleared at the end.
 * NOTE(review): the `if`/`else if` lines selecting between the meta/data
 * branches (original lines 149, 155, 158, 162-163) were dropped by the
 * extraction, so the visible calls are branch bodies whose conditions on
 * `meta` and `data` are not shown — verify branch structure upstream.
 */
143 static void inode_go_sync(struct gfs2_glock
*gl
, int flags
)
145 int meta
= (flags
& DIO_METADATA
);
146 int data
= (flags
& DIO_DATA
);
148 if (test_bit(GLF_DIRTY
, &gl
->gl_flags
)) {
/* Start page writeback, flush the journal, then sync metadata and wait. */
150 gfs2_page_sync(gl
, flags
| DIO_START
);
151 gfs2_log_flush(gl
->gl_sbd
, gl
);
152 gfs2_meta_sync(gl
, flags
| DIO_START
| DIO_WAIT
);
/* Now wait for the page writeback started above. */
153 gfs2_page_sync(gl
, flags
| DIO_WAIT
);
154 clear_bit(GLF_DIRTY
, &gl
->gl_flags
);
/* (separate branch — condition line missing) metadata-only sync path. */
156 gfs2_log_flush(gl
->gl_sbd
, gl
);
157 gfs2_meta_sync(gl
, flags
| DIO_START
| DIO_WAIT
);
/* (separate branch — condition line missing) data-only sync path. */
159 gfs2_page_sync(gl
, flags
| DIO_START
| DIO_WAIT
);
/* Lock is being released: drain the AIL list too. */
160 if (flags
& DIO_RELEASE
)
161 gfs2_ail_empty_gl(gl
);
164 clear_bit(GLF_SYNC
, &gl
->gl_flags
);
168 * inode_go_inval - prepare an inode glock to be released
/*
 * inode_go_inval - invalidate cached state for an inode glock.
 * @gl: the glock
 * @flags: DIO_METADATA and/or DIO_DATA select what to invalidate
 *
 * NOTE(review): only the flag decoding is visible; the body performing the
 * actual invalidation (original lines 178+) was dropped by the extraction —
 * verify against the upstream file.
 */
174 static void inode_go_inval(struct gfs2_glock
*gl
, int flags
)
176 int meta
= (flags
& DIO_METADATA
);
177 int data
= (flags
& DIO_DATA
);
188 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
191 * Returns: 1 if it's ok
/*
 * inode_go_demote_ok - may this inode glock be demoted/unlocked?
 * @gl: the glock
 *
 * Demotion is considered when (a) the glock has no attached object and no
 * cached pages, or (b) local caching is off and the glock has been idle past
 * the tunable demote period (gl_stamp + gt_demote_secs * HZ).
 * Returns: 1 if it's ok.
 * NOTE(review): the return statements for each branch (original lines 200,
 * 204-206) were dropped by the extraction — verify the returned values
 * upstream.
 */
194 static int inode_go_demote_ok(struct gfs2_glock
*gl
)
196 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
/* No object attached and nothing cached: cheap to demote. */
199 if (!gl
->gl_object
&& !gl
->gl_aspace
->i_mapping
->nrpages
)
/* Without local caching, demote once the idle timeout has elapsed. */
201 else if (!sdp
->sd_args
.ar_localcaching
&&
202 time_after_eq(jiffies
, gl
->gl_stamp
+
203 gfs2_tune_get(sdp
, gt_demote_secs
) * HZ
))
210 * inode_go_lock - operation done after an inode lock is locked by a process
/*
 * inode_go_lock - run after an inode glock is acquired by a process.
 * @gh: the holder that just got the lock
 *
 * If the inode's cached version (i_vn) is stale relative to the glock
 * (gl_vn), the inode is refreshed from disk and its attributes copied in.
 * If a truncate was interrupted (GFS2_DIF_TRUNC_IN_PROG) and we hold the
 * lock exclusively on this node (LM_ST_EXCLUSIVE + GL_LOCAL_EXCL), the
 * truncate is resumed.
 * Returns: an error code via `error` (declaration and return lines were
 * dropped by the extraction — NOTE(review): verify upstream; early-out guard
 * lines, e.g. around original 221-225 and 228-229, are also missing).
 */
217 static int inode_go_lock(struct gfs2_holder
*gh
)
219 struct gfs2_glock
*gl
= gh
->gh_gl
;
220 struct gfs2_inode
*ip
= gl
->gl_object
;
/* Cached copy is out of date: re-read the dinode and refresh attrs. */
226 if (ip
->i_vn
!= gl
->gl_vn
) {
227 error
= gfs2_inode_refresh(ip
);
230 gfs2_inode_attr_in(ip
);
/* Resume an interrupted truncate if we now hold node-local EX. */
233 if ((ip
->i_di
.di_flags
& GFS2_DIF_TRUNC_IN_PROG
) &&
234 (gl
->gl_state
== LM_ST_EXCLUSIVE
) &&
235 (gh
->gh_flags
& GL_LOCAL_EXCL
))
236 error
= gfs2_truncatei_resume(ip
);
242 * inode_go_unlock - operation done before an inode lock is unlocked by a
/*
 * inode_go_unlock - run before an inode glock is released by a process.
 * @gh: the holder being dropped
 *
 * If an inode is attached and the glock is dirty, the attributes are copied
 * back into the VFS inode; the inode's metadata cache is then flushed.
 * NOTE(review): the guard around the gfs2_meta_cache_flush() call (original
 * lines 256-257) was dropped by the extraction — its condition is not
 * visible here; verify upstream.
 */
249 static void inode_go_unlock(struct gfs2_holder
*gh
)
251 struct gfs2_glock
*gl
= gh
->gh_gl
;
252 struct gfs2_inode
*ip
= gl
->gl_object
;
254 if (ip
&& test_bit(GLF_DIRTY
, &gl
->gl_flags
))
255 gfs2_inode_attr_in(ip
);
258 gfs2_meta_cache_flush(ip
);
/*
 * inode_greedy - adapt the "greedy" hold time for an inode glock.
 * @gl: the glock
 *
 * Under ip->i_spin: if a page fault occurred within the last `quantum`
 * jiffies, the greedy time is increased by `quantum`; otherwise it is
 * decreased by `quantum`, with an apparent clamp when the result underflows
 * to 0 or exceeds `max`.  The new value is stored in ip->i_greedy.
 * Tunables: gt_greedy_quantum and gt_greedy_max from the superblock.
 * NOTE(review): several lines were dropped by the extraction (the else
 * branch header, the clamp bodies at original 279-286, and trailing
 * statements after the unlock) — verify clamping behavior upstream.
 */
267 static void inode_greedy(struct gfs2_glock
*gl
)
269 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
270 struct gfs2_inode
*ip
= gl
->gl_object
;
271 unsigned int quantum
= gfs2_tune_get(sdp
, gt_greedy_quantum
);
272 unsigned int max
= gfs2_tune_get(sdp
, gt_greedy_max
);
273 unsigned int new_time
;
275 spin_lock(&ip
->i_spin
);
/* Recent page fault: the workload is active, extend the greedy period. */
277 if (time_after(ip
->i_last_pfault
+ quantum
, jiffies
)) {
278 new_time
= ip
->i_greedy
+ quantum
;
/* (else branch — header line missing) idle: shrink the greedy period. */
282 new_time
= ip
->i_greedy
- quantum
;
/* Underflow to 0 or overshoot past max triggers a clamp (body missing). */
283 if (!new_time
|| new_time
> max
)
287 ip
->i_greedy
= new_time
;
289 spin_unlock(&ip
->i_spin
);
295 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
298 * Returns: 1 if it's ok
/*
 * rgrp_go_demote_ok - ok to demote an rgrp glock iff nothing is cached.
 * @gl: the glock
 *
 * Returns: 1 (demote ok) when the glock's address space holds no pages.
 */
301 static int rgrp_go_demote_ok(struct gfs2_glock
*gl
)
303 return !gl
->gl_aspace
->i_mapping
->nrpages
;
307 * rgrp_go_lock - operation done after an rgrp lock is locked by
308 * a first holder on this node.
/*
 * rgrp_go_lock - run after the first holder on this node locks an rgrp.
 * @gh: the holder
 *
 * Reads the resource-group header buffers in via gfs2_rgrp_bh_get() and
 * returns its result.
 */
315 static int rgrp_go_lock(struct gfs2_holder
*gh
)
317 return gfs2_rgrp_bh_get(gh
->gh_gl
->gl_object
);
321 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
322 * a last holder on this node.
/*
 * rgrp_go_unlock - run before the last holder on this node unlocks an rgrp.
 * @gh: the holder
 *
 * Releases the resource-group header buffers via gfs2_rgrp_bh_put().
 */
328 static void rgrp_go_unlock(struct gfs2_holder
*gh
)
330 gfs2_rgrp_bh_put(gh
->gh_gl
->gl_object
);
334 * trans_go_xmote_th - promote/demote the transaction glock
336 * @state: the requested state
/*
 * trans_go_xmote_th - promote/demote the transaction glock.
 * @gl: the glock
 * @state: the requested state
 *
 * When the glock currently holds a real state and the journal is live,
 * all metadata is synced and the log is shut down before the generic
 * state change via gfs2_glock_xmote_th().
 * NOTE(review): the signature is truncated (dangling "," — the `int flags`
 * parameter line was dropped by the extraction); verify upstream.
 */
341 static void trans_go_xmote_th(struct gfs2_glock
*gl
, unsigned int state
,
344 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
/* Quiesce the journal before giving up (or changing) the trans lock. */
346 if (gl
->gl_state
!= LM_ST_UNLOCKED
&&
347 test_bit(SDF_JOURNAL_LIVE
, &sdp
->sd_flags
)) {
348 gfs2_meta_syncfs(sdp
);
349 gfs2_log_shutdown(sdp
);
352 gfs2_glock_xmote_th(gl
, state
, flags
);
356 * trans_go_xmote_bh - After promoting/demoting the transaction glock
/*
 * trans_go_xmote_bh - bottom half after the transaction glock changes state.
 * @gl: the glock
 *
 * When the glock holds a real state and the journal is live: flush the
 * journal inode's metadata cache, invalidate the journal glock's metadata
 * and data, locate the current log head with gfs2_find_jhead(), and check
 * that the head carries GFS2_LOG_HEAD_UNMOUNT (a clean shutdown marker).
 * Unless the filesystem is shut down, the log sequence and log pointers are
 * then re-initialized from the head.
 * NOTE(review): error-handling lines after gfs2_find_jhead() and the
 * branch taken when the UNMOUNT flag is absent (original 375-379) were
 * dropped by the extraction; `error` is declared on a missing line too.
 */
361 static void trans_go_xmote_bh(struct gfs2_glock
*gl
)
363 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
364 struct gfs2_inode
*ip
= sdp
->sd_jdesc
->jd_inode
->u
.generic_ip
;
365 struct gfs2_glock
*j_gl
= ip
->i_gl
;
366 struct gfs2_log_header head
;
369 if (gl
->gl_state
!= LM_ST_UNLOCKED
&&
370 test_bit(SDF_JOURNAL_LIVE
, &sdp
->sd_flags
)) {
/* Drop cached journal metadata and invalidate through the glock ops. */
371 gfs2_meta_cache_flush(sdp
->sd_jdesc
->jd_inode
->u
.generic_ip
);
372 j_gl
->gl_ops
->go_inval(j_gl
, DIO_METADATA
| DIO_DATA
);
374 error
= gfs2_find_jhead(sdp
->sd_jdesc
, &head
);
/* The journal must have been cleanly shut down before we took the lock. */
377 if (!(head
.lh_flags
& GFS2_LOG_HEAD_UNMOUNT
))
380 /* Initialize some head of the log stuff */
381 if (!test_bit(SDF_SHUTDOWN
, &sdp
->sd_flags
)) {
382 sdp
->sd_log_sequence
= head
.lh_sequence
+ 1;
383 gfs2_log_pointers_init(sdp
, head
.lh_blkno
);
389 * trans_go_drop_th - unlock the transaction glock
392 * We want to sync the device even with localcaching. Remember
393 * that localcaching journal replay only marks buffers dirty.
/*
 * trans_go_drop_th - unlock the transaction glock.
 * @gl: the glock
 *
 * The device is synced even with localcaching (journal replay there only
 * marks buffers dirty): with a live journal, sync all metadata and shut the
 * log down, then delegate the drop to gfs2_glock_drop_th().
 */
396 static void trans_go_drop_th(struct gfs2_glock
*gl
)
398 struct gfs2_sbd
*sdp
= gl
->gl_sbd
;
400 if (test_bit(SDF_JOURNAL_LIVE
, &sdp
->sd_flags
)) {
401 gfs2_meta_syncfs(sdp
);
402 gfs2_log_shutdown(sdp
);
405 gfs2_glock_drop_th(gl
);
409 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
412 * Returns: 1 if it's ok
/*
 * quota_go_demote_ok - ok to demote a quota glock iff no LVBs are held.
 * @gl: the glock
 *
 * Returns: 1 when the glock's lock-value-block reference count is zero.
 */
415 static int quota_go_demote_ok(struct gfs2_glock
*gl
)
417 return !atomic_read(&gl
->gl_lvb_count
);
/*
 * Ops vector for plain metadata glocks: generic promote/demote thunks plus
 * the meta sync/inval/demote-ok callouts defined above.
 * NOTE(review): the closing "};" line was dropped by the extraction.
 */
420 struct gfs2_glock_operations gfs2_meta_glops
= {
421 .go_xmote_th
= gfs2_glock_xmote_th
,
422 .go_drop_th
= gfs2_glock_drop_th
,
423 .go_sync
= meta_go_sync
,
424 .go_inval
= meta_go_inval
,
425 .go_demote_ok
= meta_go_demote_ok
,
426 .go_type
= LM_TYPE_META
/*
 * Ops vector for inode glocks: the full set of inode-specific callouts
 * (xmote top/bottom halves, drop, sync, inval, demote-ok, lock/unlock,
 * greedy-time adaptation).
 * NOTE(review): the closing "};" line was dropped by the extraction.
 */
429 struct gfs2_glock_operations gfs2_inode_glops
= {
430 .go_xmote_th
= inode_go_xmote_th
,
431 .go_xmote_bh
= inode_go_xmote_bh
,
432 .go_drop_th
= inode_go_drop_th
,
433 .go_sync
= inode_go_sync
,
434 .go_inval
= inode_go_inval
,
435 .go_demote_ok
= inode_go_demote_ok
,
436 .go_lock
= inode_go_lock
,
437 .go_unlock
= inode_go_unlock
,
438 .go_greedy
= inode_greedy
,
439 .go_type
= LM_TYPE_INODE
/*
 * Ops vector for resource-group glocks: shares the meta sync/inval callouts
 * but uses rgrp-specific demote-ok and lock/unlock (bitmap buffer get/put).
 * NOTE(review): the closing "};" line was dropped by the extraction.
 */
442 struct gfs2_glock_operations gfs2_rgrp_glops
= {
443 .go_xmote_th
= gfs2_glock_xmote_th
,
444 .go_drop_th
= gfs2_glock_drop_th
,
445 .go_sync
= meta_go_sync
,
446 .go_inval
= meta_go_inval
,
447 .go_demote_ok
= rgrp_go_demote_ok
,
448 .go_lock
= rgrp_go_lock
,
449 .go_unlock
= rgrp_go_unlock
,
450 .go_type
= LM_TYPE_RGRP
/*
 * Ops vector for the transaction glock: journal quiesce/replay hooks on
 * promote (top and bottom halves) and drop.
 * NOTE(review): the closing "};" line was dropped by the extraction.
 */
453 struct gfs2_glock_operations gfs2_trans_glops
= {
454 .go_xmote_th
= trans_go_xmote_th
,
455 .go_xmote_bh
= trans_go_xmote_bh
,
456 .go_drop_th
= trans_go_drop_th
,
457 .go_type
= LM_TYPE_NONDISK
/*
 * Ops vector for iopen glocks: generic thunks plus the iopen callback.
 * NOTE(review): the closing "};" line was dropped by the extraction.
 */
460 struct gfs2_glock_operations gfs2_iopen_glops
= {
461 .go_xmote_th
= gfs2_glock_xmote_th
,
462 .go_drop_th
= gfs2_glock_drop_th
,
463 .go_callback
= gfs2_iopen_go_callback
,
464 .go_type
= LM_TYPE_IOPEN
/*
 * Ops vector for flock glocks: generic thunks only.
 * NOTE(review): the closing "};" line was dropped by the extraction.
 */
467 struct gfs2_glock_operations gfs2_flock_glops
= {
468 .go_xmote_th
= gfs2_glock_xmote_th
,
469 .go_drop_th
= gfs2_glock_drop_th
,
470 .go_type
= LM_TYPE_FLOCK
/*
 * Ops vector for nondisk glocks: generic thunks only.
 * NOTE(review): the closing "};" line was dropped by the extraction.
 */
473 struct gfs2_glock_operations gfs2_nondisk_glops
= {
474 .go_xmote_th
= gfs2_glock_xmote_th
,
475 .go_drop_th
= gfs2_glock_drop_th
,
476 .go_type
= LM_TYPE_NONDISK
/*
 * Ops vector for quota glocks: generic thunks plus an LVB-count based
 * demote-ok check.
 * NOTE(review): the closing "};" line was dropped by the extraction.
 */
479 struct gfs2_glock_operations gfs2_quota_glops
= {
480 .go_xmote_th
= gfs2_glock_xmote_th
,
481 .go_drop_th
= gfs2_glock_drop_th
,
482 .go_demote_ok
= quota_go_demote_ok
,
483 .go_type
= LM_TYPE_QUOTA
486 struct gfs2_glock_operations gfs2_journal_glops
= {
487 .go_xmote_th
= gfs2_glock_xmote_th
,
488 .go_drop_th
= gfs2_glock_drop_th
,
489 .go_type
= LM_TYPE_JOURNAL