/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct gfs2_glock_iter {
	int hash;			/* hash bucket index */
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct gfs2_glock *gl;		/* current glock struct */
	char string[512];		/* scratch space */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif
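
/*
 * For illustration: with GFS2_GL_HASH_SHIFT = 15 there are 32768 hash
 * chains but (with lockdep off and, say, NR_CPUS >= 32) only 4096
 * rwlocks guarding them. gl_lock_addr() masks the bucket index down
 * into the lock array, so buckets 0, 4096, 8192, ... all share
 * gl_hash_locks[0]. That sharing is fine because these locks only
 * guard the hlist chains; each glock has its own gl_spin.
 */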

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
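
/*
 * Note that the superblock pointer is mixed into the hash as well, so
 * two filesystems mounted at once can hold locks with the same
 * (ln_number, ln_type) pair without always colliding in one chain;
 * search_bucket() below still compares gl_sbd to tell them apart.
 */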

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct kmem_cache *cachep = gfs2_glock_cachep;

	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	if (mapping)
		cachep = gfs2_glock_aspace_cachep;
	sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	int may_reclaim;
	may_reclaim = (demote_ok(gl) &&
		       (atomic_read(&gl->gl_ref) == 1 ||
			(gl->gl_name.ln_type == LM_TYPE_INODE &&
			 atomic_read(&gl->gl_ref) <= 2)));
	spin_lock(&lru_lock);
	if (list_empty(&gl->gl_lru) && may_reclaim) {
		list_add_tail(&gl->gl_lru, &lru_list);
		atomic_inc(&lru_count);
	}
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
	gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
		hlist_del(&gl->gl_list);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		write_unlock(gl_lock_addr(gl->gl_hash));
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	spin_lock(&gl->gl_spin);
	gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem the glock should belong to
 * @name: The lock name
 *
 * Must be called with the bucket's gl_lock_addr() lock held.
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
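
/*
 * For example: if the glock is held SH and the head of the queue is a
 * waiting SH request, further SH requests behind it may also be granted,
 * since shared locks stack; but as soon as either the request or the
 * queue head wants EX, nothing except that head entry can be granted.
 * Requests carrying LM_FLAG_ANY accept any currently held mode other
 * than UN, and GL_EXACT requests accept only an exact state match.
 */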

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM, or zero to fail queued try locks
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}
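
/*
 * A glock pins itself while it is granted in any mode: state_change()
 * above takes a reference on the UN -> locked transition and drops it
 * on the locked -> UN transition. The drop uses gfs2_glock_put_nolock()
 * rather than the full gfs2_glock_put(), which is safe here because the
 * caller still owns another reference of its own.
 */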

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
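
/*
 * LM_ST_EXCLUSIVE in gl_demote_state acts as the "no demote pending"
 * sentinel: a glock is never asked to demote *to* EX, so
 * handle_callback() below treats gl_demote_state == LM_ST_EXCLUSIVE as
 * an empty slot when it records a new request.
 */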

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
				 unsigned int req_state,
				 unsigned int flags)
{
	int ret = LM_OUT_ERROR;

	if (!sdp->sd_lockstruct.ls_ops->lm_lock)
		return req_state == LM_ST_UNLOCKED ? 0 : req_state;

	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock,
							 req_state, flags);
	return ret;
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	BUG_ON(gl->gl_state == target);
	BUG_ON(gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;
	ret = gfs2_lm_lock(sdp, gl, target, lck_flags);

	if (!(ret & LM_OUT_ASYNC)) {
		finish_xmote(gl, ret);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	} else {
		GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
	}
	spin_lock(&gl->gl_spin);
}
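
/*
 * The queue_delayed_work() idiom above recurs throughout this file:
 * gfs2_glock_hold() is called before queueing so that the work item
 * owns a reference, and since queue_delayed_work() returns 0 when the
 * work was already pending (and thus already owns a reference), the
 * surplus reference is dropped again in that case.
 */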

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
	else
		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately, nonzero to mark the demote pending
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}
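
/*
 * For example: if an SH demote is already recorded and a DF request
 * arrives, the two are merged by falling back to LM_ST_UNLOCKED, the
 * state that satisfies every possible requester. A repeat of the same
 * state, or anything arriving after an UN request, leaves
 * gl_demote_state untouched.
 */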

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		/* Bound the write to the scratch buffer and don't feed the
		   result back through a format string. */
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_puts(seq, gi->string);
	} else {
		printk(KERN_ERR " ");
		vprintk(fmt, args);
	}
	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	trace_gfs2_glock_queue(gh, 1);
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
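
/*
 * Typical calling sequence (a sketch; "gl" stands for any glock the
 * caller already holds a reference to):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... the protected object may now be accessed ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 *
 * gfs2_glock_nq_init() wraps the first two calls, and
 * gfs2_glock_dq_uninit() below wraps the last two.
 */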

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}
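
/*
 * Sorting the holders by lock number gives every caller the same global
 * acquisition order, which is what makes nq_m_sync() below deadlock
 * free: e.g. two tasks that each need glocks 5 and 9 will both queue 5
 * first. The BUG_ON() above asserts that two holders with equal lock
 * numbers must refer to glocks of different types, i.e. never to the
 * same glock twice.
 */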

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_ops->go_min_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		spin_lock(&gl->gl_spin);
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
		spin_unlock(&gl->gl_spin);
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}


static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash bucket index
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (!sdp || gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	cond_resched();
	return has_entries;
}


/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
	}
	spin_unlock(&lru_lock);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(thaw_glock, sdp, x);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(clear_glock, sdp, x);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	gfs2_dump_lockstate(sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}

static const char *gflags2str(char *buf, const unsigned long *gflags)
{
	char *p = buf;
	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	*p = 0;
	return buf;
}

/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are: n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, &gl->gl_flags),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}
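
/*
 * An example of the resulting output (illustrative values only):
 *
 *	G:  s:SH n:2/27bc f:q t:SH d:EX/0 a:0 r:3
 *	 H: s:SH f:H e:0 p:1247 [df] gfs2_inode_lookup+0x11c/0x1c0
 *
 * i.e. a shared inode glock (type 2), no demote pending (d:EX), with
 * one granted holder belonging to pid 1247.
 */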

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps each of the filesystem's glocks to the console.
 *
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

		read_lock(gl_lock_addr(x));

		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(NULL, gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}

	return error;
}


int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
	}
#ifdef GL_HASH_LOCK_SZ
	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
		rwlock_init(&gl_hash_locks[i]);
	}
#endif

	/* alloc_workqueue() returns NULL on failure, not an ERR_PTR */
	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZEABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZEABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

restart:
	read_lock(gl_lock_addr(gi->hash));
	gl = gi->gl;
	if (gl) {
		gi->gl = hlist_entry(gl->gl_list.next,
				     struct gfs2_glock, gl_list);
	} else {
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
	}
	if (gi->gl)
		gfs2_glock_hold(gi->gl);
	read_unlock(gl_lock_addr(gi->hash));
	if (gl)
		gfs2_glock_put(gl);
	while (gi->gl == NULL) {
		gi->hash++;
		if (gi->hash >= GFS2_GL_HASH_SIZE)
			return 1;
		read_lock(gl_lock_addr(gi->hash));
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
		read_unlock(gl_lock_addr(gi->hash));
	}

	if (gi->sdp != gi->gl->gl_sbd)
		goto restart;

	return 0;
}

static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
{
	if (gi->gl)
		gfs2_glock_put(gi->gl);
	gi->gl = NULL;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;

	do {
		if (gfs2_glock_iter_next(gi)) {
			gfs2_glock_iter_free(gi);
			return NULL;
		}
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi)) {
		gfs2_glock_iter_free(gi);
		return NULL;
	}

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;
	gfs2_glock_iter_free(gi);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}