1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/delay.h>
16#include <linux/sort.h>
17#include <linux/jhash.h>
18#include <linux/kallsyms.h>
19#include <linux/gfs2_ondisk.h>
20#include <linux/list.h>
21#include <linux/lm_interface.h>
22#include <linux/wait.h>
23#include <linux/module.h>
24#include <linux/rwsem.h>
25#include <asm/uaccess.h>
26#include <linux/seq_file.h>
27#include <linux/debugfs.h>
28#include <linux/kthread.h>
29#include <linux/freezer.h>
30
31#include "gfs2.h"
32#include "incore.h"
33#include "glock.h"
34#include "glops.h"
35#include "inode.h"
36#include "lm.h"
37#include "lops.h"
38#include "meta_io.h"
39#include "quota.h"
40#include "super.h"
41#include "util.h"
42
43struct gfs2_gl_hash_bucket {
44 struct hlist_head hb_list;
45};
46
47struct glock_iter {
48 int hash; /* hash bucket index */
49 struct gfs2_sbd *sdp; /* incore superblock */
50 struct gfs2_glock *gl; /* current glock struct */
51 struct seq_file *seq; /* sequence file for debugfs */
52 char string[512]; /* scratch space */
53};
54
55typedef void (*glock_examiner) (struct gfs2_glock * gl);
56
57static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
58static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
59static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
60static void gfs2_glock_drop_th(struct gfs2_glock *gl);
61static DECLARE_RWSEM(gfs2_umount_flush_sem);
62static struct dentry *gfs2_root;
63static struct task_struct *scand_process;
64static unsigned int scand_secs = 5;
65
66#define GFS2_GL_HASH_SHIFT 15
67#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
68#define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1)
69
70static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
71static struct dentry *gfs2_root;
72
73/*
74 * Despite what you might think, the numbers below are not arbitrary :-)
75 * They are taken from the ipv4 routing hash code, which is well tested
76 * and thus should be nearly optimal. Later on we might tweak the numbers
77 * but for now this should be fine.
78 *
79 * The reason for putting the locks in a separate array from the list heads
80 * is that we can have fewer locks than list heads and save memory. We use
81 * the same hash function for both, but with a different hash mask.
82 */
83#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
84 defined(CONFIG_PROVE_LOCKING)
85
86#ifdef CONFIG_LOCKDEP
87# define GL_HASH_LOCK_SZ 256
88#else
89# if NR_CPUS >= 32
90# define GL_HASH_LOCK_SZ 4096
91# elif NR_CPUS >= 16
92# define GL_HASH_LOCK_SZ 2048
93# elif NR_CPUS >= 8
94# define GL_HASH_LOCK_SZ 1024
95# elif NR_CPUS >= 4
96# define GL_HASH_LOCK_SZ 512
97# else
98# define GL_HASH_LOCK_SZ 256
99# endif
100#endif
101
102/* We never want more locks than chains */
103#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
104# undef GL_HASH_LOCK_SZ
105# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
106#endif
107
108static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
109
110static inline rwlock_t *gl_lock_addr(unsigned int x)
111{
112 return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
113}
114#else /* not SMP, so no spinlocks required */
115static inline rwlock_t *gl_lock_addr(unsigned int x)
116{
117 return NULL;
118}
119#endif
120
121/**
122 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
123 * @actual: the current state of the lock
124 * @requested: the lock state that was requested by the caller
125 * @flags: the modifier flags passed in by the caller
126 *
127 * Returns: 1 if the locks are compatible, 0 otherwise
128 */
129
130static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
131 int flags)
132{
133 if (actual == requested)
134 return 1;
135
136 if (flags & GL_EXACT)
137 return 0;
138
139 if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
140 return 1;
141
142 if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
143 return 1;
144
145 return 0;
146}
147
148/**
149 * gl_hash() - Turn glock number into hash bucket number
150 * @lock: The glock number
151 *
152 * Returns: The number of the corresponding hash bucket
153 */
154
155static unsigned int gl_hash(const struct gfs2_sbd *sdp,
156 const struct lm_lockname *name)
157{
158 unsigned int h;
159
160 h = jhash(&name->ln_number, sizeof(u64), 0);
161 h = jhash(&name->ln_type, sizeof(unsigned int), h);
162 h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
163 h &= GFS2_GL_HASH_MASK;
164
165 return h;
166}
167
168/**
169 * glock_free() - Perform a few checks and then release struct gfs2_glock
170 * @gl: The glock to release
171 *
172 * Also calls lock module to release its internal structure for this glock.
173 *
174 */
175
176static void glock_free(struct gfs2_glock *gl)
177{
178 struct gfs2_sbd *sdp = gl->gl_sbd;
179 struct inode *aspace = gl->gl_aspace;
180
181 gfs2_lm_put_lock(sdp, gl->gl_lock);
182
183 if (aspace)
184 gfs2_aspace_put(aspace);
185
186 kmem_cache_free(gfs2_glock_cachep, gl);
187}
188
189/**
190 * gfs2_glock_hold() - increment reference count on glock
191 * @gl: The glock to hold
192 *
193 */
194
195void gfs2_glock_hold(struct gfs2_glock *gl)
196{
197 atomic_inc(&gl->gl_ref);
198}
199
200/**
201 * gfs2_glock_put() - Decrement reference count on glock
202 * @gl: The glock to put
203 *
204 */
205
206int gfs2_glock_put(struct gfs2_glock *gl)
207{
208 int rv = 0;
209 struct gfs2_sbd *sdp = gl->gl_sbd;
210
211 write_lock(gl_lock_addr(gl->gl_hash));
212 if (atomic_dec_and_test(&gl->gl_ref)) {
213 hlist_del(&gl->gl_list);
214 write_unlock(gl_lock_addr(gl->gl_hash));
215 BUG_ON(spin_is_locked(&gl->gl_spin));
216 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
217 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
218 gfs2_assert(sdp, list_empty(&gl->gl_holders));
219 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
220 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
221 glock_free(gl);
222 rv = 1;
223 goto out;
224 }
225 write_unlock(gl_lock_addr(gl->gl_hash));
226out:
227 return rv;
228}
229
230/**
231 * search_bucket() - Find struct gfs2_glock by lock number
232 * @bucket: the bucket to search
233 * @name: The lock name
234 *
235 * Returns: NULL, or the struct gfs2_glock with the requested number
236 */
237
238static struct gfs2_glock *search_bucket(unsigned int hash,
239 const struct gfs2_sbd *sdp,
240 const struct lm_lockname *name)
241{
242 struct gfs2_glock *gl;
243 struct hlist_node *h;
244
245 hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
246 if (!lm_name_equal(&gl->gl_name, name))
247 continue;
248 if (gl->gl_sbd != sdp)
249 continue;
250
251 atomic_inc(&gl->gl_ref);
252
253 return gl;
254 }
255
256 return NULL;
257}
258
259/**
260 * gfs2_glock_find() - Find glock by lock number
261 * @sdp: The GFS2 superblock
262 * @name: The lock name
263 *
264 * Returns: NULL, or the struct gfs2_glock with the requested number
265 */
266
267static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
268 const struct lm_lockname *name)
269{
270 unsigned int hash = gl_hash(sdp, name);
271 struct gfs2_glock *gl;
272
273 read_lock(gl_lock_addr(hash));
274 gl = search_bucket(hash, sdp, name);
275 read_unlock(gl_lock_addr(hash));
276
277 return gl;
278}
279
280/**
281 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
282 * @sdp: The GFS2 superblock
283 * @number: the lock number
284 * @glops: The glock_operations to use
285 * @create: If 0, don't create the glock if it doesn't exist
286 * @glp: the glock is returned here
287 *
288 * This does not lock a glock, just finds/creates structures for one.
289 *
290 * Returns: errno
291 */
292
293int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
294 const struct gfs2_glock_operations *glops, int create,
295 struct gfs2_glock **glp)
296{
297 struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
298 struct gfs2_glock *gl, *tmp;
299 unsigned int hash = gl_hash(sdp, &name);
300 int error;
301
302 read_lock(gl_lock_addr(hash));
303 gl = search_bucket(hash, sdp, &name);
304 read_unlock(gl_lock_addr(hash));
305
306 if (gl || !create) {
307 *glp = gl;
308 return 0;
309 }
310
311 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
312 if (!gl)
313 return -ENOMEM;
314
315 gl->gl_flags = 0;
316 gl->gl_name = name;
317 atomic_set(&gl->gl_ref, 1);
318 gl->gl_state = LM_ST_UNLOCKED;
319 gl->gl_hash = hash;
320 gl->gl_owner_pid = 0;
321 gl->gl_ip = 0;
322 gl->gl_ops = glops;
323 gl->gl_req_gh = NULL;
324 gl->gl_req_bh = NULL;
325 gl->gl_vn = 0;
326 gl->gl_stamp = jiffies;
327 gl->gl_object = NULL;
328 gl->gl_sbd = sdp;
329 gl->gl_aspace = NULL;
330 lops_init_le(&gl->gl_le, &gfs2_glock_lops);
331
332 /* If this glock protects actual on-disk data or metadata blocks,
333 create a VFS inode to manage the pages/buffers holding them. */
334 if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
335 gl->gl_aspace = gfs2_aspace_get(sdp);
336 if (!gl->gl_aspace) {
337 error = -ENOMEM;
338 goto fail;
339 }
340 }
341
342 error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
343 if (error)
344 goto fail_aspace;
345
346 write_lock(gl_lock_addr(hash));
347 tmp = search_bucket(hash, sdp, &name);
348 if (tmp) {
349 write_unlock(gl_lock_addr(hash));
350 glock_free(gl);
351 gl = tmp;
352 } else {
353 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
354 write_unlock(gl_lock_addr(hash));
355 }
356
357 *glp = gl;
358
359 return 0;
360
361fail_aspace:
362 if (gl->gl_aspace)
363 gfs2_aspace_put(gl->gl_aspace);
364fail:
365 kmem_cache_free(gfs2_glock_cachep, gl);
366 return error;
367}
368
369/**
370 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
371 * @gl: the glock
372 * @state: the state we're requesting
373 * @flags: the modifier flags
374 * @gh: the holder structure
375 *
376 */
377
378void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
379 struct gfs2_holder *gh)
380{
381 INIT_LIST_HEAD(&gh->gh_list);
382 gh->gh_gl = gl;
383 gh->gh_ip = (unsigned long)__builtin_return_address(0);
384 gh->gh_owner_pid = current->pid;
385 gh->gh_state = state;
386 gh->gh_flags = flags;
387 gh->gh_error = 0;
388 gh->gh_iflags = 0;
389 gfs2_glock_hold(gl);
390}
391
392/**
393 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
394 * @state: the state we're requesting
395 * @flags: the modifier flags
396 * @gh: the holder structure
397 *
398 * Don't mess with the glock.
399 *
400 */
401
402void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
403{
404 gh->gh_state = state;
405 gh->gh_flags = flags;
406 gh->gh_iflags = 0;
407 gh->gh_ip = (unsigned long)__builtin_return_address(0);
408}
409
410/**
411 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
412 * @gh: the holder structure
413 *
414 */
415
416void gfs2_holder_uninit(struct gfs2_holder *gh)
417{
418 gfs2_glock_put(gh->gh_gl);
419 gh->gh_gl = NULL;
420 gh->gh_ip = 0;
421}
422
423static void gfs2_holder_wake(struct gfs2_holder *gh)
424{
425 clear_bit(HIF_WAIT, &gh->gh_iflags);
426 smp_mb__after_clear_bit();
427 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
428}
429
430static int just_schedule(void *word)
431{
432 schedule();
433 return 0;
434}
435
436static void wait_on_holder(struct gfs2_holder *gh)
437{
438 might_sleep();
439 wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
440}
441
442static void gfs2_demote_wake(struct gfs2_glock *gl)
443{
444 clear_bit(GLF_DEMOTE, &gl->gl_flags);
445 smp_mb__after_clear_bit();
446 wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
447}
448
449static void wait_on_demote(struct gfs2_glock *gl)
450{
451 might_sleep();
452 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
453}
454
455/**
456 * rq_mutex - process a mutex request in the queue
457 * @gh: the glock holder
458 *
459 * Returns: 1 if the queue is blocked
460 */
461
462static int rq_mutex(struct gfs2_holder *gh)
463{
464 struct gfs2_glock *gl = gh->gh_gl;
465
466 list_del_init(&gh->gh_list);
467 /* gh->gh_error never examined. */
468 set_bit(GLF_LOCK, &gl->gl_flags);
469 clear_bit(HIF_WAIT, &gh->gh_iflags);
470 smp_mb();
471 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
472
473 return 1;
474}
475
476/**
477 * rq_promote - process a promote request in the queue
478 * @gh: the glock holder
479 *
480 * Acquire a new inter-node lock, or change a lock state to more restrictive.
481 *
482 * Returns: 1 if the queue is blocked
483 */
484
485static int rq_promote(struct gfs2_holder *gh)
486{
487 struct gfs2_glock *gl = gh->gh_gl;
488 struct gfs2_sbd *sdp = gl->gl_sbd;
489
490 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
491 if (list_empty(&gl->gl_holders)) {
492 gl->gl_req_gh = gh;
493 set_bit(GLF_LOCK, &gl->gl_flags);
494 spin_unlock(&gl->gl_spin);
495
496 if (atomic_read(&sdp->sd_reclaim_count) >
497 gfs2_tune_get(sdp, gt_reclaim_limit) &&
498 !(gh->gh_flags & LM_FLAG_PRIORITY)) {
499 gfs2_reclaim_glock(sdp);
500 gfs2_reclaim_glock(sdp);
501 }
502
503 gfs2_glock_xmote_th(gh->gh_gl, gh);
504 spin_lock(&gl->gl_spin);
505 }
506 return 1;
507 }
508
509 if (list_empty(&gl->gl_holders)) {
510 set_bit(HIF_FIRST, &gh->gh_iflags);
511 set_bit(GLF_LOCK, &gl->gl_flags);
512 } else {
513 struct gfs2_holder *next_gh;
514 if (gh->gh_state == LM_ST_EXCLUSIVE)
515 return 1;
516 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
517 gh_list);
518 if (next_gh->gh_state == LM_ST_EXCLUSIVE)
519 return 1;
520 }
521
522 list_move_tail(&gh->gh_list, &gl->gl_holders);
523 gh->gh_error = 0;
524 set_bit(HIF_HOLDER, &gh->gh_iflags);
525
526 gfs2_holder_wake(gh);
527
528 return 0;
529}
530
531/**
532 * rq_demote - process a demote request in the queue
533 * @gh: the glock holder
534 *
535 * Returns: 1 if the queue is blocked
536 */
537
538static int rq_demote(struct gfs2_glock *gl)
539{
540 if (!list_empty(&gl->gl_holders))
541 return 1;
542
543 if (gl->gl_state == gl->gl_demote_state ||
544 gl->gl_state == LM_ST_UNLOCKED) {
545 gfs2_demote_wake(gl);
546 return 0;
547 }
548 set_bit(GLF_LOCK, &gl->gl_flags);
549 if (gl->gl_demote_state == LM_ST_UNLOCKED ||
550 gl->gl_state != LM_ST_EXCLUSIVE) {
551 spin_unlock(&gl->gl_spin);
552 gfs2_glock_drop_th(gl);
553 } else {
554 spin_unlock(&gl->gl_spin);
555 gfs2_glock_xmote_th(gl, NULL);
556 }
557 spin_lock(&gl->gl_spin);
558
559 return 0;
560}
561
562/**
563 * run_queue - process holder structures on a glock
564 * @gl: the glock
565 *
566 */
567static void run_queue(struct gfs2_glock *gl)
568{
569 struct gfs2_holder *gh;
570 int blocked = 1;
571
572 for (;;) {
573 if (test_bit(GLF_LOCK, &gl->gl_flags))
574 break;
575
576 if (!list_empty(&gl->gl_waiters1)) {
577 gh = list_entry(gl->gl_waiters1.next,
578 struct gfs2_holder, gh_list);
579
580 if (test_bit(HIF_MUTEX, &gh->gh_iflags))
581 blocked = rq_mutex(gh);
582 else
583 gfs2_assert_warn(gl->gl_sbd, 0);
584
585 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
586 blocked = rq_demote(gl);
587 } else if (!list_empty(&gl->gl_waiters3)) {
588 gh = list_entry(gl->gl_waiters3.next,
589 struct gfs2_holder, gh_list);
590
591 if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
592 blocked = rq_promote(gh);
593 else
594 gfs2_assert_warn(gl->gl_sbd, 0);
595
596 } else
597 break;
598
599 if (blocked)
600 break;
601 }
602}
603
604/**
605 * gfs2_glmutex_lock - acquire a local lock on a glock
606 * @gl: the glock
607 *
608 * Gives caller exclusive access to manipulate a glock structure.
609 */
610
611static void gfs2_glmutex_lock(struct gfs2_glock *gl)
612{
613 struct gfs2_holder gh;
614
615 gfs2_holder_init(gl, 0, 0, &gh);
616 set_bit(HIF_MUTEX, &gh.gh_iflags);
617 if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
618 BUG();
619
620 spin_lock(&gl->gl_spin);
621 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
622 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
623 } else {
624 gl->gl_owner_pid = current->pid;
625 gl->gl_ip = (unsigned long)__builtin_return_address(0);
626 clear_bit(HIF_WAIT, &gh.gh_iflags);
627 smp_mb();
628 wake_up_bit(&gh.gh_iflags, HIF_WAIT);
629 }
630 spin_unlock(&gl->gl_spin);
631
632 wait_on_holder(&gh);
633 gfs2_holder_uninit(&gh);
634}
635
636/**
637 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
638 * @gl: the glock
639 *
640 * Returns: 1 if the glock is acquired
641 */
642
643static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
644{
645 int acquired = 1;
646
647 spin_lock(&gl->gl_spin);
648 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
649 acquired = 0;
650 } else {
651 gl->gl_owner_pid = current->pid;
652 gl->gl_ip = (unsigned long)__builtin_return_address(0);
653 }
654 spin_unlock(&gl->gl_spin);
655
656 return acquired;
657}
658
659/**
660 * gfs2_glmutex_unlock - release a local lock on a glock
661 * @gl: the glock
662 *
663 */
664
665static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
666{
667 spin_lock(&gl->gl_spin);
668 clear_bit(GLF_LOCK, &gl->gl_flags);
669 gl->gl_owner_pid = 0;
670 gl->gl_ip = 0;
671 run_queue(gl);
672 BUG_ON(!spin_is_locked(&gl->gl_spin));
673 spin_unlock(&gl->gl_spin);
674}
675
676/**
677 * handle_callback - process a demote request
678 * @gl: the glock
679 * @state: the state the caller wants us to change to
680 *
681 * There are only two requests that we are going to see in actual
682 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
683 */
684
685static void handle_callback(struct gfs2_glock *gl, unsigned int state, int remote)
686{
687 spin_lock(&gl->gl_spin);
688 if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
689 gl->gl_demote_state = state;
690 gl->gl_demote_time = jiffies;
691 if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
692 gl->gl_object) {
693 struct inode *inode = igrab(gl->gl_object);
694 spin_unlock(&gl->gl_spin);
695 if (inode) {
696 d_prune_aliases(inode);
697 iput(inode);
698 }
699 return;
700 }
701 } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
702 gl->gl_demote_state != state) {
703 gl->gl_demote_state = LM_ST_UNLOCKED;
704 }
705 spin_unlock(&gl->gl_spin);
706}
707
708/**
709 * state_change - record that the glock is now in a different state
710 * @gl: the glock
711 * @new_state: the new state
712 *
713 */
714
715static void state_change(struct gfs2_glock *gl, unsigned int new_state)
716{
717 int held1, held2;
718
719 held1 = (gl->gl_state != LM_ST_UNLOCKED);
720 held2 = (new_state != LM_ST_UNLOCKED);
721
722 if (held1 != held2) {
723 if (held2)
724 gfs2_glock_hold(gl);
725 else
726 gfs2_glock_put(gl);
727 }
728
729 gl->gl_state = new_state;
730}
731
732/**
733 * xmote_bh - Called after the lock module is done acquiring a lock
734 * @gl: The glock in question
735 * @ret: the int returned from the lock module
736 *
737 */
738
739static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
740{
741 struct gfs2_sbd *sdp = gl->gl_sbd;
742 const struct gfs2_glock_operations *glops = gl->gl_ops;
743 struct gfs2_holder *gh = gl->gl_req_gh;
744 int prev_state = gl->gl_state;
745 int op_done = 1;
746
747 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
748 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
749 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
750
751 state_change(gl, ret & LM_OUT_ST_MASK);
752
753 if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
754 if (glops->go_inval)
755 glops->go_inval(gl, DIO_METADATA);
756 } else if (gl->gl_state == LM_ST_DEFERRED) {
757 /* We might not want to do this here.
758 Look at moving to the inode glops. */
759 if (glops->go_inval)
760 glops->go_inval(gl, 0);
761 }
762
763 /* Deal with each possible exit condition */
764
765 if (!gh) {
766 gl->gl_stamp = jiffies;
767 if (ret & LM_OUT_CANCELED) {
768 op_done = 0;
769 } else {
770 spin_lock(&gl->gl_spin);
771 if (gl->gl_state != gl->gl_demote_state) {
772 gl->gl_req_bh = NULL;
773 spin_unlock(&gl->gl_spin);
774 gfs2_glock_drop_th(gl);
775 gfs2_glock_put(gl);
776 return;
777 }
778 gfs2_demote_wake(gl);
779 spin_unlock(&gl->gl_spin);
780 }
781 } else {
782 spin_lock(&gl->gl_spin);
783 list_del_init(&gh->gh_list);
784 gh->gh_error = -EIO;
785 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
786 goto out;
787 gh->gh_error = GLR_CANCELED;
788 if (ret & LM_OUT_CANCELED)
789 goto out;
790 if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
791 list_add_tail(&gh->gh_list, &gl->gl_holders);
792 gh->gh_error = 0;
793 set_bit(HIF_HOLDER, &gh->gh_iflags);
794 set_bit(HIF_FIRST, &gh->gh_iflags);
795 op_done = 0;
796 goto out;
797 }
798 gh->gh_error = GLR_TRYFAILED;
799 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
800 goto out;
801 gh->gh_error = -EINVAL;
802 if (gfs2_assert_withdraw(sdp, 0) == -1)
803 fs_err(sdp, "ret = 0x%.8X\n", ret);
804out:
805 spin_unlock(&gl->gl_spin);
806 }
807
808 if (glops->go_xmote_bh)
809 glops->go_xmote_bh(gl);
810
811 if (op_done) {
812 spin_lock(&gl->gl_spin);
813 gl->gl_req_gh = NULL;
814 gl->gl_req_bh = NULL;
815 clear_bit(GLF_LOCK, &gl->gl_flags);
816 run_queue(gl);
817 spin_unlock(&gl->gl_spin);
818 }
819
820 gfs2_glock_put(gl);
821
822 if (gh)
823 gfs2_holder_wake(gh);
824}
825
826/**
827 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
828 * @gl: The glock in question
829 * @state: the requested state
830 * @flags: modifier flags to the lock call
831 *
832 */
833
834static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
835{
836 struct gfs2_sbd *sdp = gl->gl_sbd;
837 int flags = gh ? gh->gh_flags : 0;
838 unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
839 const struct gfs2_glock_operations *glops = gl->gl_ops;
840 int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
841 LM_FLAG_NOEXP | LM_FLAG_ANY |
842 LM_FLAG_PRIORITY);
843 unsigned int lck_ret;
844
845 if (glops->go_xmote_th)
846 glops->go_xmote_th(gl);
847
848 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
849 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
850 gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
851 gfs2_assert_warn(sdp, state != gl->gl_state);
852
853 gfs2_glock_hold(gl);
854 gl->gl_req_bh = xmote_bh;
855
856 lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
857
858 if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
859 return;
860
861 if (lck_ret & LM_OUT_ASYNC)
862 gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
863 else
864 xmote_bh(gl, lck_ret);
865}
866
867/**
868 * drop_bh - Called after a lock module unlock completes
869 * @gl: the glock
870 * @ret: the return status
871 *
872 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
873 * Doesn't drop the reference on the glock the top half took out
874 *
875 */
876
877static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
878{
879 struct gfs2_sbd *sdp = gl->gl_sbd;
880 const struct gfs2_glock_operations *glops = gl->gl_ops;
881 struct gfs2_holder *gh = gl->gl_req_gh;
882
883 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
884 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
885 gfs2_assert_warn(sdp, !ret);
886
887 state_change(gl, LM_ST_UNLOCKED);
888 gfs2_demote_wake(gl);
889
890 if (glops->go_inval)
891 glops->go_inval(gl, DIO_METADATA);
892
893 if (gh) {
894 spin_lock(&gl->gl_spin);
895 list_del_init(&gh->gh_list);
896 gh->gh_error = 0;
897 spin_unlock(&gl->gl_spin);
898 }
899
900 spin_lock(&gl->gl_spin);
901 gl->gl_req_gh = NULL;
902 gl->gl_req_bh = NULL;
903 clear_bit(GLF_LOCK, &gl->gl_flags);
904 run_queue(gl);
905 spin_unlock(&gl->gl_spin);
906
907 gfs2_glock_put(gl);
908
909 if (gh)
910 gfs2_holder_wake(gh);
911}
912
913/**
914 * gfs2_glock_drop_th - call into the lock module to unlock a lock
915 * @gl: the glock
916 *
917 */
918
919static void gfs2_glock_drop_th(struct gfs2_glock *gl)
920{
921 struct gfs2_sbd *sdp = gl->gl_sbd;
922 const struct gfs2_glock_operations *glops = gl->gl_ops;
923 unsigned int ret;
924
925 if (glops->go_drop_th)
926 glops->go_drop_th(gl);
927
928 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
929 gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
930 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
931
932 gfs2_glock_hold(gl);
933 gl->gl_req_bh = drop_bh;
934
935 ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
936
937 if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
938 return;
939
940 if (!ret)
941 drop_bh(gl, ret);
942 else
943 gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
944}
945
946/**
947 * do_cancels - cancel requests for locks stuck waiting on an expire flag
948 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
949 *
950 * Don't cancel GL_NOCANCEL requests.
951 */
952
953static void do_cancels(struct gfs2_holder *gh)
954{
955 struct gfs2_glock *gl = gh->gh_gl;
956
957 spin_lock(&gl->gl_spin);
958
959 while (gl->gl_req_gh != gh &&
960 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
961 !list_empty(&gh->gh_list)) {
962 if (gl->gl_req_bh && !(gl->gl_req_gh &&
963 (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
964 spin_unlock(&gl->gl_spin);
965 gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
966 msleep(100);
967 spin_lock(&gl->gl_spin);
968 } else {
969 spin_unlock(&gl->gl_spin);
970 msleep(100);
971 spin_lock(&gl->gl_spin);
972 }
973 }
974
975 spin_unlock(&gl->gl_spin);
976}
977
978/**
979 * glock_wait_internal - wait on a glock acquisition
980 * @gh: the glock holder
981 *
982 * Returns: 0 on success
983 */
984
985static int glock_wait_internal(struct gfs2_holder *gh)
986{
987 struct gfs2_glock *gl = gh->gh_gl;
988 struct gfs2_sbd *sdp = gl->gl_sbd;
989 const struct gfs2_glock_operations *glops = gl->gl_ops;
990
991 if (test_bit(HIF_ABORTED, &gh->gh_iflags))
992 return -EIO;
993
994 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
995 spin_lock(&gl->gl_spin);
996 if (gl->gl_req_gh != gh &&
997 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
998 !list_empty(&gh->gh_list)) {
999 list_del_init(&gh->gh_list);
1000 gh->gh_error = GLR_TRYFAILED;
1001 run_queue(gl);
1002 spin_unlock(&gl->gl_spin);
1003 return gh->gh_error;
1004 }
1005 spin_unlock(&gl->gl_spin);
1006 }
1007
1008 if (gh->gh_flags & LM_FLAG_PRIORITY)
1009 do_cancels(gh);
1010
1011 wait_on_holder(gh);
1012 if (gh->gh_error)
1013 return gh->gh_error;
1014
1015 gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
1016 gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
1017 gh->gh_flags));
1018
1019 if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
1020 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1021
1022 if (glops->go_lock) {
1023 gh->gh_error = glops->go_lock(gh);
1024 if (gh->gh_error) {
1025 spin_lock(&gl->gl_spin);
1026 list_del_init(&gh->gh_list);
1027 spin_unlock(&gl->gl_spin);
1028 }
1029 }
1030
1031 spin_lock(&gl->gl_spin);
1032 gl->gl_req_gh = NULL;
1033 gl->gl_req_bh = NULL;
1034 clear_bit(GLF_LOCK, &gl->gl_flags);
1035 run_queue(gl);
1036 spin_unlock(&gl->gl_spin);
1037 }
1038
1039 return gh->gh_error;
1040}
1041
1042static inline struct gfs2_holder *
1043find_holder_by_owner(struct list_head *head, pid_t pid)
1044{
1045 struct gfs2_holder *gh;
1046
1047 list_for_each_entry(gh, head, gh_list) {
1048 if (gh->gh_owner_pid == pid)
1049 return gh;
1050 }
1051
1052 return NULL;
1053}
1054
1055static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
1056{
1057 va_list args;
1058
1059 va_start(args, fmt);
1060 if (gi) {
1061 vsprintf(gi->string, fmt, args);
1062 seq_printf(gi->seq, gi->string);
1063 }
1064 else
1065 vprintk(fmt, args);
1066 va_end(args);
1067}
1068
b3b94faa
DT
1069/**
1070 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1071 * @gh: the holder structure to add
1072 *
1073 */
1074
1075static void add_to_queue(struct gfs2_holder *gh)
1076{
1077 struct gfs2_glock *gl = gh->gh_gl;
1078 struct gfs2_holder *existing;
1079
1080 BUG_ON(!gh->gh_owner_pid);
1081 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1082 BUG();
1083
1084 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
1085 if (existing) {
1086 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1087 printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
1088 printk(KERN_INFO "lock type : %d lock state : %d\n",
1089 existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
1090 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1091 printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
1092 printk(KERN_INFO "lock type : %d lock state : %d\n",
1093 gl->gl_name.ln_type, gl->gl_state);
1094 BUG();
1095 }
1096
1097 existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
1098 if (existing) {
1099 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1100 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1101 BUG();
1102 }
1103
1104 if (gh->gh_flags & LM_FLAG_PRIORITY)
1105 list_add(&gh->gh_list, &gl->gl_waiters3);
1106 else
1107 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1108}
1109
1110/**
1111 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1112 * @gh: the holder structure
1113 *
1114 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1115 *
1116 * Returns: 0, GLR_TRYFAILED, or errno on failure
1117 */
1118
1119int gfs2_glock_nq(struct gfs2_holder *gh)
1120{
1121 struct gfs2_glock *gl = gh->gh_gl;
1122 struct gfs2_sbd *sdp = gl->gl_sbd;
1123 int error = 0;
1124
1125restart:
1126 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1127 set_bit(HIF_ABORTED, &gh->gh_iflags);
1128 return -EIO;
1129 }
1130
1131 set_bit(HIF_PROMOTE, &gh->gh_iflags);
1132
1133 spin_lock(&gl->gl_spin);
1134 add_to_queue(gh);
1135 run_queue(gl);
1136 spin_unlock(&gl->gl_spin);
1137
1138 if (!(gh->gh_flags & GL_ASYNC)) {
1139 error = glock_wait_internal(gh);
1140 if (error == GLR_CANCELED) {
1141 msleep(100);
1142 goto restart;
1143 }
1144 }
1145
1146 return error;
1147}
1148
1149/**
1150 * gfs2_glock_poll - poll to see if an async request has been completed
1151 * @gh: the holder
1152 *
1153 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1154 */
1155
1156int gfs2_glock_poll(struct gfs2_holder *gh)
1157{
1158 struct gfs2_glock *gl = gh->gh_gl;
1159 int ready = 0;
1160
1161 spin_lock(&gl->gl_spin);
1162
1163 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1164 ready = 1;
1165 else if (list_empty(&gh->gh_list)) {
1166 if (gh->gh_error == GLR_CANCELED) {
1167 spin_unlock(&gl->gl_spin);
1168 msleep(100);
1169 if (gfs2_glock_nq(gh))
1170 return 1;
1171 return 0;
1172 } else
1173 ready = 1;
1174 }
1175
1176 spin_unlock(&gl->gl_spin);
1177
1178 return ready;
1179}
1180
1181/**
1182 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
1183 * @gh: the holder structure
1184 *
1185 * Returns: 0, GLR_TRYFAILED, or errno on failure
1186 */
1187
1188int gfs2_glock_wait(struct gfs2_holder *gh)
1189{
1190 int error;
1191
1192 error = glock_wait_internal(gh);
1193 if (error == GLR_CANCELED) {
1194 msleep(100);
1195 gh->gh_flags &= ~GL_ASYNC;
1196 error = gfs2_glock_nq(gh);
1197 }
1198
1199 return error;
1200}
1201
1202/**
1203 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1204 * @gh: the glock holder
1205 *
1206 */
1207
1208void gfs2_glock_dq(struct gfs2_holder *gh)
1209{
1210 struct gfs2_glock *gl = gh->gh_gl;
1211 const struct gfs2_glock_operations *glops = gl->gl_ops;
1212
1213 if (gh->gh_flags & GL_NOCACHE)
1214 handle_callback(gl, LM_ST_UNLOCKED, 0);
1215
1216 gfs2_glmutex_lock(gl);
1217
1218 spin_lock(&gl->gl_spin);
1219 list_del_init(&gh->gh_list);
1220
1221 if (list_empty(&gl->gl_holders)) {
1222 spin_unlock(&gl->gl_spin);
1223
1224 if (glops->go_unlock)
1225 glops->go_unlock(gh);
1226
1227 spin_lock(&gl->gl_spin);
1228 gl->gl_stamp = jiffies;
1229 }
1230
1231 clear_bit(GLF_LOCK, &gl->gl_flags);
1232 run_queue(gl);
1233 spin_unlock(&gl->gl_spin);
1234}
1235
1236void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1237{
1238 struct gfs2_glock *gl = gh->gh_gl;
1239 gfs2_glock_dq(gh);
1240 wait_on_demote(gl);
1241}
1242
1243/**
1244 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1245 * @gh: the holder structure
1246 *
1247 */
1248
1249void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1250{
1251 gfs2_glock_dq(gh);
1252 gfs2_holder_uninit(gh);
1253}
1254
1255/**
1256 * gfs2_glock_nq_num - acquire a glock based on lock number
1257 * @sdp: the filesystem
1258 * @number: the lock number
1259 * @glops: the glock operations for the type of glock
1260 * @state: the state to acquire the glock in
1261 * @flags: modifier flags for the acquisition
1262 * @gh: the struct gfs2_holder
1263 *
1264 * Returns: errno
1265 */
1266
1267int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1268 const struct gfs2_glock_operations *glops,
1269 unsigned int state, int flags, struct gfs2_holder *gh)
1270{
1271 struct gfs2_glock *gl;
1272 int error;
1273
1274 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1275 if (!error) {
1276 error = gfs2_glock_nq_init(gl, state, flags, gh);
1277 gfs2_glock_put(gl);
1278 }
1279
1280 return error;
1281}
1282
1283/**
1284 * glock_compare - Compare two struct gfs2_glock structures for sorting
1285 * @arg_a: the first structure
1286 * @arg_b: the second structure
1287 *
1288 */
1289
1290static int glock_compare(const void *arg_a, const void *arg_b)
1291{
1292 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1293 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1294 const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1295 const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1296
1297 if (a->ln_number > b->ln_number)
1298 return 1;
1299 if (a->ln_number < b->ln_number)
1300 return -1;
1301 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1302 return 0;
1303}
1304
1305/**
1306 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
1307 * @num_gh: the number of structures
1308 * @ghs: an array of struct gfs2_holder structures
1309 *
1310 * Returns: 0 on success (all glocks acquired),
1311 * errno on failure (no glocks acquired)
1312 */
1313
1314static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1315 struct gfs2_holder **p)
1316{
1317 unsigned int x;
1318 int error = 0;
1319
1320 for (x = 0; x < num_gh; x++)
1321 p[x] = &ghs[x];
1322
1323 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1324
1325 for (x = 0; x < num_gh; x++) {
1326 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1327
1328 error = gfs2_glock_nq(p[x]);
1329 if (error) {
1330 while (x--)
1331 gfs2_glock_dq(p[x]);
1332 break;
1333 }
1334 }
1335
1336 return error;
1337}
1338
1339/**
1340 * gfs2_glock_nq_m - acquire multiple glocks
1341 * @num_gh: the number of structures
1342 * @ghs: an array of struct gfs2_holder structures
1343 *
1344 *
1345 * Returns: 0 on success (all glocks acquired),
1346 * errno on failure (no glocks acquired)
1347 */
1348
1349int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1350{
1351 struct gfs2_holder *tmp[4];
1352 struct gfs2_holder **pph = tmp;
1353 int error = 0;
1354
1355 switch(num_gh) {
1356 case 0:
1357 return 0;
1358 case 1:
1359 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1360 return gfs2_glock_nq(ghs);
1361 default:
1362 if (num_gh <= 4)
1363 break;
1364 pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1365 if (!pph)
1366 return -ENOMEM;
1367 }
1368
1369 error = nq_m_sync(num_gh, ghs, pph);
1370
1371 if (pph != tmp)
1372 kfree(pph);
1373
1374 return error;
1375}
1376
1377/**
1378 * gfs2_glock_dq_m - release multiple glocks
1379 * @num_gh: the number of structures
1380 * @ghs: an array of struct gfs2_holder structures
1381 *
1382 */
1383
1384void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1385{
1386 unsigned int x;
1387
1388 for (x = 0; x < num_gh; x++)
1389 gfs2_glock_dq(&ghs[x]);
1390}
1391
1392/**
1393 * gfs2_glock_dq_uninit_m - release multiple glocks
1394 * @num_gh: the number of structures
1395 * @ghs: an array of struct gfs2_holder structures
1396 *
1397 */
1398
1399void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1400{
1401 unsigned int x;
1402
1403 for (x = 0; x < num_gh; x++)
1404 gfs2_glock_dq_uninit(&ghs[x]);
1405}
1406
1407/**
1408 * gfs2_lvb_hold - attach a LVB from a glock
1409 * @gl: The glock in question
1410 *
1411 */
1412
1413int gfs2_lvb_hold(struct gfs2_glock *gl)
1414{
1415 int error;
1416
1417 gfs2_glmutex_lock(gl);
1418
1419 if (!atomic_read(&gl->gl_lvb_count)) {
1420 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1421 if (error) {
1422 gfs2_glmutex_unlock(gl);
1423 return error;
1424 }
1425 gfs2_glock_hold(gl);
1426 }
1427 atomic_inc(&gl->gl_lvb_count);
1428
1429 gfs2_glmutex_unlock(gl);
1430
1431 return 0;
1432}
1433
1434/**
1435 * gfs2_lvb_unhold - detach a LVB from a glock
1436 * @gl: The glock in question
1437 *
1438 */
1439
1440void gfs2_lvb_unhold(struct gfs2_glock *gl)
1441{
1442 gfs2_glock_hold(gl);
1443 gfs2_glmutex_lock(gl);
1444
1445 gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1446 if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1447 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1448 gl->gl_lvb = NULL;
1449 gfs2_glock_put(gl);
1450 }
1451
1452 gfs2_glmutex_unlock(gl);
1453 gfs2_glock_put(gl);
1454}
1455
1456static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1457 unsigned int state)
1458{
1459 struct gfs2_glock *gl;
1460
1461 gl = gfs2_glock_find(sdp, name);
1462 if (!gl)
1463 return;
1464
1465 handle_callback(gl, state, 1);
1466
1467 spin_lock(&gl->gl_spin);
1468 run_queue(gl);
1469 spin_unlock(&gl->gl_spin);
1470
1471 gfs2_glock_put(gl);
1472}
1473
1474/**
1475 * gfs2_glock_cb - Callback used by locking module
1476 * @sdp: Pointer to the superblock
1477 * @type: Type of callback
1478 * @data: Type dependent data pointer
1479 *
1480 * Called by the locking module when it wants to tell us something.
1481 * Either we need to drop a lock, one of our ASYNC requests completed, or
1482 * a journal from another client needs to be recovered.
1483 */
1484
1485void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1486{
1487 struct gfs2_sbd *sdp = cb_data;
1488
1489 switch (type) {
1490 case LM_CB_NEED_E:
1491 blocking_cb(sdp, data, LM_ST_UNLOCKED);
1492 return;
1493
1494 case LM_CB_NEED_D:
1495 blocking_cb(sdp, data, LM_ST_DEFERRED);
1496 return;
1497
1498 case LM_CB_NEED_S:
1499 blocking_cb(sdp, data, LM_ST_SHARED);
1500 return;
1501
1502 case LM_CB_ASYNC: {
1503 struct lm_async_cb *async = data;
1504 struct gfs2_glock *gl;
1505
1506 down_read(&gfs2_umount_flush_sem);
1507 gl = gfs2_glock_find(sdp, &async->lc_name);
1508 if (gfs2_assert_warn(sdp, gl))
1509 return;
1510 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1511 gl->gl_req_bh(gl, async->lc_ret);
1512 gfs2_glock_put(gl);
1513 up_read(&gfs2_umount_flush_sem);
1514 return;
1515 }
1516
1517 case LM_CB_NEED_RECOVERY:
1518 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1519 if (sdp->sd_recoverd_process)
1520 wake_up_process(sdp->sd_recoverd_process);
1521 return;
1522
1523 case LM_CB_DROPLOCKS:
1524 gfs2_gl_hash_clear(sdp, NO_WAIT);
1525 gfs2_quota_scan(sdp);
1526 return;
1527
1528 default:
1529 gfs2_assert_warn(sdp, 0);
1530 return;
1531 }
1532}
1533
1534/**
1535 * demote_ok - Check to see if it's ok to unlock a glock
1536 * @gl: the glock
1537 *
1538 * Returns: 1 if it's ok
1539 */
1540
1541static int demote_ok(struct gfs2_glock *gl)
1542{
1543 const struct gfs2_glock_operations *glops = gl->gl_ops;
1544 int demote = 1;
1545
1546 if (test_bit(GLF_STICKY, &gl->gl_flags))
1547 demote = 0;
1548 else if (glops->go_demote_ok)
1549 demote = glops->go_demote_ok(gl);
1550
1551 return demote;
1552}
1553
1554/**
1555 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1556 * @gl: the glock
1557 *
1558 */
1559
1560void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1561{
1562 struct gfs2_sbd *sdp = gl->gl_sbd;
1563
1564 spin_lock(&sdp->sd_reclaim_lock);
1565 if (list_empty(&gl->gl_reclaim)) {
1566 gfs2_glock_hold(gl);
1567 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1568 atomic_inc(&sdp->sd_reclaim_count);
1569 }
1570 spin_unlock(&sdp->sd_reclaim_lock);
1571
1572 wake_up(&sdp->sd_reclaim_wq);
1573}
1574
1575/**
1576 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1577 * @sdp: the filesystem
1578 *
1579 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
1580 * different glock and we notice that there are a lot of glocks in the
1581 * reclaim list.
1582 *
1583 */
1584
1585void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1586{
1587 struct gfs2_glock *gl;
1588
1589 spin_lock(&sdp->sd_reclaim_lock);
1590 if (list_empty(&sdp->sd_reclaim_list)) {
1591 spin_unlock(&sdp->sd_reclaim_lock);
1592 return;
1593 }
1594 gl = list_entry(sdp->sd_reclaim_list.next,
1595 struct gfs2_glock, gl_reclaim);
1596 list_del_init(&gl->gl_reclaim);
1597 spin_unlock(&sdp->sd_reclaim_lock);
1598
1599 atomic_dec(&sdp->sd_reclaim_count);
1600 atomic_inc(&sdp->sd_reclaimed);
1601
1602 if (gfs2_glmutex_trylock(gl)) {
1603 if (list_empty(&gl->gl_holders) &&
1604 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1605 handle_callback(gl, LM_ST_UNLOCKED, 0);
1606 gfs2_glmutex_unlock(gl);
1607 }
1608
1609 gfs2_glock_put(gl);
1610}
1611
1612/**
1613 * examine_bucket - Call a function for glock in a hash bucket
1614 * @examiner: the function
1615 * @sdp: the filesystem
1616 * @bucket: the bucket
1617 *
1618 * Returns: 1 if the bucket has entries
1619 */
1620
1621static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1622 unsigned int hash)
1623{
1624 struct gfs2_glock *gl, *prev = NULL;
1625 int has_entries = 0;
1626 struct hlist_head *head = &gl_hash_table[hash].hb_list;
1627
1628 read_lock(gl_lock_addr(hash));
1629 /* Can't use hlist_for_each_entry - don't want prefetch here */
1630 if (hlist_empty(head))
1631 goto out;
1632 gl = list_entry(head->first, struct gfs2_glock, gl_list);
1633 while(1) {
1634 if (!sdp || gl->gl_sbd == sdp) {
1635 gfs2_glock_hold(gl);
1636 read_unlock(gl_lock_addr(hash));
1637 if (prev)
1638 gfs2_glock_put(prev);
1639 prev = gl;
1640 examiner(gl);
1641 has_entries = 1;
1642 read_lock(gl_lock_addr(hash));
1643 }
1644 if (gl->gl_list.next == NULL)
1645 break;
1646 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1647 }
1648out:
1649 read_unlock(gl_lock_addr(hash));
1650 if (prev)
1651 gfs2_glock_put(prev);
1652 cond_resched();
1653 return has_entries;
1654}
1655
1656/**
1657 * scan_glock - look at a glock and see if we can reclaim it
1658 * @gl: the glock to look at
1659 *
1660 */
1661
1662static void scan_glock(struct gfs2_glock *gl)
1663{
1664 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1665 return;
1666
1667 if (gfs2_glmutex_trylock(gl)) {
1668 if (list_empty(&gl->gl_holders) &&
1669 gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1670 goto out_schedule;
1671 gfs2_glmutex_unlock(gl);
1672 }
1673 return;
1674
1675out_schedule:
1676 gfs2_glmutex_unlock(gl);
1677 gfs2_glock_schedule_for_reclaim(gl);
1678}
1679
1680/**
1681 * clear_glock - look at a glock and see if we can free it from glock cache
1682 * @gl: the glock to look at
1683 *
1684 */
1685
1686static void clear_glock(struct gfs2_glock *gl)
1687{
1688 struct gfs2_sbd *sdp = gl->gl_sbd;
1689 int released;
1690
1691 spin_lock(&sdp->sd_reclaim_lock);
1692 if (!list_empty(&gl->gl_reclaim)) {
1693 list_del_init(&gl->gl_reclaim);
1694 atomic_dec(&sdp->sd_reclaim_count);
1695 spin_unlock(&sdp->sd_reclaim_lock);
1696 released = gfs2_glock_put(gl);
1697 gfs2_assert(sdp, !released);
1698 } else {
1699 spin_unlock(&sdp->sd_reclaim_lock);
1700 }
1701
1702 if (gfs2_glmutex_trylock(gl)) {
1703 if (list_empty(&gl->gl_holders) &&
1704 gl->gl_state != LM_ST_UNLOCKED)
1705 handle_callback(gl, LM_ST_UNLOCKED, 0);
1706 gfs2_glmutex_unlock(gl);
1707 }
1708}
1709
1710/**
1711 * gfs2_gl_hash_clear - Empty out the glock hash table
1712 * @sdp: the filesystem
1713 * @wait: wait until it's all gone
1714 *
1715 * Called when unmounting the filesystem, or when inter-node lock manager
1716 * requests DROPLOCKS because it is running out of capacity.
1717 */
1718
1719void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1720{
1721 unsigned long t;
1722 unsigned int x;
1723 int cont;
1724
1725 t = jiffies;
1726
1727 for (;;) {
1728 cont = 0;
1729 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1730 if (examine_bucket(clear_glock, sdp, x))
1731 cont = 1;
1732 }
1733
1734 if (!wait || !cont)
1735 break;
1736
1737 if (time_after_eq(jiffies,
1738 t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1739 fs_warn(sdp, "Unmount seems to be stalled. "
1740 "Dumping lock state...\n");
1741 gfs2_dump_lockstate(sdp);
1742 t = jiffies;
1743 }
1744
1745 down_write(&gfs2_umount_flush_sem);
1746 invalidate_inodes(sdp->sd_vfs);
1747 up_write(&gfs2_umount_flush_sem);
1748 msleep(10);
1749 }
1750}
1751
1752/*
1753 * Diagnostic routines to help debug distributed deadlock
1754 */
1755
1756static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1757 unsigned long address)
1758{
1759 char buffer[KSYM_SYMBOL_LEN];
1760
1761 sprint_symbol(buffer, address);
1762 print_dbg(gi, fmt, buffer);
1763}
1764
1765/**
1766 * dump_holder - print information about a glock holder
1767 * @str: a string naming the type of holder
1768 * @gh: the glock holder
1769 *
1770 * Returns: 0 on success, -ENOBUFS when we run out of space
1771 */
1772
1773static int dump_holder(struct glock_iter *gi, char *str,
1774 struct gfs2_holder *gh)
1775{
1776 unsigned int x;
1777 struct task_struct *gh_owner;
1778
1779 print_dbg(gi, " %s\n", str);
1780 if (gh->gh_owner_pid) {
1781 print_dbg(gi, " owner = %ld ", (long)gh->gh_owner_pid);
1782 gh_owner = find_task_by_pid(gh->gh_owner_pid);
1783 if (gh_owner)
1784 print_dbg(gi, "(%s)\n", gh_owner->comm);
1785 else
1786 print_dbg(gi, "(ended)\n");
1787 } else
1788 print_dbg(gi, " owner = -1\n");
1789 print_dbg(gi, " gh_state = %u\n", gh->gh_state);
1790 print_dbg(gi, " gh_flags =");
1791 for (x = 0; x < 32; x++)
1792 if (gh->gh_flags & (1 << x))
1793 print_dbg(gi, " %u", x);
1794 print_dbg(gi, " \n");
1795 print_dbg(gi, " error = %d\n", gh->gh_error);
1796 print_dbg(gi, " gh_iflags =");
1797 for (x = 0; x < 32; x++)
1798 if (test_bit(x, &gh->gh_iflags))
1799 print_dbg(gi, " %u", x);
1800 print_dbg(gi, " \n");
1801 gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip);
1802
1803 return 0;
1804}
1805
1806/**
1807 * dump_inode - print information about an inode
1808 * @ip: the inode
1809 *
1810 * Returns: 0 on success, -ENOBUFS when we run out of space
1811 */
1812
1813static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
1814{
1815 unsigned int x;
1816
1817 print_dbg(gi, " Inode:\n");
1818 print_dbg(gi, " num = %llu/%llu\n",
1819 (unsigned long long)ip->i_no_formal_ino,
1820 (unsigned long long)ip->i_no_addr);
1821 print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode));
1822 print_dbg(gi, " i_flags =");
1823 for (x = 0; x < 32; x++)
1824 if (test_bit(x, &ip->i_flags))
1825 print_dbg(gi, " %u", x);
1826 print_dbg(gi, " \n");
1827 return 0;
1828}
1829
1830/**
1831 * dump_glock - print information about a glock
1832 * @gl: the glock
1833 * @count: where we are in the buffer
1834 *
1835 * Returns: 0 on success, -ENOBUFS when we run out of space
1836 */
1837
1838static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1839{
1840 struct gfs2_holder *gh;
1841 unsigned int x;
1842 int error = -ENOBUFS;
1843 struct task_struct *gl_owner;
1844
1845 spin_lock(&gl->gl_spin);
1846
1847 print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
1848 (unsigned long long)gl->gl_name.ln_number);
1849 print_dbg(gi, " gl_flags =");
1850 for (x = 0; x < 32; x++) {
1851 if (test_bit(x, &gl->gl_flags))
1852 print_dbg(gi, " %u", x);
1853 }
1854 if (!test_bit(GLF_LOCK, &gl->gl_flags))
1855 print_dbg(gi, " (unlocked)");
1856 print_dbg(gi, " \n");
1857 print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
1858 print_dbg(gi, " gl_state = %u\n", gl->gl_state);
1859 if (gl->gl_owner_pid) {
1860 gl_owner = find_task_by_pid(gl->gl_owner_pid);
1861 if (gl_owner)
1862 print_dbg(gi, " gl_owner = pid %d (%s)\n",
1863 gl->gl_owner_pid, gl_owner->comm);
1864 else
1865 print_dbg(gi, " gl_owner = %d (ended)\n",
1866 gl->gl_owner_pid);
1867 } else
1868 print_dbg(gi, " gl_owner = -1\n");
1869 print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
1870 print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1871 print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1872 print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1873 print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no");
1874 print_dbg(gi, " le = %s\n",
1875 (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
1876 print_dbg(gi, " reclaim = %s\n",
1877 (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1878 if (gl->gl_aspace)
1879 print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1880 gl->gl_aspace->i_mapping->nrpages);
1881 else
1882 print_dbg(gi, " aspace = no\n");
1883 print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count));
1884 if (gl->gl_req_gh) {
1885 error = dump_holder(gi, "Request", gl->gl_req_gh);
1886 if (error)
1887 goto out;
1888 }
1889 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1890 error = dump_holder(gi, "Holder", gh);
1891 if (error)
1892 goto out;
1893 }
1894 list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1895 error = dump_holder(gi, "Waiter1", gh);
1896 if (error)
1897 goto out;
1898 }
1899 list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1900 error = dump_holder(gi, "Waiter3", gh);
1901 if (error)
1902 goto out;
1903 }
1904 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1905 print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n",
1906 gl->gl_demote_state, (unsigned long long)
1907 (jiffies - gl->gl_demote_time)*(1000000/HZ));
1908 }
1909 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1910 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1911 list_empty(&gl->gl_holders)) {
1912 error = dump_inode(gi, gl->gl_object);
1913 if (error)
1914 goto out;
1915 } else {
1916 error = -ENOBUFS;
1917 print_dbg(gi, " Inode: busy\n");
1918 }
1919 }
1920
1921 error = 0;
1922
1923out:
1924 spin_unlock(&gl->gl_spin);
1925 return error;
1926}
1927
1928/**
1929 * gfs2_dump_lockstate - print out the current lockstate
1930 * @sdp: the filesystem
1931 * @ub: the buffer to copy the information into
1932 *
1933 * If @ub is NULL, dump the lockstate to the console.
1934 *
1935 */
1936
1937static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1938{
1939 struct gfs2_glock *gl;
1940 struct hlist_node *h;
1941 unsigned int x;
1942 int error = 0;
1943
1944 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1945
1946 read_lock(gl_lock_addr(x));
1947
1948 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1949 if (gl->gl_sbd != sdp)
1950 continue;
1951
1952 error = dump_glock(NULL, gl);
1953 if (error)
1954 break;
1955 }
1956
1957 read_unlock(gl_lock_addr(x));
1958
1959 if (error)
1960 break;
1961 }
1962
1963
1964 return error;
1965}
1966
1967/**
1968 * gfs2_scand - Look for cached glocks and inodes to toss from memory
1969 * @sdp: Pointer to GFS2 superblock
1970 *
1971 * One of these daemons runs, finding candidates to add to sd_reclaim_list.
1972 * See gfs2_glockd()
1973 */
1974
1975static int gfs2_scand(void *data)
1976{
1977 unsigned x;
1978 unsigned delay;
1979
1980 while (!kthread_should_stop()) {
1981 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1982 examine_bucket(scan_glock, NULL, x);
1983 if (freezing(current))
1984 refrigerator();
1985 delay = scand_secs;
1986 if (delay < 1)
1987 delay = 1;
1988 schedule_timeout_interruptible(delay * HZ);
1989 }
1990
1991 return 0;
1992}
1993
1994
1995
1996int __init gfs2_glock_init(void)
1997{
1998 unsigned i;
1999 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2000 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
2001 }
2002#ifdef GL_HASH_LOCK_SZ
2003 for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
2004 rwlock_init(&gl_hash_locks[i]);
2005 }
2006#endif
2007
2008 scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
2009 if (IS_ERR(scand_process))
2010 return PTR_ERR(scand_process);
2011
2012 return 0;
2013}
2014
2015void gfs2_glock_exit(void)
2016{
2017 kthread_stop(scand_process);
2018}
2019
2020module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
2021MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
2022
2023static int gfs2_glock_iter_next(struct glock_iter *gi)
2024{
2025 struct gfs2_glock *gl;
2026
2027restart:
2028 read_lock(gl_lock_addr(gi->hash));
2029 gl = gi->gl;
2030 if (gl) {
2031 gi->gl = hlist_entry(gl->gl_list.next,
2032 struct gfs2_glock, gl_list);
2033 if (gi->gl)
2034 gfs2_glock_hold(gi->gl);
2035 }
2036 read_unlock(gl_lock_addr(gi->hash));
2037 if (gl)
2038 gfs2_glock_put(gl);
2039 if (gl && gi->gl == NULL)
2040 gi->hash++;
2041 while(gi->gl == NULL) {
2042 if (gi->hash >= GFS2_GL_HASH_SIZE)
2043 return 1;
2044 read_lock(gl_lock_addr(gi->hash));
2045 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
2046 struct gfs2_glock, gl_list);
2047 if (gi->gl)
2048 gfs2_glock_hold(gi->gl);
2049 read_unlock(gl_lock_addr(gi->hash));
2050 gi->hash++;
2051 }
2052
2053 if (gi->sdp != gi->gl->gl_sbd)
2054 goto restart;
2055
2056 return 0;
2057}
2058
2059static void gfs2_glock_iter_free(struct glock_iter *gi)
2060{
2061 if (gi->gl)
2062 gfs2_glock_put(gi->gl);
2063 kfree(gi);
2064}
2065
2066static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2067{
2068 struct glock_iter *gi;
2069
2070 gi = kmalloc(sizeof (*gi), GFP_KERNEL);
2071 if (!gi)
2072 return NULL;
2073
2074 gi->sdp = sdp;
2075 gi->hash = 0;
2076 gi->seq = NULL;
2077 gi->gl = NULL;
2078 memset(gi->string, 0, sizeof(gi->string));
2079
2080 if (gfs2_glock_iter_next(gi)) {
2081 gfs2_glock_iter_free(gi);
2082 return NULL;
2083 }
2084
2085 return gi;
2086}
2087
2088static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2089{
2090 struct glock_iter *gi;
2091 loff_t n = *pos;
2092
2093 gi = gfs2_glock_iter_init(file->private);
2094 if (!gi)
2095 return NULL;
2096
2097 while(n--) {
2098 if (gfs2_glock_iter_next(gi)) {
2099 gfs2_glock_iter_free(gi);
2100 return NULL;
2101 }
2102 }
2103
2104 return gi;
2105}
2106
2107static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2108 loff_t *pos)
2109{
2110 struct glock_iter *gi = iter_ptr;
2111
2112 (*pos)++;
2113
2114 if (gfs2_glock_iter_next(gi)) {
2115 gfs2_glock_iter_free(gi);
2116 return NULL;
2117 }
2118
2119 return gi;
2120}
2121
2122static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2123{
2124 struct glock_iter *gi = iter_ptr;
2125 if (gi)
2126 gfs2_glock_iter_free(gi);
2127}
2128
2129static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2130{
2131 struct glock_iter *gi = iter_ptr;
2132
2133 gi->seq = file;
2134 dump_glock(gi, gi->gl);
2135
2136 return 0;
2137}
2138
2139static const struct seq_operations gfs2_glock_seq_ops = {
2140 .start = gfs2_glock_seq_start,
2141 .next = gfs2_glock_seq_next,
2142 .stop = gfs2_glock_seq_stop,
2143 .show = gfs2_glock_seq_show,
2144};
2145
2146static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2147{
2148 struct seq_file *seq;
2149 int ret;
2150
2151 ret = seq_open(file, &gfs2_glock_seq_ops);
2152 if (ret)
2153 return ret;
2154
2155 seq = file->private_data;
2156 seq->private = inode->i_private;
2157
2158 return 0;
2159}
2160
2161static const struct file_operations gfs2_debug_fops = {
2162 .owner = THIS_MODULE,
2163 .open = gfs2_debugfs_open,
2164 .read = seq_read,
2165 .llseek = seq_lseek,
2166 .release = seq_release
2167};
2168
2169int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2170{
2171 sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2172 if (!sdp->debugfs_dir)
2173 return -ENOMEM;
2174 sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2175 S_IFREG | S_IRUGO,
2176 sdp->debugfs_dir, sdp,
2177 &gfs2_debug_fops);
2178 if (!sdp->debugfs_dentry_glocks)
2179 return -ENOMEM;
2180
2181 return 0;
2182}
2183
2184void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2185{
2186 if (sdp && sdp->debugfs_dir) {
2187 if (sdp->debugfs_dentry_glocks) {
2188 debugfs_remove(sdp->debugfs_dentry_glocks);
2189 sdp->debugfs_dentry_glocks = NULL;
2190 }
2191 debugfs_remove(sdp->debugfs_dir);
2192 sdp->debugfs_dir = NULL;
2193 }
7c52b166
RP
2194}
2195
2196int gfs2_register_debugfs(void)
2197{
2198 gfs2_root = debugfs_create_dir("gfs2", NULL);
2199 return gfs2_root ? 0 : -ENOMEM;
2200}
2201
2202void gfs2_unregister_debugfs(void)
2203{
2204 debugfs_remove(gfs2_root);
2205 gfs2_root = NULL;
2206}