/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
44
struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

struct glock_iter {
        int hash;                     /* hash bucket index         */
        struct gfs2_sbd *sdp;         /* incore superblock         */
        struct gfs2_glock *gl;        /* current glock struct      */
        struct seq_file *seq;         /* sequence file for debugfs */
        char string[512];             /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static void run_queue(struct gfs2_glock *gl);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
    defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif
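/*
 * Note for the UP case above: gl_lock_addr() returns NULL when the hash
 * chain locks are compiled out. This should be safe since, on kernels
 * built without SMP or spinlock debugging, the read_lock()/write_lock()
 * macros never dereference their argument.
 */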
125
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
152
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}
172
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
                gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                gfs2_assert(sdp, list_empty(&gl->gl_holders));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}
233
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}
262
/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}
283
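/*
 * glock_work_func - run queued glock work from the delayed workqueue
 *
 * Each successful queue_delayed_work() on gl_work transfers a glock
 * reference to the work item; gfs2_glock_put() below drops it once the
 * queue has been run. Any pending (delayed) demote request is promoted
 * to an active GLF_DEMOTE before the queue is processed.
 */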
static void glock_work_func(struct work_struct *work)
{
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
                set_bit(GLF_DEMOTE, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_put(gl);
}
295
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_hash = hash;
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        gl->gl_ops = glops;
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        gl->gl_vn = 0;
        gl->gl_stamp = jiffies;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}
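/*
 * Note the second search_bucket() call above, under the write lock: it
 * handles two tasks racing to create the same glock. Whichever task
 * inserts second frees its own copy and uses (with a reference taken by
 * search_bucket()) the glock that won the race.
 */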
386
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = current->pid;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /* gh->gh_error never examined. */
        set_bit(GLF_LOCK, &gl->gl_flags);
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);
                        gfs2_glock_xmote_th(gh->gh_gl, gh);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        gfs2_holder_wake(gh);

        return 0;
}
540
/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gl->gl_demote_state ||
            gl->gl_state == LM_ST_UNLOCKED) {
                gfs2_demote_wake(gl);
                return 0;
        }

        set_bit(GLF_LOCK, &gl->gl_flags);
        set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        if (gl->gl_demote_state == LM_ST_UNLOCKED ||
            gl->gl_state != LM_ST_EXCLUSIVE) {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_drop_th(gl);
        } else {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_xmote_th(gl, NULL);
        }

        spin_lock(&gl->gl_spin);
        clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        return 0;
}
576
/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);
                        blocked = rq_mutex(gh);
                } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                        blocked = rq_demote(gl);
                        if (gl->gl_waiters2 && !blocked) {
                                set_bit(GLF_DEMOTE, &gl->gl_flags);
                                gl->gl_demote_state = LM_ST_UNLOCKED;
                        }
                        gl->gl_waiters2 = 0;
                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);
                        blocked = rq_promote(gh);
                } else
                        break;

                if (blocked)
                        break;
        }
}
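/*
 * run_queue() must be called with gl_spin held. It services requests in
 * strict priority order: glmutex waiters (gl_waiters1) first, then any
 * pending demote, then promotion waiters (gl_waiters3), stopping as soon
 * as a request blocks or GLF_LOCK is already held.
 */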
613
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
                BUG();

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                clear_bit(HIF_WAIT, &gh.gh_iflags);
                smp_mb();
                wake_up_bit(&gh.gh_iflags, HIF_WAIT);
        }
        spin_unlock(&gl->gl_spin);

        wait_on_holder(&gh);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                acquired = 0;
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}
683
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            int remote, unsigned long delay)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        spin_lock(&gl->gl_spin);
        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
                    gl->gl_object) {
                        gfs2_glock_schedule_for_reclaim(gl);
                        spin_unlock(&gl->gl_spin);
                        return;
                }
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                   gl->gl_demote_state != state) {
                if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                        gl->gl_waiters2 = 1;
                else
                        gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        spin_unlock(&gl->gl_spin);
}
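/*
 * gl_demote_state == LM_ST_EXCLUSIVE means "no demote pending", since a
 * glock is never demoted *to* the exclusive state. When a second,
 * conflicting demote request arrives, the target state falls back to
 * LM_ST_UNLOCKED, either directly or, if a demote is already in progress,
 * via the gl_waiters2 flag that run_queue() checks afterwards.
 */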
718
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}
743
/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, 0);
        }

        /* Deal with each possible exit condition */

        if (!gh) {
                gl->gl_stamp = jiffies;
                if (ret & LM_OUT_CANCELED) {
                        op_done = 0;
                } else {
                        spin_lock(&gl->gl_spin);
                        if (gl->gl_state != gl->gl_demote_state) {
                                gl->gl_req_bh = NULL;
                                spin_unlock(&gl->gl_spin);
                                gfs2_glock_drop_th(gl);
                                gfs2_glock_put(gl);
                                return;
                        }
                        gfs2_demote_wake(gl);
                        spin_unlock(&gl->gl_spin);
                }
        } else {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                        goto out;
                gh->gh_error = GLR_CANCELED;
                if (ret & LM_OUT_CANCELED)
                        goto out;
                if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                        list_add_tail(&gh->gh_list, &gl->gl_holders);
                        gh->gh_error = 0;
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        set_bit(HIF_FIRST, &gh->gh_iflags);
                        op_done = 0;
                        goto out;
                }
                gh->gh_error = GLR_TRYFAILED;
                if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        goto out;
                gh->gh_error = -EINVAL;
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}
836
/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder carrying the requested state and flags, or NULL when
 *      demoting to gl_demote_state
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int flags = gh ? gh->gh_flags : 0;
        unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}
877
/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        spin_lock(&gl->gl_spin);
        gfs2_demote_wake(gl);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh && !(gl->gl_req_gh &&
                                       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}
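/*
 * do_cancels() polls: it re-issues a cancel to the lock module every
 * 100ms until the priority holder has become the pending request, become
 * a holder, or left the queue, dropping gl_spin around the cancel call
 * and the sleep.
 */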
987
/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_on_holder(gh);
        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}
1051
static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, pid_t pid)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner_pid == pid)
                        return gh;
        }

        return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (gi) {
                /* Bound the formatted output to the scratch buffer and
                   never treat the result as a format string of its own. */
                vsnprintf(gi->string, sizeof(gi->string), fmt, args);
                seq_printf(gi->seq, "%s", gi->string);
        } else
                vprintk(fmt, args);
        va_end(args);
}
1078
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner_pid);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (!(gh->gh_flags & GL_FLOCK)) {
                existing = find_holder_by_owner(&gl->gl_holders,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               existing->gh_gl->gl_name.ln_type,
                               existing->gh_gl->gl_state);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               gl->gl_name.ln_type, gl->gl_state);
                        BUG();
                }

                existing = find_holder_by_owner(&gl->gl_waiters3,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        BUG();
                }
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}
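/*
 * The "recursion" checked for above is a single task queueing a second
 * holder for a glock it already holds or is already queued on, which
 * would deadlock against itself; it is caught with BUG() at queueing
 * time. Holders with GL_FLOCK set are exempt, presumably because flock
 * semantics let the same process issue a second request to convert an
 * existing lock.
 */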
1126
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                if (glops->go_unlock) {
                        spin_unlock(&gl->gl_spin);
                        glops->go_unlock(gh);
                        spin_lock(&gl->gl_spin);
                }
                gl->gl_stamp = jiffies;
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}
1263
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}
1303
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}
1325
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}
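/*
 * Sorting the holders by lock number (see glock_compare()) gives every
 * caller the same global acquisition order, which is what makes the
 * multi-lock acquire above deadlock free. On failure, the locks already
 * taken are dropped in reverse order before the error is returned.
 */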
1359
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}
1427
/**
 * gfs2_lvb_hold - attach an LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach an LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}
1476
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
        if (time_before(now, holdtime))
                delay = holdtime - now;

        handle_callback(gl, state, 1, delay);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}
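/*
 * blocking_cb() enforces a minimum hold time: if the glock changed state
 * less than go_min_hold_time jiffies ago, the remote demote request is
 * recorded as GLF_PENDING_DEMOTE and only acted upon when the delayed
 * work fires, which should keep a contended lock from bouncing between
 * nodes on every access.
 */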
1497
/**
 * gfs2_glock_cb - Callback used by locking module
 * @sdp: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}
1636
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          unsigned int hash)
{
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
        if (hlist_empty(head))
                goto out;
        gl = list_entry(head->first, struct gfs2_glock, gl_list);
        while(1) {
                if (!sdp || gl->gl_sbd == sdp) {
                        gfs2_glock_hold(gl);
                        read_unlock(gl_lock_addr(hash));
                        if (prev)
                                gfs2_glock_put(prev);
                        prev = gl;
                        examiner(gl);
                        has_entries = 1;
                        read_lock(gl_lock_addr(hash));
                }
                if (gl->gl_list.next == NULL)
                        break;
                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        }
out:
        read_unlock(gl_lock_addr(hash));
        if (prev)
                gfs2_glock_put(prev);
        cond_resched();
        return has_entries;
}
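/*
 * The examiner may sleep, so examine_bucket() drops the hash chain lock
 * around each call. Holding a reference on the current glock, and keeping
 * the previous one pinned until the iteration has moved past it, keeps
 * the chain position valid while the lock is dropped.
 */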
1680
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
                return;

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        goto out_schedule;
                gfs2_glmutex_unlock(gl);
        }
        return;

out_schedule:
        gfs2_glmutex_unlock(gl);
        gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        } else {
                spin_unlock(&sdp->sd_reclaim_lock);
        }

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                gfs2_glmutex_unlock(gl);
        }
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;
                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                        if (examine_bucket(clear_glock, sdp, x))
                                cont = 1;
                }

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                down_write(&gfs2_umount_flush_sem);
                invalidate_inodes(sdp->sd_vfs);
                up_write(&gfs2_umount_flush_sem);
                msleep(10);
        }
}
1776
/*
 * Diagnostic routines to help debug distributed deadlock
 */

static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
                              unsigned long address)
{
        char buffer[KSYM_SYMBOL_LEN];

        sprint_symbol(buffer, address);
        print_dbg(gi, fmt, buffer);
}

/**
 * dump_holder - print information about a glock holder
 * @gi: the glock iterator, or NULL to print to the console
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct glock_iter *gi, char *str,
                       struct gfs2_holder *gh)
{
        unsigned int x;
        struct task_struct *gh_owner;

        print_dbg(gi, "  %s\n", str);
        if (gh->gh_owner_pid) {
                print_dbg(gi, "    owner = %ld ", (long)gh->gh_owner_pid);
                gh_owner = find_task_by_pid(gh->gh_owner_pid);
                if (gh_owner)
                        print_dbg(gi, "(%s)\n", gh_owner->comm);
                else
                        print_dbg(gi, "(ended)\n");
        } else
                print_dbg(gi, "    owner = -1\n");
        print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
        print_dbg(gi, "    gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");
        print_dbg(gi, "    error = %d\n", gh->gh_error);
        print_dbg(gi, "    gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");
        gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);

        return 0;
}
1830
/**
 * dump_inode - print information about an inode
 * @gi: the glock iterator, or NULL to print to the console
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
{
        unsigned int x;

        print_dbg(gi, "  Inode:\n");
        print_dbg(gi, "    num = %llu/%llu\n",
                  (unsigned long long)ip->i_no_formal_ino,
                  (unsigned long long)ip->i_no_addr);
        print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
        print_dbg(gi, "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");
        return 0;
}
1854
/**
 * dump_glock - print information about a glock
 * @gi: the glock iterator, or NULL to print to the console
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;
        struct task_struct *gl_owner;

        spin_lock(&gl->gl_spin);

        print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
                  (unsigned long long)gl->gl_name.ln_number);
        print_dbg(gi, "  gl_flags =");
        for (x = 0; x < 32; x++) {
                if (test_bit(x, &gl->gl_flags))
                        print_dbg(gi, " %u", x);
        }
        if (!test_bit(GLF_LOCK, &gl->gl_flags))
                print_dbg(gi, " (unlocked)");
        print_dbg(gi, " \n");
        print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
        print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
        if (gl->gl_owner_pid) {
                gl_owner = find_task_by_pid(gl->gl_owner_pid);
                if (gl_owner)
                        print_dbg(gi, "  gl_owner = pid %d (%s)\n",
                                  gl->gl_owner_pid, gl_owner->comm);
                else
                        print_dbg(gi, "  gl_owner = %d (ended)\n",
                                  gl->gl_owner_pid);
        } else
                print_dbg(gi, "  gl_owner = -1\n");
        print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
        print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
        print_dbg(gi, "  reclaim = %s\n",
                  (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
                          gl->gl_aspace->i_mapping->nrpages);
        else
                print_dbg(gi, "  aspace = no\n");
        print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
        if (gl->gl_req_gh) {
                error = dump_holder(gi, "Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder(gi, "Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder(gi, "Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder(gi, "Waiter3", gh);
                if (error)
                        goto out;
        }
        if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
                          gl->gl_demote_state, (unsigned long long)
                          (jiffies - gl->gl_demote_time)*(1000000/HZ));
        }
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(gi, gl->gl_object);
                        if (error)
                                goto out;
                } else {
                        error = -ENOBUFS;
                        print_dbg(gi, "  Inode: busy\n");
                }
        }

        error = 0;

out:
        spin_unlock(&gl->gl_spin);
        return error;
}
1950
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the state of every glock belonging to @sdp to the console.
 *
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

                read_lock(gl_lock_addr(x));

                hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
                        if (gl->gl_sbd != sdp)
                                continue;

                        error = dump_glock(NULL, gl);
                        if (error)
                                break;
                }

                read_unlock(gl_lock_addr(x));

                if (error)
                        break;
        }

        return error;
}
1989
/**
 * gfs2_scand - Look for cached glocks and inodes to toss from memory
 * @data: unused
 *
 * One of these daemons runs, finding candidates to add to sd_reclaim_list.
 * See gfs2_glockd()
 */

static int gfs2_scand(void *data)
{
        unsigned x;
        unsigned delay;

        while (!kthread_should_stop()) {
                for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                        examine_bucket(scan_glock, NULL, x);
                if (freezing(current))
                        refrigerator();
                delay = scand_secs;
                if (delay < 1)
                        delay = 1;
                schedule_timeout_interruptible(delay * HZ);
        }

        return 0;
}
2016
int __init gfs2_glock_init(void)
{
        unsigned i;
        for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
                INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
        }
#ifdef GL_HASH_LOCK_SZ
        for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
                rwlock_init(&gl_hash_locks[i]);
        }
#endif

        scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
        if (IS_ERR(scand_process))
                return PTR_ERR(scand_process);

        /* create_workqueue() returns NULL on failure, not an ERR_PTR */
        glock_workqueue = create_workqueue("glock_workqueue");
        if (!glock_workqueue) {
                kthread_stop(scand_process);
                return -ENOMEM;
        }

        return 0;
}
2043
void gfs2_glock_exit(void)
{
        destroy_workqueue(glock_workqueue);
        kthread_stop(scand_process);
}

module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
2052
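/*
 * debugfs support: the iterator below walks every hash bucket and every
 * glock within it, holding a reference on the current glock so it stays
 * valid between seq_file callbacks; glocks belonging to other filesystems
 * are skipped via the restart loop in gfs2_glock_iter_next().
 */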
static int gfs2_glock_iter_next(struct glock_iter *gi)
{
        struct gfs2_glock *gl;

restart:
        read_lock(gl_lock_addr(gi->hash));
        gl = gi->gl;
        if (gl) {
                gi->gl = hlist_entry(gl->gl_list.next,
                                     struct gfs2_glock, gl_list);
                if (gi->gl)
                        gfs2_glock_hold(gi->gl);
        }
        read_unlock(gl_lock_addr(gi->hash));
        if (gl)
                gfs2_glock_put(gl);
        if (gl && gi->gl == NULL)
                gi->hash++;
        while(gi->gl == NULL) {
                if (gi->hash >= GFS2_GL_HASH_SIZE)
                        return 1;
                read_lock(gl_lock_addr(gi->hash));
                gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
                                     struct gfs2_glock, gl_list);
                if (gi->gl)
                        gfs2_glock_hold(gi->gl);
                read_unlock(gl_lock_addr(gi->hash));
                gi->hash++;
        }

        if (gi->sdp != gi->gl->gl_sbd)
                goto restart;

        return 0;
}

static void gfs2_glock_iter_free(struct glock_iter *gi)
{
        if (gi->gl)
                gfs2_glock_put(gi->gl);
        kfree(gi);
}
2095
static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
{
        struct glock_iter *gi;

        gi = kmalloc(sizeof(*gi), GFP_KERNEL);
        if (!gi)
                return NULL;

        gi->sdp = sdp;
        gi->hash = 0;
        gi->seq = NULL;
        gi->gl = NULL;
        memset(gi->string, 0, sizeof(gi->string));

        if (gfs2_glock_iter_next(gi)) {
                gfs2_glock_iter_free(gi);
                return NULL;
        }

        return gi;
}

static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
{
        struct glock_iter *gi;
        loff_t n = *pos;

        gi = gfs2_glock_iter_init(file->private);
        if (!gi)
                return NULL;

        while(n--) {
                if (gfs2_glock_iter_next(gi)) {
                        gfs2_glock_iter_free(gi);
                        return NULL;
                }
        }

        return gi;
}

static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
                                 loff_t *pos)
{
        struct glock_iter *gi = iter_ptr;

        (*pos)++;

        if (gfs2_glock_iter_next(gi)) {
                gfs2_glock_iter_free(gi);
                return NULL;
        }

        return gi;
}

static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
{
        struct glock_iter *gi = iter_ptr;
        if (gi)
                gfs2_glock_iter_free(gi);
}

static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
{
        struct glock_iter *gi = iter_ptr;

        gi->seq = file;
        dump_glock(gi, gi->gl);

        return 0;
}

static const struct seq_operations gfs2_glock_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        int ret;

        ret = seq_open(file, &gfs2_glock_seq_ops);
        if (ret)
                return ret;

        seq = file->private_data;
        seq->private = inode->i_private;

        return 0;
}

static const struct file_operations gfs2_debug_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_debugfs_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
        sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
        if (!sdp->debugfs_dir)
                return -ENOMEM;
        sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
                                                         S_IFREG | S_IRUGO,
                                                         sdp->debugfs_dir, sdp,
                                                         &gfs2_debug_fops);
        if (!sdp->debugfs_dentry_glocks)
                return -ENOMEM;

        return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
        if (sdp && sdp->debugfs_dir) {
                if (sdp->debugfs_dentry_glocks) {
                        debugfs_remove(sdp->debugfs_dentry_glocks);
                        sdp->debugfs_dentry_glocks = NULL;
                }
                debugfs_remove(sdp->debugfs_dir);
                sdp->debugfs_dir = NULL;
        }
}

int gfs2_register_debugfs(void)
{
        gfs2_root = debugfs_create_dir("gfs2", NULL);
        return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
        debugfs_remove(gfs2_root);
        gfs2_root = NULL;
}