/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

/* Must be kept in sync with the beginning of struct gfs2_glock */
struct glock_plug {
        struct list_head gl_list;
        unsigned long gl_flags;
};

struct greedy {
        struct gfs2_holder gr_gh;
        struct work_struct gr_work;
};

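/* An examiner is called by examine_bucket() for each glock in a hash
   bucket; it takes over the reference the walk acquires and must drop
   it with gfs2_glock_put(). */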
typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(uint64_t), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
        struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
        gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
        gfs2_assert(sdp, list_empty(&gl->gl_holders));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
        int rv = 0;

        mutex_lock(&sdp->sd_invalidate_inodes_mutex);

        write_lock(&bucket->hb_lock);
        if (kref_put(&gl->gl_ref, kill_glock)) {
                list_del_init(&gl->gl_list);
                write_unlock(&bucket->hb_lock);
                BUG_ON(spin_is_locked(&gl->gl_spin));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(&bucket->hb_lock);
out:
        mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
        return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
        int empty;
        spin_lock(&gl->gl_spin);
        empty = list_empty(head);
        spin_unlock(&gl->gl_spin);
        return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
                                        struct lm_lockname *name)
{
        struct gfs2_glock *gl;

        list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                if (test_bit(GLF_PLUG, &gl->gl_flags))
                        continue;
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;

                kref_get(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
                                          struct lm_lockname *name)
{
        struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
        struct gfs2_glock *gl;

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, name);
        read_unlock(&bucket->hb_lock);

        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
                   struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name;
        struct gfs2_glock *gl, *tmp;
        struct gfs2_gl_hash_bucket *bucket;
        int error;

        name.ln_number = number;
        name.ln_type = glops->go_type;
        bucket = &sdp->sd_gl_hash[gl_hash(&name)];

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, &name);
        read_unlock(&bucket->hb_lock);

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        memset(gl, 0, sizeof(struct gfs2_glock));

        INIT_LIST_HEAD(&gl->gl_list);
        gl->gl_name = name;
        kref_init(&gl->gl_ref);

        spin_lock_init(&gl->gl_spin);

        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        INIT_LIST_HEAD(&gl->gl_holders);
        INIT_LIST_HEAD(&gl->gl_waiters1);
        INIT_LIST_HEAD(&gl->gl_waiters2);
        INIT_LIST_HEAD(&gl->gl_waiters3);

        gl->gl_ops = glops;

        gl->gl_bucket = bucket;
        INIT_LIST_HEAD(&gl->gl_reclaim);

        gl->gl_sbd = sdp;

        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_LIST_HEAD(&gl->gl_ail_list);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops ||
            glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

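        /* Another process may have created and inserted this glock while
           we weren't holding the bucket lock; if so, free ours and use
           the existing one. */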
        write_lock(&bucket->hb_lock);
        tmp = search_bucket(bucket, &name);
        if (tmp) {
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                gl = tmp;
        } else {
                list_add_tail(&gl->gl_list, &bucket->hb_list);
                write_unlock(&bucket->hb_lock);
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);

fail:
        kmem_cache_free(gfs2_glock_cachep, gl);

        return error;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner = current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        init_completion(&gh->gh_wait);

        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gh->gh_iflags &= 1 << HIF_ALLOCED;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the allocation flags passed to kmalloc()
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
                                           unsigned int state,
                                           int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /* gh->gh_error never examined. */
        set_bit(GLF_LOCK, &gl->gl_flags);
        complete(&gh->gh_wait);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        glops->go_xmote_th(gl, gh->gh_state,
                                           gh->gh_flags);

                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        complete(&gh->gh_wait);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        glops->go_drop_th(gl);
                else
                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /* gh->gh_error never examined. */
        clear_bit(GLF_GREEDY, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_holder_uninit(gh);
        kfree(container_of(gh, struct greedy, gr_gh));

        spin_lock(&gl->gl_spin);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

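        /* Waiters are serviced in strict priority order: glmutex requests
           (gl_waiters1) first, then demote/greedy requests (gl_waiters2),
           then promote requests (gl_waiters3). */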
        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
                                blocked = rq_greedy(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                complete(&gh.gh_wait);
        }
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                acquired = 0;
        else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * Note: This may fail silently if we are out of memory.
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL);
                if (!new_gh)
                        return;
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

                goto restart;
        }

out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}

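/**
 * gfs2_glock_inode_squish - queue a demote-to-unlocked request for an
 * inode's glock and wait for it to be processed
 * @inode: the inode whose glock should be dropped
 *
 */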
void gfs2_glock_inode_squish(struct inode *inode)
{
        struct gfs2_holder gh;
        struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
        gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
        set_bit(HIF_DEMOTE, &gh.gh_iflags);
        spin_lock(&gl->gl_spin);
        gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
        list_add_tail(&gh.gh_list, &gl->gl_waiters2);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA | DIO_DATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_DATA);
        }

        /* Deal with each possible exit condition */

        if (!gh)
                gl->gl_stamp = jiffies;

        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                spin_unlock(&gl->gl_spin);

        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED)
                        gh->gh_error = 0;
                else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
                               lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA | DIO_DATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

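        /* Until this holder is either the pending request or an actual
           holder, keep canceling the in-flight request (unless the
           requester asked for GL_NOCANCEL) and poll. */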
        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh &&
                    !(gl->gl_req_gh &&
                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_for_completion(&gh->gh_wait);

        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
                                                   gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

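/**
 * find_holder_by_owner - search a holder list for an entry owned by a task
 * @head: the list of gfs2_holder structures to search
 * @owner: the task whose holder we're looking for
 *
 * Returns: the matching holder, or NULL if @owner has none on the list
 */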
static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner);

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
                dump_glock(gl);

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a GL_ASYNC lock acquisition to complete
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_SYNC)
                set_bit(GLF_SYNC, &gl->gl_flags);

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                if (test_bit(GLF_SYNC, &gl->gl_flags)) {
                        if (glops->go_sync)
                                glops->go_sync(gl, DIO_METADATA | DIO_DATA);
                }

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
                                int flags)
{
        struct gfs2_glock_operations *glops = gl->gl_ops;

        spin_lock(&gl->gl_spin);

        if (test_bit(GLF_LOCK, &gl->gl_flags) ||
            !list_empty(&gl->gl_holders) ||
            !list_empty(&gl->gl_waiters1) ||
            !list_empty(&gl->gl_waiters2) ||
            !list_empty(&gl->gl_waiters3) ||
            relaxed_state_ok(gl->gl_state, state, flags)) {
                spin_unlock(&gl->gl_spin);
                return;
        }

        set_bit(GLF_PREFETCH, &gl->gl_flags);
        set_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        glops->go_xmote_th(gl, state, flags);
}

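/**
 * greedy_work - delayed-work handler that ends a glock's greedy period
 * @data: the struct greedy holding the holder and work structure
 *
 * If no one is waiting on the glock, the greedy flag is simply cleared;
 * otherwise a greedy holder is queued on gl_waiters2 so that run_queue()
 * can drop the status via rq_greedy().
 */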
static void greedy_work(void *data)
{
        struct greedy *gr = data;
        struct gfs2_holder *gh = &gr->gr_gh;
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

        if (glops->go_greedy)
                glops->go_greedy(gl);

        spin_lock(&gl->gl_spin);

        if (list_empty(&gl->gl_waiters2)) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
                gfs2_holder_uninit(gh);
                kfree(gr);
        } else {
                gfs2_glock_hold(gl);
                list_add_tail(&gh->gh_list, &gl->gl_waiters2);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
        }
}

/**
 * gfs2_glock_be_greedy - mark a glock greedy for a while
 * @gl: the glock
 * @time: how long to stay greedy, in jiffies
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
        struct greedy *gr;
        struct gfs2_holder *gh;

        if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
            test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
                return 1;

        gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
        if (!gr) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                return 1;
        }
        gh = &gr->gr_gh;

        gfs2_holder_init(gl, 0, 0, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
        INIT_WORK(&gr->gr_work, greedy_work, gr);

        set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
        schedule_delayed_work(&gr->gr_work, time);

        return 0;
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
                      struct gfs2_glock_operations *glops, unsigned int state,
                      int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A sorts after B, -1 if A sorts before B, 0 otherwise
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
        struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
        struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        struct lm_lockname *b = &gh_b->gh_gl->gl_name;
        int ret = 0;

        if (a->ln_number > b->ln_number)
                ret = 1;
        else if (a->ln_number < b->ln_number)
                ret = -1;
        else {
                if (gh_a->gh_state == LM_ST_SHARED &&
                    gh_b->gh_state == LM_ST_EXCLUSIVE)
                        ret = 1;
                else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
                         (gh_b->gh_flags & GL_LOCAL_EXCL))
                        ret = 1;
        }

        return ret;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        int *e;
        unsigned int x;
        int borked = 0, serious = 0;
        int error = 0;

        if (!num_gh)
                return 0;

        if (num_gh == 1) {
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        }

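        /* The e array does double duty: it first holds the per-holder
           error codes from the async attempt, and is later reused as the
           pointer array that nq_m_sync() sorts (hence the allocation of
           sizeof(struct gfs2_holder *) per element). */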
        e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        for (x = 0; x < num_gh; x++) {
                ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
                error = gfs2_glock_nq(&ghs[x]);
                if (error) {
                        borked = 1;
                        serious = error;
                        num_gh = x;
                        break;
                }
        }

        for (x = 0; x < num_gh; x++) {
                error = e[x] = glock_wait_internal(&ghs[x]);
                if (error) {
                        borked = 1;
                        if (error != GLR_TRYFAILED && error != GLR_CANCELED)
                                serious = error;
                }
        }

        if (!borked) {
                kfree(e);
                return 0;
        }

        for (x = 0; x < num_gh; x++)
                if (!e[x])
                        gfs2_glock_dq(&ghs[x]);

        if (serious)
                error = serious;
        else {
                for (x = 0; x < num_gh; x++)
                        gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
                                           &ghs[x]);
                error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
        }

        kfree(e);

        return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 *
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
                             struct gfs2_glock_operations *glops,
                             unsigned int state, int flags)
{
        struct gfs2_glock *gl;
        int error;

        if (atomic_read(&sdp->sd_reclaim_count) <
            gfs2_tune_get(sdp, gt_reclaim_limit)) {
                error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
                if (!error) {
                        gfs2_glock_prefetch(gl, state, flags);
                        gfs2_glock_put(gl);
                }
        }
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 * Returns: errno
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}

#if 0
void gfs2_lvb_sync(struct gfs2_glock *gl)
{
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
        if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
                gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);

        gfs2_glmutex_unlock(gl);
}
#endif /* 0 */

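/**
 * blocking_cb - handle a blocking callback from the lock module
 * @sdp: the filesystem
 * @name: the name of the glock being demoted
 * @state: the state the remote node needs us to drop to
 *
 * Queues a demote request on the glock (if it still exists) and kicks
 * the queue so the demotion can proceed.
 */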
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        if (gl->gl_ops->go_callback)
                gl->gl_ops->go_callback(gl, state);
        handle_callback(gl, state);

        spin_lock(&gl->gl_spin);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @fsdata: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                gfs2_glock_put(gl);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 *
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{

        if (state != LM_ST_UNLOCKED)
                return;
        /* FIXME: remove this? */
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
                demote = time_after_eq(jiffies,
                                       gl->gl_stamp +
                                       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED &&
                    demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          struct gfs2_gl_hash_bucket *bucket)
{
        struct glock_plug plug;
        struct list_head *tmp;
        struct gfs2_glock *gl;
        int entries;

        /* Add "plug" to end of bucket list, work back up list from there */
        memset(&plug.gl_flags, 0, sizeof(unsigned long));
        set_bit(GLF_PLUG, &plug.gl_flags);

        write_lock(&bucket->hb_lock);
        list_add(&plug.gl_list, &bucket->hb_list);
        write_unlock(&bucket->hb_lock);

        for (;;) {
                write_lock(&bucket->hb_lock);

                for (;;) {
                        tmp = plug.gl_list.next;

                        if (tmp == &bucket->hb_list) {
                                list_del(&plug.gl_list);
                                entries = !list_empty(&bucket->hb_list);
                                write_unlock(&bucket->hb_lock);
                                return entries;
                        }
                        gl = list_entry(tmp, struct gfs2_glock, gl_list);

                        /* Move plug up list */
                        list_move(&plug.gl_list, &gl->gl_list);

                        if (test_bit(GLF_PLUG, &gl->gl_flags))
                                continue;

                        /* examiner() must glock_put() */
                        gfs2_glock_hold(gl);

                        break;
                }

                write_unlock(&bucket->hb_lock);

                examiner(gl);
        }
}

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gfs2_glmutex_trylock(gl)) {
                if (gl->gl_ops == &gfs2_inode_glops)
                        goto out;
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED &&
                    demote_ok(gl))
                        goto out_schedule;
out:
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);

        return;

out_schedule:
        gfs2_glmutex_unlock(gl);
        gfs2_glock_schedule_for_reclaim(gl);
        gfs2_glock_put(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
        unsigned int x;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
                cond_resched();
        }
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        } else {
                spin_unlock(&sdp->sd_reclaim_lock);
        }

        if (gfs2_glmutex_trylock(gl)) {
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED);

                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;

                for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                        if (examine_bucket(clear_glock, sdp,
                                           &sdp->sd_gl_hash[x]))
                                cont = 1;

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                /* invalidate_inodes() requires that the sb inodes list
                   not change, but an async completion callback for an
                   unlock can occur which does glock_put() which
                   can call iput() which will change the sb inodes list.
                   invalidate_inodes_mutex prevents glock_put()'s during
                   an invalidate_inodes() */

                mutex_lock(&sdp->sd_invalidate_inodes_mutex);
                invalidate_inodes(sdp->sd_vfs);
                mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
                msleep(10);
        }
}

/*
 * Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  %s\n", str);
        printk(KERN_INFO "    owner = %ld\n",
               (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
        printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
        printk(KERN_INFO "    gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO "    error = %d\n", gh->gh_error);
        printk(KERN_INFO "    gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        printk(" %u", x);
        printk(" \n");
        print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

        error = 0;

        return error;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  Inode:\n");
        printk(KERN_INFO "    num = %llu %llu\n",
               (unsigned long long)ip->i_num.no_formal_ino,
               (unsigned long long)ip->i_num.no_addr);
        printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
        printk(KERN_INFO "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        printk(" %u", x);
        printk(" \n");

        error = 0;

        return error;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;

        spin_lock(&gl->gl_spin);

        printk(KERN_INFO "Glock 0x%p (%u, %llu)\n",
               gl,
               gl->gl_name.ln_type,
               (unsigned long long)gl->gl_name.ln_number);
        printk(KERN_INFO "  gl_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gl->gl_flags))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
        printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
        printk(KERN_INFO "  gl_owner = %s\n", gl->gl_owner->comm);
        print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
        printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
        printk(KERN_INFO "  le = %s\n",
               (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
        printk(KERN_INFO "  reclaim = %s\n",
               (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n",
                       gl->gl_aspace,
                       gl->gl_aspace->i_mapping->nrpages);
        else
                printk(KERN_INFO "  aspace = no\n");
        printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
        if (gl->gl_req_gh) {
                error = dump_holder("Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder("Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder("Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                error = dump_holder("Waiter2", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder("Waiter3", gh);
                if (error)
                        goto out;
        }
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(gl->gl_object);
                        if (error)
                                goto out;
                } else {
                        error = -ENOBUFS;
                        printk(KERN_INFO "  Inode: busy\n");
                }
        }

        error = 0;

out:
        spin_unlock(&gl->gl_spin);

        return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the lockstate to the console.
 *
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_gl_hash_bucket *bucket;
        struct gfs2_glock *gl;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                bucket = &sdp->sd_gl_hash[x];

                read_lock(&bucket->hb_lock);

                list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                        if (test_bit(GLF_PLUG, &gl->gl_flags))
                                continue;

                        error = dump_glock(gl);
                        if (error)
                                break;
                }

                read_unlock(&bucket->hb_lock);

                if (error)
                        break;
        }

        return error;
}