/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

struct glock_iter {
        int hash;                   /* hash bucket index */
        struct gfs2_sbd *sdp;       /* incore superblock */
        struct gfs2_glock *gl;      /* current glock struct */
        struct seq_file *seq;       /* sequence file for debugfs */
        char string[512];           /* scratch space */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static void run_queue(struct gfs2_glock *gl);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif
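
/*
 * Illustrative sketch (not part of the original file): because the lock
 * array and the bucket array are indexed with the same hash value, a
 * reader protects bucket `hash` like this:
 *
 *	read_lock(gl_lock_addr(hash));
 *	gl = search_bucket(hash, sdp, &name);
 *	read_unlock(gl_lock_addr(hash));
 *
 * Several buckets may share one rwlock (GL_HASH_LOCK_SZ can be smaller
 * than GFS2_GL_HASH_SIZE), which is safe as long as the same index is
 * always used for both lookups.
 */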

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
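
/*
 * Illustrative examples (not part of the original file):
 *
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0)          -> 1
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT)   -> 0
 *	relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY) -> 0
 *
 * i.e. an exclusive lock satisfies a shared request unless the caller
 * demanded an exact state match, and LM_FLAG_ANY accepts any held state
 * but never an unlocked one.
 */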

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                BUG_ON(spin_is_locked(&gl->gl_spin));
                gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
                gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                gfs2_assert(sdp, list_empty(&gl->gl_holders));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the index of the bucket to search
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

static void glock_work_func(struct work_struct *work)
{
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

        spin_lock(&gl->gl_spin);
        if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
                set_bit(GLF_DEMOTE, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
        gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_hash = hash;
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        gl->gl_ops = glops;
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        gl->gl_vn = 0;
        gl->gl_stamp = jiffies;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}
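
/*
 * Illustrative usage (not part of the original file): callers look up or
 * create a glock for an on-disk object by number, then drop their
 * reference when done, e.g.:
 *
 *	struct gfs2_glock *gl;
 *	int error = gfs2_glock_get(sdp, number, &gfs2_inode_glops,
 *				   CREATE, &gl);
 *	if (!error) {
 *		...
 *		gfs2_glock_put(gl);
 *	}
 *
 * gfs2_glock_nq_num() below follows exactly this pattern.
 */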

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = current->pid;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}
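
/*
 * Illustrative lifecycle (not part of the original file): a holder is
 * initialized against a glock, queued, and eventually dequeued and
 * uninitialized, e.g.:
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... access the protected object ...
 *		gfs2_glock_dq_uninit(&gh);
 *	} else {
 *		gfs2_holder_uninit(&gh);
 *	}
 *
 * The gfs2_glock_nq_init() helper used later in this file wraps the
 * first two calls.
 */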

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /* gh->gh_error never examined. */
        set_bit(GLF_LOCK, &gl->gl_flags);
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);
                        gfs2_glock_xmote_th(gh->gh_gl, gh);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        gfs2_holder_wake(gh);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gl->gl_demote_state ||
            gl->gl_state == LM_ST_UNLOCKED) {
                gfs2_demote_wake(gl);
                return 0;
        }

        set_bit(GLF_LOCK, &gl->gl_flags);
        set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        if (gl->gl_demote_state == LM_ST_UNLOCKED ||
            gl->gl_state != LM_ST_EXCLUSIVE) {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_drop_th(gl);
        } else {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_xmote_th(gl, NULL);
        }

        spin_lock(&gl->gl_spin);
        clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                        blocked = rq_demote(gl);
                        if (gl->gl_waiters2 && !blocked) {
                                set_bit(GLF_DEMOTE, &gl->gl_flags);
                                gl->gl_demote_state = LM_ST_UNLOCKED;
                        }
                        gl->gl_waiters2 = 0;
                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);
        if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
                BUG();

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                clear_bit(HIF_WAIT, &gh.gh_iflags);
                smp_mb();
                wake_up_bit(&gh.gh_iflags, HIF_WAIT);
        }
        spin_unlock(&gl->gl_spin);

        wait_on_holder(&gh);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                acquired = 0;
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @remote: 1 if the request came from the lock module rather than this node
 * @delay: nonzero to mark the demote as pending rather than immediate
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            int remote, unsigned long delay)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        spin_lock(&gl->gl_spin);
        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
                    gl->gl_object) {
                        gfs2_glock_schedule_for_reclaim(gl);
                        spin_unlock(&gl->gl_spin);
                        return;
                }
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                   gl->gl_demote_state != state) {
                if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                        gl->gl_waiters2 = 1;
                else
                        gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        spin_unlock(&gl->gl_spin);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, 0);
        }

        /* Deal with each possible exit condition */

        if (!gh) {
                gl->gl_stamp = jiffies;
                if (ret & LM_OUT_CANCELED) {
                        op_done = 0;
                } else {
                        spin_lock(&gl->gl_spin);
                        if (gl->gl_state != gl->gl_demote_state) {
                                gl->gl_req_bh = NULL;
                                spin_unlock(&gl->gl_spin);
                                gfs2_glock_drop_th(gl);
                                gfs2_glock_put(gl);
                                return;
                        }
                        gfs2_demote_wake(gl);
                        spin_unlock(&gl->gl_spin);
                }
        } else {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                        goto out;
                gh->gh_error = GLR_CANCELED;
                if (ret & LM_OUT_CANCELED)
                        goto out;
                if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                        list_add_tail(&gh->gh_list, &gl->gl_holders);
                        gh->gh_error = 0;
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        set_bit(HIF_FIRST, &gh->gh_iflags);
                        op_done = 0;
                        goto out;
                }
                gh->gh_error = GLR_TRYFAILED;
                if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        goto out;
                gh->gh_error = -EINVAL;
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder whose request is being serviced, or NULL for a demote
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int flags = gh ? gh->gh_flags : 0;
        unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        spin_lock(&gl->gl_spin);
        gfs2_demote_wake(gl);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        if (glops->go_drop_th)
                glops->go_drop_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh && !(gl->gl_req_gh &&
                                       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_on_holder(gh);
        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, pid_t pid)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner_pid == pid)
                        return gh;
        }

        return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (gi) {
                /* Bound the write to the scratch buffer, and don't pass
                   user-influenced data as a format string. */
                vsnprintf(gi->string, sizeof(gi->string), fmt, args);
                seq_printf(gi->seq, "%s", gi->string);
        } else {
                vprintk(fmt, args);
        }
        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner_pid);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (!(gh->gh_flags & GL_FLOCK)) {
                existing = find_holder_by_owner(&gl->gl_holders,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               existing->gh_gl->gl_name.ln_type,
                               existing->gh_gl->gl_state);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
                        printk(KERN_INFO "lock type : %d lock state : %d\n",
                               gl->gl_name.ln_type, gl->gl_state);
                        BUG();
                }

                existing = find_holder_by_owner(&gl->gl_waiters3,
                                                gh->gh_owner_pid);
                if (existing) {
                        print_symbol(KERN_WARNING "original: %s\n",
                                     existing->gh_ip);
                        print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                        BUG();
                }
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        return error;
}
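
/*
 * Illustrative async usage (not part of the original file): with GL_ASYNC
 * the enqueue returns immediately, and the caller later polls for
 * completion and then collects the result:
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *	error = gfs2_glock_nq(&gh);	(never fails for GL_ASYNC)
 *	...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */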

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                spin_lock(&gl->gl_spin);
                gl->gl_stamp = jiffies;
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags))
                delay = gl->gl_ops->go_min_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}
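
/*
 * Illustrative usage (not part of the original file; gl_a and gl_b are
 * hypothetical glocks): to lock several objects at once, fill an array of
 * holders and let gfs2_glock_nq_m() sort them into a deadlock-free order:
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	...
 *	gfs2_glock_dq_uninit_m(2, ghs);
 */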

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
        if (time_before(now, holdtime))
                delay = holdtime - now;

        handle_callback(gl, state, 1, delay);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @sdp: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the index of the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          unsigned int hash)
{
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
        if (hlist_empty(head))
                goto out;
        gl = list_entry(head->first, struct gfs2_glock, gl_list);
        while(1) {
                if (!sdp || gl->gl_sbd == sdp) {
                        gfs2_glock_hold(gl);
                        read_unlock(gl_lock_addr(hash));
                        if (prev)
                                gfs2_glock_put(prev);
                        prev = gl;
                        examiner(gl);
                        has_entries = 1;
                        read_lock(gl_lock_addr(hash));
                }
                if (gl->gl_list.next == NULL)
                        break;
                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        }
out:
        read_unlock(gl_lock_addr(hash));
        if (prev)
                gfs2_glock_put(prev);
        cond_resched();
        return has_entries;
}

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
                return;

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        goto out_schedule;
                gfs2_glmutex_unlock(gl);
        }
        return;

out_schedule:
        gfs2_glmutex_unlock(gl);
        gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        } else {
                spin_unlock(&sdp->sd_reclaim_lock);
        }

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
                gfs2_glmutex_unlock(gl);
        }
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;
                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                        if (examine_bucket(clear_glock, sdp, x))
                                cont = 1;
                }

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                down_write(&gfs2_umount_flush_sem);
                invalidate_inodes(sdp->sd_vfs);
                up_write(&gfs2_umount_flush_sem);
                msleep(10);
        }
}

/*
 * Diagnostic routines to help debug distributed deadlock
 */

static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
                              unsigned long address)
{
        char buffer[KSYM_SYMBOL_LEN];

        sprint_symbol(buffer, address);
        print_dbg(gi, fmt, buffer);
}

/**
 * dump_holder - print information about a glock holder
 * @gi: the glock iterator (NULL to print to the console)
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct glock_iter *gi, char *str,
                       struct gfs2_holder *gh)
{
        unsigned int x;
        struct task_struct *gh_owner;

        print_dbg(gi, "  %s\n", str);
        if (gh->gh_owner_pid) {
                print_dbg(gi, "    owner = %ld ", (long)gh->gh_owner_pid);
                gh_owner = find_task_by_pid(gh->gh_owner_pid);
                if (gh_owner)
                        print_dbg(gi, "(%s)\n", gh_owner->comm);
                else
                        print_dbg(gi, "(ended)\n");
        } else
                print_dbg(gi, "    owner = -1\n");
        print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
        print_dbg(gi, "    gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");
        print_dbg(gi, "    error = %d\n", gh->gh_error);
        print_dbg(gi, "    gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");
        gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);

        return 0;
}

/**
 * dump_inode - print information about an inode
 * @gi: the glock iterator (NULL to print to the console)
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
{
        unsigned int x;

        print_dbg(gi, "  Inode:\n");
        print_dbg(gi, "    num = %llu/%llu\n",
                  (unsigned long long)ip->i_no_formal_ino,
                  (unsigned long long)ip->i_no_addr);
        print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
        print_dbg(gi, "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        print_dbg(gi, " %u", x);
        print_dbg(gi, " \n");
        return 0;
}

/**
 * dump_glock - print information about a glock
 * @gi: the glock iterator (NULL to print to the console)
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;
        struct task_struct *gl_owner;

        spin_lock(&gl->gl_spin);

        print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
                  (unsigned long long)gl->gl_name.ln_number);
        print_dbg(gi, "  gl_flags =");
        for (x = 0; x < 32; x++) {
                if (test_bit(x, &gl->gl_flags))
                        print_dbg(gi, " %u", x);
        }
        if (!test_bit(GLF_LOCK, &gl->gl_flags))
                print_dbg(gi, " (unlocked)");
        print_dbg(gi, " \n");
        print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
        print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
        if (gl->gl_owner_pid) {
                gl_owner = find_task_by_pid(gl->gl_owner_pid);
                if (gl_owner)
                        print_dbg(gi, "  gl_owner = pid %d (%s)\n",
                                  gl->gl_owner_pid, gl_owner->comm);
                else
                        print_dbg(gi, "  gl_owner = %d (ended)\n",
                                  gl->gl_owner_pid);
        } else
                print_dbg(gi, "  gl_owner = -1\n");
        print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
        print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
        print_dbg(gi, "  le = %s\n",
                  (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
        print_dbg(gi, "  reclaim = %s\n",
                  (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
                          gl->gl_aspace->i_mapping->nrpages);
        else
                print_dbg(gi, "  aspace = no\n");
        print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
        if (gl->gl_req_gh) {
                error = dump_holder(gi, "Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder(gi, "Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder(gi, "Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder(gi, "Waiter3", gh);
                if (error)
                        goto out;
        }
        if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
                          gl->gl_demote_state, (unsigned long long)
                          (jiffies - gl->gl_demote_time)*(1000000/HZ));
        }
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(gi, gl->gl_object);
                        if (error)
                                goto out;
                } else {
                        error = -ENOBUFS;
                        print_dbg(gi, "  Inode: busy\n");
                }
        }

        error = 0;

out:
        spin_unlock(&gl->gl_spin);
        return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the state of every glock belonging to @sdp to the console.
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

                read_lock(gl_lock_addr(x));

                hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
                        if (gl->gl_sbd != sdp)
                                continue;

                        error = dump_glock(NULL, gl);
                        if (error)
                                break;
                }

                read_unlock(gl_lock_addr(x));

                if (error)
                        break;
        }

        return error;
}

/**
 * gfs2_scand - Look for cached glocks and inodes to toss from memory
 * @data: unused (the daemon scans every bucket regardless of filesystem)
 *
 * One of these daemons runs, finding candidates to add to sd_reclaim_list.
 * See gfs2_glockd()
 */

static int gfs2_scand(void *data)
{
        unsigned x;
        unsigned delay;

        while (!kthread_should_stop()) {
                for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                        examine_bucket(scan_glock, NULL, x);
                if (freezing(current))
                        refrigerator();
                delay = scand_secs;
                if (delay < 1)
                        delay = 1;
                schedule_timeout_interruptible(delay * HZ);
        }

        return 0;
}

int __init gfs2_glock_init(void)
{
        unsigned i;
        for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
                INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
        }
#ifdef GL_HASH_LOCK_SZ
        for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
                rwlock_init(&gl_hash_locks[i]);
        }
#endif

        scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
        if (IS_ERR(scand_process))
                return PTR_ERR(scand_process);

        /* create_workqueue() returns NULL on failure, not an ERR_PTR */
        glock_workqueue = create_workqueue("glock_workqueue");
        if (!glock_workqueue) {
                kthread_stop(scand_process);
                return -ENOMEM;
        }

        return 0;
}

void gfs2_glock_exit(void)
{
        destroy_workqueue(glock_workqueue);
        kthread_stop(scand_process);
}

module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");

static int gfs2_glock_iter_next(struct glock_iter *gi)
{
        struct gfs2_glock *gl;

restart:
        read_lock(gl_lock_addr(gi->hash));
        gl = gi->gl;
        if (gl) {
                gi->gl = hlist_entry(gl->gl_list.next,
                                     struct gfs2_glock, gl_list);
                if (gi->gl)
                        gfs2_glock_hold(gi->gl);
        }
        read_unlock(gl_lock_addr(gi->hash));
        if (gl)
                gfs2_glock_put(gl);
        if (gl && gi->gl == NULL)
                gi->hash++;
        while(gi->gl == NULL) {
                if (gi->hash >= GFS2_GL_HASH_SIZE)
                        return 1;
                read_lock(gl_lock_addr(gi->hash));
                gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
                                     struct gfs2_glock, gl_list);
                if (gi->gl)
                        gfs2_glock_hold(gi->gl);
                read_unlock(gl_lock_addr(gi->hash));
                gi->hash++;
        }

        if (gi->sdp != gi->gl->gl_sbd)
                goto restart;

        return 0;
}

static void gfs2_glock_iter_free(struct glock_iter *gi)
{
        if (gi->gl)
                gfs2_glock_put(gi->gl);
        kfree(gi);
}

static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
{
        struct glock_iter *gi;

        gi = kmalloc(sizeof (*gi), GFP_KERNEL);
        if (!gi)
                return NULL;

        gi->sdp = sdp;
        gi->hash = 0;
        gi->seq = NULL;
        gi->gl = NULL;
        memset(gi->string, 0, sizeof(gi->string));

        if (gfs2_glock_iter_next(gi)) {
                gfs2_glock_iter_free(gi);
                return NULL;
        }

        return gi;
}

static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
{
        struct glock_iter *gi;
        loff_t n = *pos;

        gi = gfs2_glock_iter_init(file->private);
        if (!gi)
                return NULL;

        while(n--) {
                if (gfs2_glock_iter_next(gi)) {
                        gfs2_glock_iter_free(gi);
                        return NULL;
                }
        }

        return gi;
}

static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
                                 loff_t *pos)
{
        struct glock_iter *gi = iter_ptr;

        (*pos)++;

        if (gfs2_glock_iter_next(gi)) {
                gfs2_glock_iter_free(gi);
                return NULL;
        }

        return gi;
}

static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
{
        struct glock_iter *gi = iter_ptr;
        if (gi)
                gfs2_glock_iter_free(gi);
}

static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
{
        struct glock_iter *gi = iter_ptr;

        gi->seq = file;
        dump_glock(gi, gi->gl);

        return 0;
}

static const struct seq_operations gfs2_glock_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        int ret;

        ret = seq_open(file, &gfs2_glock_seq_ops);
        if (ret)
                return ret;

        seq = file->private_data;
        seq->private = inode->i_private;

        return 0;
}

static const struct file_operations gfs2_debug_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_debugfs_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
        sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
        if (!sdp->debugfs_dir)
                return -ENOMEM;
        sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
                                                         S_IFREG | S_IRUGO,
                                                         sdp->debugfs_dir, sdp,
                                                         &gfs2_debug_fops);
        if (!sdp->debugfs_dentry_glocks)
                return -ENOMEM;

        return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
        if (sdp && sdp->debugfs_dir) {
                if (sdp->debugfs_dentry_glocks) {
                        debugfs_remove(sdp->debugfs_dentry_glocks);
                        sdp->debugfs_dentry_glocks = NULL;
                }
                debugfs_remove(sdp->debugfs_dir);
                sdp->debugfs_dir = NULL;
        }
}

int gfs2_register_debugfs(void)
{
        gfs2_root = debugfs_create_dir("gfs2", NULL);
        return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
        debugfs_remove(gfs2_root);
        gfs2_root = NULL;
}