/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
        struct gfs2_sbd *sdp;           /* incore superblock */
        struct rhashtable_iter hti;     /* rhashtable iterator */
        struct gfs2_glock *gl;          /* current glock struct */
        loff_t last_pos;                /* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static struct rhashtable_params ht_parms = {
        .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
        .key_len = sizeof(struct lm_lockname),
        .key_offset = offsetof(struct gfs2_glock, gl_name),
        .head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

void gfs2_glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (gl->gl_ops->go_flags & GLOF_ASPACE) {
                kmem_cache_free(gfs2_glock_aspace_cachep, gl);
        } else {
                kfree(gl->gl_lksb.sb_lvbptr);
                kmem_cache_free(gfs2_glock_cachep, gl);
        }
        if (atomic_dec_and_test(&sdp->sd_glock_disposal))
                wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
        GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
        lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gl->gl_state == LM_ST_UNLOCKED)
                return 0;
        if (!list_empty(&gl->gl_holders))
                return 0;
        if (glops->go_demote_ok)
                return glops->go_demote_ok(gl);
        return 1;
}


void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
        spin_lock(&lru_lock);

        if (!list_empty(&gl->gl_lru))
                list_del_init(&gl->gl_lru);
        else
                atomic_inc(&lru_count);

        list_add_tail(&gl->gl_lru, &lru_list);
        set_bit(GLF_LRU, &gl->gl_flags);
        spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
        spin_lock(&lru_lock);
        if (!list_empty(&gl->gl_lru)) {
                list_del_init(&gl->gl_lru);
                atomic_dec(&lru_count);
                clear_bit(GLF_LRU, &gl->gl_flags);
        }
        spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = gfs2_glock2aspace(gl);

        if (lockref_put_or_lock(&gl->gl_lockref))
                return;

        lockref_mark_dead(&gl->gl_lockref);

        gfs2_glock_remove_from_lru(gl);
        spin_unlock(&gl->gl_lockref.lock);
        rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
        GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
        GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
        trace_gfs2_glock_put(gl);
        sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
        const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
        if ((gh->gh_state == LM_ST_EXCLUSIVE ||
             gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
                return 0;
        if (gl->gl_state == gh->gh_state)
                return 1;
        if (gh->gh_flags & GL_EXACT)
                return 0;
        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
                        return 1;
                if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
                        return 1;
        }
        if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
                return 1;
        return 0;
}
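
/*
 * Editorial note, not part of the original source: an illustrative
 * summary of how the may_grant() rules above play out for a holder
 * "gh" that is not at the head of the queue:
 *
 *      request SH, glock already SH            -> granted
 *      request EX (or head requested EX)       -> refused
 *      request SH, glock EX, head holds SH     -> granted
 *      request SH with GL_EXACT, glock DF      -> refused
 *      request DF with LM_FLAG_ANY, glock SH   -> granted
 */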

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_atomic();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{
        struct gfs2_holder *gh, *tmp;

        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (ret & LM_OUT_ERROR)
                        gh->gh_error = -EIO;
                else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        gh->gh_error = GLR_TRYFAILED;
                else
                        continue;
                list_del_init(&gh->gh_list);
                trace_gfs2_glock_queue(gh, 0);
                gfs2_holder_wake(gh);
        }
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh, *tmp;
        int ret;

restart:
        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (may_grant(gl, gh)) {
                        if (gh->gh_list.prev == &gl->gl_holders &&
                            glops->go_lock) {
                                spin_unlock(&gl->gl_lockref.lock);
                                /* FIXME: eliminate this eventually */
                                ret = glops->go_lock(gh);
                                spin_lock(&gl->gl_lockref.lock);
                                if (ret) {
                                        if (ret == 1)
                                                return 2;
                                        gh->gh_error = ret;
                                        list_del_init(&gh->gh_list);
                                        trace_gfs2_glock_queue(gh, 0);
                                        gfs2_holder_wake(gh);
                                        goto restart;
                                }
                                set_bit(HIF_HOLDER, &gh->gh_iflags);
                                trace_gfs2_promote(gh, 1);
                                gfs2_holder_wake(gh);
                                goto restart;
                        }
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        trace_gfs2_promote(gh, 0);
                        gfs2_holder_wake(gh);
                        continue;
                }
                if (gh->gh_list.prev == &gl->gl_holders)
                        return 1;
                do_error(gl, 0);
                break;
        }
        return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
                if (held2)
                        gl->gl_lockref.count++;
                else
                        gl->gl_lockref.count--;
        }
        if (held1 && held2 && list_empty(&gl->gl_holders))
                clear_bit(GLF_QUEUED, &gl->gl_flags);

        if (new_state != gl->gl_target)
                /* shorten our minimum hold time */
                gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
                                       GL_GLOCK_MIN_HOLD);
        gl->gl_state = new_state;
        gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_atomic();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh;
        unsigned state = ret & LM_OUT_ST_MASK;
        int rv;

        spin_lock(&gl->gl_lockref.lock);
        trace_gfs2_glock_state_change(gl, state);
        state_change(gl, state);
        gh = find_first_waiter(gl);

        /* Demote to UN request arrived during demote to SH or DF */
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
            state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
                gl->gl_target = LM_ST_UNLOCKED;

        /* Check for state != intended state */
        if (unlikely(state != gl->gl_target)) {
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
                        if (ret & LM_OUT_CANCELED) {
                                if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
                                        list_move_tail(&gh->gh_list, &gl->gl_holders);
                                gh = find_first_waiter(gl);
                                gl->gl_target = gh->gh_state;
                                goto retry;
                        }
                        /* Some error or failed "try lock" - report it */
                        if ((ret & LM_OUT_ERROR) ||
                            (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
                                gl->gl_target = gl->gl_state;
                                do_error(gl, ret);
                                goto out;
                        }
                }
                switch(state) {
                /* Unlocked due to conversion deadlock, try again */
                case LM_ST_UNLOCKED:
retry:
                        do_xmote(gl, gh, gl->gl_target);
                        break;
                /* Conversion fails, unlock and try again */
                case LM_ST_SHARED:
                case LM_ST_DEFERRED:
                        do_xmote(gl, gh, LM_ST_UNLOCKED);
                        break;
                default: /* Everything else */
                        pr_err("wanted %u got %u\n", gl->gl_target, state);
                        GLOCK_BUG_ON(gl, 1);
                }
                spin_unlock(&gl->gl_lockref.lock);
                return;
        }

        /* Fast path - we got what we asked for */
        if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
                gfs2_demote_wake(gl);
        if (state != LM_ST_UNLOCKED) {
                if (glops->go_xmote_bh) {
                        spin_unlock(&gl->gl_lockref.lock);
                        rv = glops->go_xmote_bh(gl, gh);
                        spin_lock(&gl->gl_lockref.lock);
                        if (rv) {
                                do_error(gl, rv);
                                goto out;
                        }
                }
                rv = do_promote(gl);
                if (rv == 2)
                        goto out_locked;
        }
out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
        spin_unlock(&gl->gl_lockref.lock);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
        int ret;

        lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
                      LM_FLAG_PRIORITY);
        GLOCK_BUG_ON(gl, gl->gl_state == target);
        GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
        if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
            glops->go_inval) {
                set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                do_error(gl, 0); /* Fail queued try locks */
        }
        gl->gl_req = target;
        set_bit(GLF_BLOCKING, &gl->gl_flags);
        if ((gl->gl_req == LM_ST_UNLOCKED) ||
            (gl->gl_state == LM_ST_EXCLUSIVE) ||
            (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
                clear_bit(GLF_BLOCKING, &gl->gl_flags);
        spin_unlock(&gl->gl_lockref.lock);
        if (glops->go_sync)
                glops->go_sync(gl);
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
        clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

        gfs2_glock_hold(gl);
        if (sdp->sd_lockstruct.ls_ops->lm_lock) {
                /* lock_dlm */
                ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
                if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
                    target == LM_ST_UNLOCKED &&
                    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
                        finish_xmote(gl, target);
                        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                                gfs2_glock_put(gl);
                }
                else if (ret) {
                        pr_err("lm_lock ret %d\n", ret);
                        GLOCK_BUG_ON(gl, 1);
                }
        } else { /* lock_nolock */
                finish_xmote(gl, target);
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gfs2_glock_put(gl);
        }

        spin_lock(&gl->gl_lockref.lock);
}
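
/*
 * Editorial sketch, not part of the original source: the usual round
 * trip for a state change under lock_dlm looks like
 *
 *      run_queue() -> do_xmote() -> lm_lock() ... DLM callback ...
 *      gfs2_glock_complete() -> glock_work_func() -> finish_xmote()
 *      -> do_promote() -> gfs2_holder_wake()
 *
 * With lock_nolock there is no lm_lock() op, so do_xmote() calls
 * finish_xmote() directly, as seen above.
 */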

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;

        if (!list_empty(&gl->gl_holders)) {
                gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        return gh;
        }
        return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        struct gfs2_holder *gh = NULL;
        int ret;

        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                return;

        GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

        if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
            gl->gl_demote_state != gl->gl_state) {
                if (find_first_holder(gl))
                        goto out_unlock;
                if (nonblock)
                        goto out_sched;
                set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
                GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
                gl->gl_target = gl->gl_demote_state;
        } else {
                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        gfs2_demote_wake(gl);
                ret = do_promote(gl);
                if (ret == 0)
                        goto out_unlock;
                if (ret == 2)
                        goto out;
                gh = find_first_waiter(gl);
                gl->gl_target = gh->gh_state;
                if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                        do_error(gl, 0); /* Fail queued try locks */
        }
        do_xmote(gl, gh, gl->gl_target);
out:
        return;

out_sched:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        smp_mb__after_atomic();
        gl->gl_lockref.count++;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gl->gl_lockref.count--;
        return;

out_unlock:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        smp_mb__after_atomic();
        return;
}

static void delete_work_func(struct work_struct *work)
{
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct inode *inode;
        u64 no_addr = gl->gl_name.ln_number;

        /* If someone's using this glock to create a new dinode, the block must
           have been freed by another node, then re-used, in which case our
           iopen callback is too late after the fact. Ignore it. */
        if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
                goto out;

        inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
        if (inode && !IS_ERR(inode)) {
                d_prune_aliases(inode);
                iput(inode);
        }
out:
        gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
        unsigned long delay = 0;
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
        int drop_ref = 0;

        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
                finish_xmote(gl, gl->gl_reply);
                drop_ref = 1;
        }
        spin_lock(&gl->gl_lockref.lock);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            gl->gl_state != LM_ST_UNLOCKED &&
            gl->gl_demote_state != LM_ST_EXCLUSIVE) {
                unsigned long holdtime, now = jiffies;

                holdtime = gl->gl_tchange + gl->gl_hold_time;
                if (time_before(now, holdtime))
                        delay = holdtime - now;

                if (!delay) {
                        clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
                        set_bit(GLF_DEMOTE, &gl->gl_flags);
                }
        }
        run_queue(gl, 0);
        spin_unlock(&gl->gl_lockref.lock);
        if (!delay)
                gfs2_glock_put(gl);
        else {
                if (gl->gl_name.ln_type != LM_TYPE_INODE)
                        delay = 0;
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                        gfs2_glock_put(gl);
        }
        if (drop_ref)
                gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct super_block *s = sdp->sd_vfs;
        struct lm_lockname name = { .ln_number = number,
                                    .ln_type = glops->go_type,
                                    .ln_sbd = sdp };
        struct gfs2_glock *gl, *tmp = NULL;
        struct address_space *mapping;
        struct kmem_cache *cachep;
        int ret, tries = 0;

        gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
        if (gl && !lockref_get_not_dead(&gl->gl_lockref))
                gl = NULL;

        *glp = gl;
        if (gl)
                return 0;
        if (!create)
                return -ENOENT;

        if (glops->go_flags & GLOF_ASPACE)
                cachep = gfs2_glock_aspace_cachep;
        else
                cachep = gfs2_glock_cachep;
        gl = kmem_cache_alloc(cachep, GFP_NOFS);
        if (!gl)
                return -ENOMEM;

        memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

        if (glops->go_flags & GLOF_LVB) {
                gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
                if (!gl->gl_lksb.sb_lvbptr) {
                        kmem_cache_free(cachep, gl);
                        return -ENOMEM;
                }
        }

        atomic_inc(&sdp->sd_glock_disposal);
        gl->gl_node.next = NULL;
        gl->gl_flags = 0;
        gl->gl_name = name;
        gl->gl_lockref.count = 1;
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_target = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
        gl->gl_ops = glops;
        gl->gl_dstamp = 0;
        preempt_disable();
        /* We use the global stats to estimate the initial per-glock stats */
        gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
        preempt_enable();
        gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
        gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
        gl->gl_tchange = jiffies;
        gl->gl_object = NULL;
        gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
        INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
        INIT_WORK(&gl->gl_delete, delete_work_func);

        mapping = gfs2_glock2aspace(gl);
        if (mapping) {
                mapping->a_ops = &gfs2_meta_aops;
                mapping->host = s->s_bdev->bd_inode;
                mapping->flags = 0;
                mapping_set_gfp_mask(mapping, GFP_NOFS);
                mapping->private_data = NULL;
                mapping->writeback_index = 0;
        }

again:
        ret = rhashtable_lookup_insert_fast(&gl_hash_table, &gl->gl_node,
                                            ht_parms);
        if (ret == 0) {
                *glp = gl;
                return 0;
        }

        if (ret == -EEXIST) {
                ret = 0;
                tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
                if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
                        if (++tries < 100) {
                                cond_resched();
                                goto again;
                        }
                        tmp = NULL;
                        ret = -ENOMEM;
                }
        } else {
                WARN_ON_ONCE(ret);
        }
        kfree(gl->gl_lksb.sb_lvbptr);
        kmem_cache_free(cachep, gl);
        atomic_dec(&sdp->sd_glock_disposal);
        *glp = tmp;

        return ret;
}
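
/*
 * Illustrative usage sketch (not in the original file; caller context
 * assumed): look up or create the inode glock for block "no_addr",
 * then drop the reference once done with it:
 *
 *      struct gfs2_glock *gl;
 *      int error;
 *
 *      error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops,
 *                             CREATE, &gl);
 *      if (error)
 *              return error;
 *      ...
 *      gfs2_glock_put(gl);
 */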

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = _RET_IP_;
        gh->gh_owner_pid = get_pid(task_pid(current));
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}
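
/*
 * Illustrative lock/unlock sequence (a sketch, not part of the
 * original source): a stack-allocated holder is initialized, queued
 * and later dequeued:
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *      error = gfs2_glock_nq(&gh);
 *      if (!error) {
 *              ...                     (glock held in SH here)
 *              gfs2_glock_dq(&gh);
 *      }
 *      gfs2_holder_uninit(&gh);
 *
 * The gfs2_glock_nq_init() helper in glock.h wraps the init+nq pair.
 */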

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = _RET_IP_;
        put_pid(gh->gh_owner_pid);
        gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        put_pid(gh->gh_owner_pid);
        gfs2_glock_put(gh->gh_gl);
        gfs2_holder_mark_uninitialized(gh);
        gh->gh_ip = 0;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        unsigned long time1 = jiffies;

        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
        if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
                /* Lengthen the minimum hold time. */
                gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
                                              GL_GLOCK_HOLD_INCR,
                                              GL_GLOCK_MAX_HOLD);
        return gh->gh_error;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                            unsigned long delay, bool remote)
{
        int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

        set_bit(bit, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                   gl->gl_demote_state != state) {
                gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        if (gl->gl_ops->go_callback)
                gl->gl_ops->go_callback(gl, remote);
        trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        if (seq) {
                seq_vprintf(seq, fmt, args);
        } else {
                vaf.fmt = fmt;
                vaf.va = &args;

                pr_err("%pV", &vaf);
        }

        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct list_head *insert_pt = NULL;
        struct gfs2_holder *gh2;
        int try_futile = 0;

        BUG_ON(gh->gh_owner_pid == NULL);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        try_futile = !may_grant(gl, gh);
                if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                        goto fail;
        }

        list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
                if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
                    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
                        goto trap_recursive;
                if (try_futile &&
                    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
                        gh->gh_error = GLR_TRYFAILED;
                        gfs2_holder_wake(gh);
                        return;
                }
                if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
                        continue;
                if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
                        insert_pt = &gh2->gh_list;
        }
        set_bit(GLF_QUEUED, &gl->gl_flags);
        trace_gfs2_glock_queue(gh, 1);
        gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
        gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
        if (likely(insert_pt == NULL)) {
                list_add_tail(&gh->gh_list, &gl->gl_holders);
                if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
                        goto do_cancel;
                return;
        }
        list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
        gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
        if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
                spin_unlock(&gl->gl_lockref.lock);
                if (sdp->sd_lockstruct.ls_ops->lm_cancel)
                        sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
                spin_lock(&gl->gl_lockref.lock);
        }
        return;

trap_recursive:
        pr_err("original: %pSR\n", (void *)gh2->gh_ip);
        pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
        pr_err("lock type: %d req lock state : %d\n",
               gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
        pr_err("new: %pSR\n", (void *)gh->gh_ip);
        pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
        pr_err("lock type: %d req lock state : %d\n",
               gh->gh_gl->gl_name.ln_type, gh->gh_state);
        gfs2_dump_glock(NULL, gl);
        BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        int error = 0;

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        if (test_bit(GLF_LRU, &gl->gl_flags))
                gfs2_glock_remove_from_lru(gl);

        spin_lock(&gl->gl_lockref.lock);
        add_to_queue(gh);
        if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
                     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
                set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                gl->gl_lockref.count++;
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gl->gl_lockref.count--;
        }
        run_queue(gl, 1);
        spin_unlock(&gl->gl_lockref.lock);

        if (!(gh->gh_flags & GL_ASYNC))
                error = gfs2_glock_wait(gh);

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
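
/*
 * Illustrative asynchronous pattern (a sketch, not part of the
 * original source): with GL_ASYNC set, gfs2_glock_nq() queues the
 * holder and returns; the caller polls and then collects the result:
 *
 *      gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *      gfs2_glock_nq(&gh);
 *      while (!gfs2_glock_poll(&gh))
 *              ;                       (do other work here)
 *      error = gfs2_glock_wait(&gh);
 */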

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned delay = 0;
        int fast_path = 0;

        spin_lock(&gl->gl_lockref.lock);
        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);

        list_del_init(&gh->gh_list);
        clear_bit(HIF_HOLDER, &gh->gh_iflags);
        if (find_first_holder(gl) == NULL) {
                if (glops->go_unlock) {
                        GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
                        spin_unlock(&gl->gl_lockref.lock);
                        glops->go_unlock(gh);
                        spin_lock(&gl->gl_lockref.lock);
                        clear_bit(GLF_LOCK, &gl->gl_flags);
                }
                if (list_empty(&gl->gl_holders) &&
                    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
                    !test_bit(GLF_DEMOTE, &gl->gl_flags))
                        fast_path = 1;
        }
        if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
            (glops->go_flags & GLOF_LRU))
                gfs2_glock_add_to_lru(gl);

        trace_gfs2_glock_queue(gh, 0);
        spin_unlock(&gl->gl_lockref.lock);
        if (likely(fast_path))
                return;

        gfs2_glock_hold(gl);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
            gl->gl_name.ln_type == LM_TYPE_INODE)
                delay = gl->gl_hold_time;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, u16 flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}
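
/*
 * Illustrative sketch (not part of the original source): acquiring
 * two glocks deadlock-free via the sorted multi-lock path above:
 *
 *      struct gfs2_holder ghs[2];
 *      int error;
 *
 *      gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *      gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *      error = gfs2_glock_nq_m(2, ghs);
 *      ...
 *      gfs2_glock_dq_m(2, ghs);
 *      gfs2_holder_uninit(&ghs[0]);
 *      gfs2_holder_uninit(&ghs[1]);
 */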

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        while (num_gh--)
                gfs2_glock_dq(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
        unsigned long delay = 0;
        unsigned long holdtime;
        unsigned long now = jiffies;

        gfs2_glock_hold(gl);
        holdtime = gl->gl_tchange + gl->gl_hold_time;
        if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
            gl->gl_name.ln_type == LM_TYPE_INODE) {
                if (time_before(now, holdtime))
                        delay = holdtime - now;
                if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
                        delay = gl->gl_hold_time;
        }

        spin_lock(&gl->gl_lockref.lock);
        handle_callback(gl, state, delay, true);
        spin_unlock(&gl->gl_lockref.lock);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
        const struct gfs2_holder *gh;

        if (gl->gl_reply & ~LM_OUT_ST_MASK)
                return 0;
        if (gl->gl_target == LM_ST_UNLOCKED)
                return 0;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;
                if (LM_FLAG_NOEXP & gh->gh_flags)
                        return 0;
        }

        return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is protected by gl_lockref.lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
        struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

        spin_lock(&gl->gl_lockref.lock);
        gl->gl_reply = ret;

        if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
                if (gfs2_should_freeze(gl)) {
                        set_bit(GLF_FROZEN, &gl->gl_flags);
                        spin_unlock(&gl->gl_lockref.lock);
                        return;
                }
        }

        gl->gl_lockref.count++;
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        spin_unlock(&gl->gl_lockref.lock);

        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
}

static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_glock *gla, *glb;

        gla = list_entry(a, struct gfs2_glock, gl_lru);
        glb = list_entry(b, struct gfs2_glock, gl_lru);

        if (gla->gl_name.ln_number > glb->gl_name.ln_number)
                return 1;
        if (gla->gl_name.ln_number < glb->gl_name.ln_number)
                return -1;

        return 0;
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so we sort the
 * glocks by number (i.e. disk location of the inodes) so that any
 * such accesses are sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
        struct gfs2_glock *gl;

        list_sort(NULL, list, glock_cmp);

        while(!list_empty(list)) {
                gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
                if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
                        list_add(&gl->gl_lru, &lru_list);
                        atomic_inc(&lru_count);
                        continue;
                }
                if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                        spin_unlock(&gl->gl_lockref.lock);
                        goto add_back_to_lru;
                }
                clear_bit(GLF_LRU, &gl->gl_flags);
                gl->gl_lockref.count++;
                if (demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
                WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gl->gl_lockref.count--;
                spin_unlock(&gl->gl_lockref.lock);
                cond_resched_lock(&lru_lock);
        }
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static long gfs2_scan_glock_lru(int nr)
{
        struct gfs2_glock *gl;
        LIST_HEAD(skipped);
        LIST_HEAD(dispose);
        long freed = 0;

        spin_lock(&lru_lock);
        while ((nr-- >= 0) && !list_empty(&lru_list)) {
                gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

                /* Test for being demotable */
                if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                        list_move(&gl->gl_lru, &dispose);
                        atomic_dec(&lru_count);
                        freed++;
                        continue;
                }

                list_move(&gl->gl_lru, &skipped);
        }
        list_splice(&skipped, &lru_list);
        if (!list_empty(&dispose))
                gfs2_dispose_glock_lru(&dispose);
        spin_unlock(&lru_lock);

        return freed;
}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
                                            struct shrink_control *sc)
{
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;
        return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
                                             struct shrink_control *sc)
{
        return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
        .seeks = DEFAULT_SEEKS,
        .count_objects = gfs2_glock_shrink_count,
        .scan_objects = gfs2_glock_shrink_scan,
};

/**
 * glock_hash_walk - Call a function for each glock in the hash table
 * @examiner: the function
 * @sdp: the filesystem
 *
 */

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        struct rhash_head *pos;
        const struct bucket_table *tbl;
        int i;

        rcu_read_lock();
        tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
        for (i = 0; i < tbl->size; i++) {
                rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
                        if ((gl->gl_name.ln_sbd == sdp) &&
                            lockref_get_not_dead(&gl->gl_lockref))
                                examiner(gl);
                }
        }
        rcu_read_unlock();
        cond_resched();
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{
        if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
                goto out;
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
out:
                gfs2_glock_put(gl);
        }
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
        gfs2_glock_remove_from_lru(gl);

        spin_lock(&gl->gl_lockref.lock);
        if (gl->gl_state != LM_ST_UNLOCKED)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
        spin_unlock(&gl->gl_lockref.lock);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
        glock_hash_walk(thaw_glock, sdp);
}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_lockref.lock);
        gfs2_dump_glock(seq, gl);
        spin_unlock(&gl->gl_lockref.lock);
}

static void dump_glock_func(struct gfs2_glock *gl)
{
        dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
        set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
        flush_workqueue(glock_workqueue);
        glock_hash_walk(clear_glock, sdp);
        flush_workqueue(glock_workqueue);
        wait_event_timeout(sdp->sd_glock_wait,
                           atomic_read(&sdp->sd_glock_disposal) == 0,
                           HZ * 600);
        glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
        struct gfs2_glock *gl = ip->i_gl;
        int ret;

        ret = gfs2_truncatei_resume(ip);
        gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

        spin_lock(&gl->gl_lockref.lock);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl, 1);
        spin_unlock(&gl->gl_lockref.lock);
}

static const char *state2str(unsigned state)
{
        switch(state) {
        case LM_ST_UNLOCKED:
                return "UN";
        case LM_ST_SHARED:
                return "SH";
        case LM_ST_DEFERRED:
                return "DF";
        case LM_ST_EXCLUSIVE:
                return "EX";
        }
        return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
        char *p = buf;
        if (flags & LM_FLAG_TRY)
                *p++ = 't';
        if (flags & LM_FLAG_TRY_1CB)
                *p++ = 'T';
        if (flags & LM_FLAG_NOEXP)
                *p++ = 'e';
        if (flags & LM_FLAG_ANY)
                *p++ = 'A';
        if (flags & LM_FLAG_PRIORITY)
                *p++ = 'p';
        if (flags & GL_ASYNC)
                *p++ = 'a';
        if (flags & GL_EXACT)
                *p++ = 'E';
        if (flags & GL_NOCACHE)
                *p++ = 'c';
        if (test_bit(HIF_HOLDER, &iflags))
                *p++ = 'H';
        if (test_bit(HIF_WAIT, &iflags))
                *p++ = 'W';
        if (test_bit(HIF_FIRST, &iflags))
                *p++ = 'F';
        *p = 0;
        return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
        struct task_struct *gh_owner = NULL;
        char flags_buf[32];

        rcu_read_lock();
        if (gh->gh_owner_pid)
                gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
        gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
                       state2str(gh->gh_state),
                       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
                       gh->gh_error,
                       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
                       gh_owner ? gh_owner->comm : "(ended)",
                       (void *)gh->gh_ip);
        rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
        const unsigned long *gflags = &gl->gl_flags;
        char *p = buf;

        if (test_bit(GLF_LOCK, gflags))
                *p++ = 'l';
        if (test_bit(GLF_DEMOTE, gflags))
                *p++ = 'D';
        if (test_bit(GLF_PENDING_DEMOTE, gflags))
                *p++ = 'd';
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
                *p++ = 'p';
        if (test_bit(GLF_DIRTY, gflags))
                *p++ = 'y';
        if (test_bit(GLF_LFLUSH, gflags))
                *p++ = 'f';
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
                *p++ = 'i';
        if (test_bit(GLF_REPLY_PENDING, gflags))
                *p++ = 'r';
        if (test_bit(GLF_INITIAL, gflags))
                *p++ = 'I';
        if (test_bit(GLF_FROZEN, gflags))
                *p++ = 'F';
        if (test_bit(GLF_QUEUED, gflags))
                *p++ = 'q';
        if (test_bit(GLF_LRU, gflags))
                *p++ = 'L';
        if (gl->gl_object)
                *p++ = 'o';
        if (test_bit(GLF_BLOCKING, gflags))
                *p++ = 'b';
        *p = 0;
        return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 */

void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned long long dtime;
        const struct gfs2_holder *gh;
        char gflags_buf[32];

        dtime = jiffies - gl->gl_demote_time;
        dtime *= 1000000/HZ; /* demote time in uSec */
        if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
                dtime = 0;
        gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
                       state2str(gl->gl_state),
                       gl->gl_name.ln_type,
                       (unsigned long long)gl->gl_name.ln_number,
                       gflags2str(gflags_buf, gl),
                       state2str(gl->gl_target),
                       state2str(gl->gl_demote_state), dtime,
                       atomic_read(&gl->gl_ail_count),
                       atomic_read(&gl->gl_revokes),
                       (int)gl->gl_lockref.count, gl->gl_hold_time);

        list_for_each_entry(gh, &gl->gl_holders, gh_list)
                dump_holder(seq, gh);

        if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
                glops->go_dump(seq, gl);
}
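
/*
 * Editorial example (values invented, not taken from a real system)
 * of the format documented above - one G line followed by one of its
 * H lines:
 *
 *      G: s:SH n:2/27bc f:lq t:SH d:EX/0 a:0 v:0 r:3 m:10
 *       H: s:SH f:H e:0 p:1223 [cat] gfs2_open+0x12/0x34
 */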

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_glock *gl = iter_ptr;

        seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
                   gl->gl_name.ln_type,
                   (unsigned long long)gl->gl_name.ln_number,
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
        return 0;
}

static const char *gfs2_gltype[] = {
        "type",
        "reserved",
        "nondisk",
        "inode",
        "rgrp",
        "meta",
        "iopen",
        "flock",
        "plock",
        "quota",
        "journal",
};

static const char *gfs2_stype[] = {
        [GFS2_LKS_SRTT] = "srtt",
        [GFS2_LKS_SRTTVAR] = "srttvar",
        [GFS2_LKS_SRTTB] = "srttb",
        [GFS2_LKS_SRTTVARB] = "srttvarb",
        [GFS2_LKS_SIRT] = "sirt",
        [GFS2_LKS_SIRTVAR] = "sirtvar",
        [GFS2_LKS_DCOUNT] = "dlm",
        [GFS2_LKS_QCOUNT] = "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_sbd *sdp = seq->private;
        loff_t pos = *(loff_t *)iter_ptr;
        unsigned index = pos >> 3;
        unsigned subindex = pos & 0x07;
        int i;

        if (index == 0 && subindex != 0)
                return 0;

        seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
                   (index == 0) ? "cpu": gfs2_stype[subindex]);

        for_each_possible_cpu(i) {
                const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

                if (index == 0)
                        seq_printf(seq, " %15u", i);
                else
                        seq_printf(seq, " %15llu", (unsigned long long)lkstats->
                                   lkstats[index - 1].stats[subindex]);
        }
        seq_putc(seq, '\n');
        return 0;
}

int __init gfs2_glock_init(void)
{
        int ret;

        ret = rhashtable_init(&gl_hash_table, &ht_parms);
        if (ret < 0)
                return ret;

        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
                                          WQ_HIGHPRI | WQ_FREEZABLE, 0);
        if (!glock_workqueue) {
                rhashtable_destroy(&gl_hash_table);
                return -ENOMEM;
        }
        gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
                                                WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                0);
        if (!gfs2_delete_workqueue) {
                destroy_workqueue(glock_workqueue);
                rhashtable_destroy(&gl_hash_table);
                return -ENOMEM;
        }

        ret = register_shrinker(&glock_shrinker);
        if (ret) {
                destroy_workqueue(gfs2_delete_workqueue);
                destroy_workqueue(glock_workqueue);
                rhashtable_destroy(&gl_hash_table);
                return ret;
        }

        return 0;
}

void gfs2_glock_exit(void)
{
        unregister_shrinker(&glock_shrinker);
        rhashtable_destroy(&gl_hash_table);
        destroy_workqueue(glock_workqueue);
        destroy_workqueue(gfs2_delete_workqueue);
}

static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
        while ((gi->gl = rhashtable_walk_next(&gi->hti))) {
                if (IS_ERR(gi->gl)) {
                        if (PTR_ERR(gi->gl) == -EAGAIN)
                                continue;
                        gi->gl = NULL;
                        return;
                }
                /* Skip entries for other sb and dead entries */
                if (gi->sdp == gi->gl->gl_name.ln_sbd &&
                    !__lockref_is_dead(&gi->gl->gl_lockref))
                        return;
        }
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct gfs2_glock_iter *gi = seq->private;
        loff_t n = *pos;
        int ret;

        if (gi->last_pos <= *pos)
                n = (*pos - gi->last_pos);

        ret = rhashtable_walk_start(&gi->hti);
        if (ret)
                return NULL;

        do {
                gfs2_glock_iter_next(gi);
        } while (gi->gl && n--);

        gi->last_pos = *pos;
        return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
                                 loff_t *pos)
{
        struct gfs2_glock_iter *gi = seq->private;

        (*pos)++;
        gi->last_pos = *pos;
        gfs2_glock_iter_next(gi);
        return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_glock_iter *gi = seq->private;

        gi->gl = NULL;
        rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
        dump_glock(seq, iter_ptr);
        return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
        preempt_disable();
        if (*pos >= GFS2_NR_SBSTATS)
                return NULL;
        return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
                                   loff_t *pos)
{
        (*pos)++;
        if (*pos >= GFS2_NR_SBSTATS)
                return NULL;
        return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
        preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
        .start = gfs2_sbstats_seq_start,
        .next  = gfs2_sbstats_seq_next,
        .stop  = gfs2_sbstats_seq_stop,
        .show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
        int ret = seq_open_private(file, &gfs2_glock_seq_ops,
                                   sizeof(struct gfs2_glock_iter));
        if (ret == 0) {
                struct seq_file *seq = file->private_data;
                struct gfs2_glock_iter *gi = seq->private;

                gi->sdp = inode->i_private;
                gi->last_pos = 0;
                seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
                if (seq->buf)
                        seq->size = GFS2_SEQ_GOODSIZE;
                gi->gl = NULL;
                ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
        }
        return ret;
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct gfs2_glock_iter *gi = seq->private;

        gi->gl = NULL;
        rhashtable_walk_exit(&gi->hti);
        return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
        int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
                                   sizeof(struct gfs2_glock_iter));
        if (ret == 0) {
                struct seq_file *seq = file->private_data;
                struct gfs2_glock_iter *gi = seq->private;
                gi->sdp = inode->i_private;
                gi->last_pos = 0;
                seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
                if (seq->buf)
                        seq->size = GFS2_SEQ_GOODSIZE;
                gi->gl = NULL;
                ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL);
        }
        return ret;
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &gfs2_sbstats_seq_ops);
        if (ret == 0) {
                struct seq_file *seq = file->private_data;
                seq->private = inode->i_private;  /* sdp */
        }
        return ret;
}

static const struct file_operations gfs2_glocks_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_glocks_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_glstats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = gfs2_glocks_release,
};

static const struct file_operations gfs2_sbstats_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_sbstats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
        struct dentry *dent;

        dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
        if (IS_ERR_OR_NULL(dent))
                goto fail;
        sdp->debugfs_dir = dent;

        dent = debugfs_create_file("glocks",
                                   S_IFREG | S_IRUGO,
                                   sdp->debugfs_dir, sdp,
                                   &gfs2_glocks_fops);
        if (IS_ERR_OR_NULL(dent))
                goto fail;
        sdp->debugfs_dentry_glocks = dent;

        dent = debugfs_create_file("glstats",
                                   S_IFREG | S_IRUGO,
                                   sdp->debugfs_dir, sdp,
                                   &gfs2_glstats_fops);
        if (IS_ERR_OR_NULL(dent))
                goto fail;
        sdp->debugfs_dentry_glstats = dent;

        dent = debugfs_create_file("sbstats",
                                   S_IFREG | S_IRUGO,
                                   sdp->debugfs_dir, sdp,
                                   &gfs2_sbstats_fops);
        if (IS_ERR_OR_NULL(dent))
                goto fail;
        sdp->debugfs_dentry_sbstats = dent;

        return 0;
fail:
        gfs2_delete_debugfs_file(sdp);
        return dent ? PTR_ERR(dent) : -ENOMEM;
}
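
/*
 * Editorial note (not part of the original source): with debugfs
 * mounted in the usual place, the three files created above appear
 * under /sys/kernel/debug/gfs2/<fsname>/ as "glocks", "glstats" and
 * "sbstats".
 */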

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
        if (sdp->debugfs_dir) {
                if (sdp->debugfs_dentry_glocks) {
                        debugfs_remove(sdp->debugfs_dentry_glocks);
                        sdp->debugfs_dentry_glocks = NULL;
                }
                if (sdp->debugfs_dentry_glstats) {
                        debugfs_remove(sdp->debugfs_dentry_glstats);
                        sdp->debugfs_dentry_glstats = NULL;
                }
                if (sdp->debugfs_dentry_sbstats) {
                        debugfs_remove(sdp->debugfs_dentry_sbstats);
                        sdp->debugfs_dentry_sbstats = NULL;
                }
                debugfs_remove(sdp->debugfs_dir);
                sdp->debugfs_dir = NULL;
        }
}

int gfs2_register_debugfs(void)
{
        gfs2_root = debugfs_create_dir("gfs2", NULL);
        if (IS_ERR(gfs2_root))
                return PTR_ERR(gfs2_root);
        return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
        debugfs_remove(gfs2_root);
        gfs2_root = NULL;
}