/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

/* Must be kept in sync with the beginning of struct gfs2_glock */
struct glock_plug {
	struct list_head gl_list;
	unsigned long gl_flags;
};

struct greedy {
	struct gfs2_holder gr_gh;
	struct work_struct gr_work;
};

struct gfs2_gl_hash_bucket {
	struct list_head hb_list;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
static rwlock_t gl_hash_locks[GFS2_GL_HASH_SIZE];

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}

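/*
 * A rough summary of what relaxed_state_ok() accepts:
 *   actual == requested                   -> always compatible
 *   GL_EXACT set                          -> only an exact match will do
 *   actual == EX, requested == SH         -> compatible (EX covers SH access)
 *   LM_FLAG_ANY set, actual != UNLOCKED   -> any held state is acceptable
 */
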
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

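/*
 * The hash mixes the lock number, the lock type and the superblock pointer,
 * so glocks from different mounted filesystems spread across the one global
 * gl_hash_table[]; search_bucket() still compares gl_sbd to keep the
 * filesystems apart within a bucket.
 */
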
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
	struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
	gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
	gfs2_assert(sdp, list_empty(&gl->gl_holders));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;

	write_lock(&gl_hash_locks[gl->gl_hash]);
	if (kref_put(&gl->gl_ref, kill_glock)) {
		list_del_init(&gl_hash_table[gl->gl_hash].hb_list);
		write_unlock(&gl_hash_locks[gl->gl_hash]);
		BUG_ON(spin_is_locked(&gl->gl_spin));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(&gl_hash_locks[gl->gl_hash]);
out:
	return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
	int empty;
	spin_lock(&gl->gl_spin);
	empty = list_empty(head);
	spin_unlock(&gl->gl_spin);
	return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;

	list_for_each_entry(gl, &gl_hash_table[hash].hb_list, gl_list) {
		if (test_bit(GLF_PLUG, &gl->gl_flags))
			continue;
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		kref_get(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	read_lock(&gl_hash_locks[hash]);
	gl = search_bucket(hash, sdp, name);
	read_unlock(&gl_hash_locks[hash]);

	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(&gl_hash_locks[hash]);
	gl = search_bucket(hash, sdp, &name);
	read_unlock(&gl_hash_locks[hash]);

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	kref_init(&gl->gl_ref);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_hash = hash;
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	lops_init_le(&gl->gl_le, &gfs2_glock_lops);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(&gl_hash_locks[hash]);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(&gl_hash_locks[hash]);
		glock_free(gl);
		gl = tmp;
	} else {
		list_add_tail(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(&gl_hash_locks[hash]);
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner = current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	init_completion(&gh->gh_wait);

	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gh->gh_iflags &= 1 << HIF_ALLOCED;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags:
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
					   unsigned int state,
					   int flags, gfp_t gfp_flags)
{
	struct gfs2_holder *gh;

	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
	if (!gh)
		return NULL;

	gfs2_holder_init(gl, state, flags, gh);
	set_bit(HIF_ALLOCED, &gh->gh_iflags);
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
	gfs2_holder_uninit(gh);
	kfree(gh);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	set_bit(GLF_LOCK, &gl->gl_flags);
	complete(&gh->gh_wait);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	complete(&gh->gh_wait);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
		spin_lock(&gl->gl_spin);
	} else {
		gl->gl_req_gh = gh;
		set_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);

		if (gh->gh_state == LM_ST_UNLOCKED ||
		    gl->gl_state != LM_ST_EXCLUSIVE)
			glops->go_drop_th(gl);
		else
			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

		spin_lock(&gl->gl_spin);
	}

	return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	clear_bit(GLF_GREEDY, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_holder_uninit(gh);
	kfree(container_of(gh, struct greedy, gr_gh));

	spin_lock(&gl->gl_spin);

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters2) &&
			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
			gh = list_entry(gl->gl_waiters2.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
				blocked = rq_demote(gh);
			else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
				blocked = rq_greedy(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}

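/*
 * The three waiter lists drained by run_queue(), in the order they are
 * checked:
 *   gl_waiters1 - HIF_MUTEX requests (glmutex acquisition, see gfs2_glmutex_lock())
 *   gl_waiters2 - HIF_DEMOTE and HIF_GREEDY requests (callbacks and greedy
 *                 drops; skipped while GLF_SKIP_WAITERS2 is set)
 *   gl_waiters3 - HIF_PROMOTE requests (local holders queued by gfs2_glock_nq())
 */
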
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	} else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		complete(&gh.gh_wait);
	}
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		acquired = 0;
	} else {
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
	}
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * Note: This may fail silently if we are out of memory.
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	struct gfs2_holder *gh, *new_gh = NULL;

restart:
	spin_lock(&gl->gl_spin);

	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
		    gl->gl_req_gh != gh) {
			if (gh->gh_state != state)
				gh->gh_state = LM_ST_UNLOCKED;
			goto out;
		}
	}

	if (new_gh) {
		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
		new_gh = NULL;
	} else {
		spin_unlock(&gl->gl_spin);

		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL);
		if (!new_gh)
			return;
		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

		goto restart;
	}

out:
	spin_unlock(&gl->gl_spin);

	if (new_gh)
		gfs2_holder_put(new_gh);
}

void gfs2_glock_inode_squish(struct inode *inode)
{
	struct gfs2_holder gh;
	struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
	gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
	set_bit(HIF_DEMOTE, &gh.gh_iflags);
	spin_lock(&gl->gl_spin);
	gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
	list_add_tail(&gh.gh_list, &gl->gl_waiters2);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA | DIO_DATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, DIO_DATA);
	}

	/* Deal with each possible exit condition */

	if (!gh)
		gl->gl_stamp = jiffies;
	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		spin_unlock(&gl->gl_spin);
	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		if (gl->gl_state == gh->gh_state ||
		    gl->gl_state == LM_ST_UNLOCKED) {
			gh->gh_error = 0;
		} else {
			if (gfs2_assert_warn(sdp, gh->gh_flags &
					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
				fs_warn(sdp, "ret = 0x%.8X\n", ret);
			gh->gh_error = GLR_TRYFAILED;
		}
		spin_unlock(&gl->gl_spin);

		if (ret & LM_OUT_CANCELED)
			handle_callback(gl, LM_ST_UNLOCKED);

	} else if (ret & LM_OUT_CANCELED) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_CANCELED;
		spin_unlock(&gl->gl_spin);

	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		spin_lock(&gl->gl_spin);
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh->gh_error = 0;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		spin_unlock(&gl->gl_spin);

		set_bit(HIF_FIRST, &gh->gh_iflags);

		op_done = 0;

	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_TRYFAILED;
		spin_unlock(&gl->gl_spin);

	} else {
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA | DIO_DATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_drop_bh)
		glops->go_drop_bh(gl);

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_for_completion(&gh->gh_wait);

	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner);

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       gl->gl_name.ln_type, gl->gl_state);
		BUG();
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
		dump_glock(gl);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		gl->gl_stamp = jiffies;

		spin_lock(&gl->gl_spin);
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}

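/*
 * Illustrative sketch of the typical holder lifecycle (callers normally use
 * the gfs2_glock_nq_init() wrapper, which combines the first two steps):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... access the object protected by gl ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */
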
/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
				int flags)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	spin_lock(&gl->gl_spin);

	if (test_bit(GLF_LOCK, &gl->gl_flags) || !list_empty(&gl->gl_holders) ||
	    !list_empty(&gl->gl_waiters1) || !list_empty(&gl->gl_waiters2) ||
	    !list_empty(&gl->gl_waiters3) ||
	    relaxed_state_ok(gl->gl_state, state, flags)) {
		spin_unlock(&gl->gl_spin);
		return;
	}

	set_bit(GLF_PREFETCH, &gl->gl_flags);
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	glops->go_xmote_th(gl, state, flags);
}

static void greedy_work(void *data)
{
	struct greedy *gr = data;
	struct gfs2_holder *gh = &gr->gr_gh;
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

	if (glops->go_greedy)
		glops->go_greedy(gl);

	spin_lock(&gl->gl_spin);

	if (list_empty(&gl->gl_waiters2)) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		gfs2_holder_uninit(gh);
		kfree(gr);
	} else {
		gfs2_glock_hold(gl);
		list_add_tail(&gh->gh_list, &gl->gl_waiters2);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
		gfs2_glock_put(gl);
	}
}

/**
 * gfs2_glock_be_greedy - have a glock hold off demote requests for a while
 * @gl: the glock
 * @time: how long (in jiffies) to keep the glock greedy
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
	struct greedy *gr;
	struct gfs2_holder *gh;

	if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
	    test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
		return 1;

	gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
	if (!gr) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		return 1;
	}
	gh = &gr->gr_gh;

	gfs2_holder_init(gl, 0, 0, gh);
	set_bit(HIF_GREEDY, &gh->gh_iflags);
	INIT_WORK(&gr->gr_work, greedy_work, gr);

	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
	schedule_delayed_work(&gr->gr_work, time);

	return 0;
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
	struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
	struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	struct lm_lockname *b = &gh_b->gh_gl->gl_name;
	int ret = 0;

	if (a->ln_number > b->ln_number)
		ret = 1;
	else if (a->ln_number < b->ln_number)
		ret = -1;
	else {
		if (gh_a->gh_state == LM_ST_SHARED &&
		    gh_b->gh_state == LM_ST_EXCLUSIVE)
			ret = 1;
		else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
			 (gh_b->gh_flags & GL_LOCAL_EXCL))
			ret = 1;
	}

	return ret;
}

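/*
 * nq_m_sync() below sorts its holder array with glock_compare(), so every
 * caller acquires multiple glocks in the same global order: by lock number,
 * with exclusive/GL_LOCAL_EXCL requests ordered ahead of weaker requests for
 * the same lock. The common ordering is what makes the multi-lock
 * acquisition deadlock free.
 */
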
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 *
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
			     const struct gfs2_glock_operations *glops,
			     unsigned int state, int flags)
{
	struct gfs2_glock *gl;
	int error;

	if (atomic_read(&sdp->sd_reclaim_count) <
	    gfs2_tune_get(sdp, gt_reclaim_limit)) {
		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
		if (!error) {
			gfs2_glock_prefetch(gl, state, flags);
			gfs2_glock_put(gl);
		}
	}
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, state);
	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

9b47c11d | 1716 | void gfs2_glock_cb(void *cb_data, unsigned int type, void *data) |
b3b94faa | 1717 | { |
9b47c11d | 1718 | struct gfs2_sbd *sdp = cb_data; |
b3b94faa | 1719 | |
b3b94faa DT |
1720 | switch (type) { |
1721 | case LM_CB_NEED_E: | |
e7f5c01c | 1722 | blocking_cb(sdp, data, LM_ST_UNLOCKED); |
b3b94faa DT |
1723 | return; |
1724 | ||
1725 | case LM_CB_NEED_D: | |
e7f5c01c | 1726 | blocking_cb(sdp, data, LM_ST_DEFERRED); |
b3b94faa DT |
1727 | return; |
1728 | ||
1729 | case LM_CB_NEED_S: | |
e7f5c01c | 1730 | blocking_cb(sdp, data, LM_ST_SHARED); |
b3b94faa DT |
1731 | return; |
1732 | ||
1733 | case LM_CB_ASYNC: { | |
e7f5c01c | 1734 | struct lm_async_cb *async = data; |
b3b94faa DT |
1735 | struct gfs2_glock *gl; |
1736 | ||
1737 | gl = gfs2_glock_find(sdp, &async->lc_name); | |
1738 | if (gfs2_assert_warn(sdp, gl)) | |
1739 | return; | |
1740 | if (!gfs2_assert_warn(sdp, gl->gl_req_bh)) | |
1741 | gl->gl_req_bh(gl, async->lc_ret); | |
1742 | gfs2_glock_put(gl); | |
b3b94faa DT |
1743 | return; |
1744 | } | |
1745 | ||
1746 | case LM_CB_NEED_RECOVERY: | |
1747 | gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data); | |
1748 | if (sdp->sd_recoverd_process) | |
1749 | wake_up_process(sdp->sd_recoverd_process); | |
1750 | return; | |
1751 | ||
1752 | case LM_CB_DROPLOCKS: | |
1753 | gfs2_gl_hash_clear(sdp, NO_WAIT); | |
1754 | gfs2_quota_scan(sdp); | |
1755 | return; | |
1756 | ||
1757 | default: | |
1758 | gfs2_assert_warn(sdp, 0); | |
1759 | return; | |
1760 | } | |
1761 | } | |
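
/*
 * Illustrative sketch, not from the original glock.c: how a lock module
 * might ask for a demote to the shared state through the callback it was
 * handed at mount time.  Here cb stands in for gfs2_glock_cb() and cb_data
 * for the superblock pointer; both names are assumptions for the example.
 */
static void example_request_demote(void (*cb)(void *, unsigned int, void *),
                                   void *cb_data, unsigned int lock_type,
                                   u64 lock_number)
{
        struct lm_lockname name = {
                .ln_number = lock_number,
                .ln_type = lock_type,
        };

        /* Ends up in blocking_cb(sdp, &name, LM_ST_SHARED) above. */
        cb(cb_data, LM_CB_NEED_S, &name);
}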
1762 | ||
b3b94faa DT |
1763 | /** |
1764 | * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an | |
1765 | * iopen glock from memory | |
1766 | * @io_gl: the iopen glock | |
1767 | * @state: the state into which the glock should be put | |
1768 | * | |
1769 | */ | |
1770 | ||
1771 | void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state) | |
1772 | { | |
b3b94faa DT |
1773 | |
1774 | if (state != LM_ST_UNLOCKED) | |
1775 | return; | |
feaa7bba | 1776 | /* FIXME: remove this? */ |
b3b94faa DT |
1777 | } |
1778 | ||
1779 | /** | |
1780 | * demote_ok - Check to see if it's ok to unlock a glock | |
1781 | * @gl: the glock | |
1782 | * | |
1783 | * Returns: 1 if it's ok | |
1784 | */ | |
1785 | ||
1786 | static int demote_ok(struct gfs2_glock *gl) | |
1787 | { | |
1788 | struct gfs2_sbd *sdp = gl->gl_sbd; | |
8fb4b536 | 1789 | const struct gfs2_glock_operations *glops = gl->gl_ops; |
b3b94faa DT |
1790 | int demote = 1; |
1791 | ||
1792 | if (test_bit(GLF_STICKY, &gl->gl_flags)) | |
1793 | demote = 0; | |
1794 | else if (test_bit(GLF_PREFETCH, &gl->gl_flags)) | |
50299965 | 1795 | demote = time_after_eq(jiffies, gl->gl_stamp + |
b3b94faa DT |
1796 | gfs2_tune_get(sdp, gt_prefetch_secs) * HZ); |
1797 | else if (glops->go_demote_ok) | |
1798 | demote = glops->go_demote_ok(gl); | |
1799 | ||
1800 | return demote; | |
1801 | } | |
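
/*
 * Illustrative sketch, not from the original glock.c: the shape of a
 * go_demote_ok() method that demote_ok() above would call.  The real
 * per-type implementations live in glops.c; this hypothetical one simply
 * refuses to demote while the glock still has AIL buffers outstanding.
 */
static int example_go_demote_ok(struct gfs2_glock *gl)
{
        return atomic_read(&gl->gl_ail_count) == 0;
}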
1802 | ||
1803 | /** | |
1804 | * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list | |
1805 | * @gl: the glock | |
1806 | * | |
1807 | */ | |
1808 | ||
1809 | void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) | |
1810 | { | |
1811 | struct gfs2_sbd *sdp = gl->gl_sbd; | |
1812 | ||
1813 | spin_lock(&sdp->sd_reclaim_lock); | |
1814 | if (list_empty(&gl->gl_reclaim)) { | |
1815 | gfs2_glock_hold(gl); | |
1816 | list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list); | |
1817 | atomic_inc(&sdp->sd_reclaim_count); | |
1818 | } | |
1819 | spin_unlock(&sdp->sd_reclaim_lock); | |
1820 | ||
1821 | wake_up(&sdp->sd_reclaim_wq); | |
1822 | } | |
1823 | ||
1824 | /** | |
1825 | * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list | |
1826 | * @sdp: the filesystem | |
1827 | * | |
1828 | * Called from the gfs2_glockd() glock reclaim daemon, or while promoting a | |
1829 | * different glock when we notice that there are a lot of glocks on the | |
1830 | * reclaim list. | |
1831 | * | |
1832 | */ | |
1833 | ||
1834 | void gfs2_reclaim_glock(struct gfs2_sbd *sdp) | |
1835 | { | |
1836 | struct gfs2_glock *gl; | |
1837 | ||
1838 | spin_lock(&sdp->sd_reclaim_lock); | |
1839 | if (list_empty(&sdp->sd_reclaim_list)) { | |
1840 | spin_unlock(&sdp->sd_reclaim_lock); | |
1841 | return; | |
1842 | } | |
1843 | gl = list_entry(sdp->sd_reclaim_list.next, | |
1844 | struct gfs2_glock, gl_reclaim); | |
1845 | list_del_init(&gl->gl_reclaim); | |
1846 | spin_unlock(&sdp->sd_reclaim_lock); | |
1847 | ||
1848 | atomic_dec(&sdp->sd_reclaim_count); | |
1849 | atomic_inc(&sdp->sd_reclaimed); | |
1850 | ||
1851 | if (gfs2_glmutex_trylock(gl)) { | |
b3b94faa | 1852 | if (queue_empty(gl, &gl->gl_holders) && |
50299965 | 1853 | gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) |
b3b94faa DT |
1854 | handle_callback(gl, LM_ST_UNLOCKED); |
1855 | gfs2_glmutex_unlock(gl); | |
1856 | } | |
1857 | ||
1858 | gfs2_glock_put(gl); | |
1859 | } | |
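
/*
 * Illustrative sketch, not from the original glock.c: the calling pattern
 * a glock reclaim daemon uses around gfs2_reclaim_glock(), draining the
 * reclaim list back down to the gt_reclaim_limit tunable.
 */
static void example_drain_reclaim_list(struct gfs2_sbd *sdp)
{
        while (atomic_read(&sdp->sd_reclaim_count) >
               gfs2_tune_get(sdp, gt_reclaim_limit))
                gfs2_reclaim_glock(sdp);
}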
1860 | ||
1861 | /** | |
1862 | * examine_bucket - Call a function for each glock in a hash bucket | |
1863 | * @examiner: the function | |
1864 | * @sdp: the filesystem | |
1865 | * @bucket: the bucket | |
1866 | * | |
1867 | * Returns: 1 if the bucket has entries | |
1868 | */ | |
1869 | ||
1870 | static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp, | |
37b2fa6a | 1871 | unsigned int hash) |
b3b94faa DT |
1872 | { |
1873 | struct glock_plug plug; | |
1874 | struct list_head *tmp; | |
1875 | struct gfs2_glock *gl; | |
1876 | int entries; | |
1877 | ||
1878 | /* Add "plug" to end of bucket list, work back up list from there */ | |
1879 | memset(&plug.gl_flags, 0, sizeof(unsigned long)); | |
1880 | set_bit(GLF_PLUG, &plug.gl_flags); | |
1881 | ||
37b2fa6a SW |
1882 | write_lock(&gl_hash_locks[hash]); |
1883 | list_add(&plug.gl_list, &gl_hash_table[hash].hb_list); | |
1884 | write_unlock(&gl_hash_locks[hash]); | |
b3b94faa DT |
1885 | |
1886 | for (;;) { | |
37b2fa6a | 1887 | write_lock(&gl_hash_locks[hash]); |
b3b94faa DT |
1888 | |
1889 | for (;;) { | |
1890 | tmp = plug.gl_list.next; | |
1891 | ||
37b2fa6a | 1892 | if (tmp == &gl_hash_table[hash].hb_list) { |
b3b94faa | 1893 | list_del(&plug.gl_list); |
37b2fa6a SW |
1894 | entries = !list_empty(&gl_hash_table[hash].hb_list); |
1895 | write_unlock(&gl_hash_locks[hash]); | |
b3b94faa DT |
1896 | return entries; |
1897 | } | |
1898 | gl = list_entry(tmp, struct gfs2_glock, gl_list); | |
1899 | ||
1900 | /* Move plug up list */ | |
1901 | list_move(&plug.gl_list, &gl->gl_list); | |
1902 | ||
1903 | if (test_bit(GLF_PLUG, &gl->gl_flags)) | |
1904 | continue; | |
85d1da67 SW |
1905 | if (gl->gl_sbd != sdp) |
1906 | continue; | |
b3b94faa DT |
1907 | |
1908 | /* examiner() must glock_put() */ | |
1909 | gfs2_glock_hold(gl); | |
1910 | ||
1911 | break; | |
1912 | } | |
1913 | ||
37b2fa6a | 1914 | write_unlock(&gl_hash_locks[hash]); |
b3b94faa DT |
1915 | |
1916 | examiner(gl); | |
1917 | } | |
1918 | } | |
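
/*
 * Illustrative sketch, not from the original glock.c: a minimal examiner
 * usable with examine_bucket().  As noted in the walk above, the bucket
 * scan takes a reference on each glock it hands out, so every examiner
 * must finish with gfs2_glock_put().
 */
static void example_count_glock(struct gfs2_glock *gl)
{
        static atomic_t example_glock_count = ATOMIC_INIT(0);

        atomic_inc(&example_glock_count);
        gfs2_glock_put(gl);
}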
1919 | ||
1920 | /** | |
1921 | * scan_glock - look at a glock and see if we can reclaim it | |
1922 | * @gl: the glock to look at | |
1923 | * | |
1924 | */ | |
1925 | ||
1926 | static void scan_glock(struct gfs2_glock *gl) | |
1927 | { | |
a2242db0 SW |
1928 | if (gl->gl_ops == &gfs2_inode_glops) |
1929 | goto out; | |
1930 | ||
b3b94faa | 1931 | if (gfs2_glmutex_trylock(gl)) { |
b3b94faa DT |
1932 | if (queue_empty(gl, &gl->gl_holders) && |
1933 | gl->gl_state != LM_ST_UNLOCKED && | |
1934 | demote_ok(gl)) | |
1935 | goto out_schedule; | |
b3b94faa DT |
1936 | gfs2_glmutex_unlock(gl); |
1937 | } | |
a2242db0 | 1938 | out: |
b3b94faa | 1939 | gfs2_glock_put(gl); |
b3b94faa DT |
1940 | return; |
1941 | ||
627add2d | 1942 | out_schedule: |
b3b94faa DT |
1943 | gfs2_glmutex_unlock(gl); |
1944 | gfs2_glock_schedule_for_reclaim(gl); | |
1945 | gfs2_glock_put(gl); | |
1946 | } | |
1947 | ||
1948 | /** | |
1949 | * gfs2_scand_internal - Look for glocks and inodes to toss from memory | |
1950 | * @sdp: the filesystem | |
1951 | * | |
1952 | */ | |
1953 | ||
1954 | void gfs2_scand_internal(struct gfs2_sbd *sdp) | |
1955 | { | |
1956 | unsigned int x; | |
1957 | ||
1958 | for (x = 0; x < GFS2_GL_HASH_SIZE; x++) { | |
37b2fa6a | 1959 | examine_bucket(scan_glock, sdp, x); |
b3b94faa DT |
1960 | cond_resched(); |
1961 | } | |
1962 | } | |
1963 | ||
1964 | /** | |
1965 | * clear_glock - look at a glock and see if we can free it from the glock cache | |
1966 | * @gl: the glock to look at | |
1967 | * | |
1968 | */ | |
1969 | ||
1970 | static void clear_glock(struct gfs2_glock *gl) | |
1971 | { | |
1972 | struct gfs2_sbd *sdp = gl->gl_sbd; | |
1973 | int released; | |
1974 | ||
1975 | spin_lock(&sdp->sd_reclaim_lock); | |
1976 | if (!list_empty(&gl->gl_reclaim)) { | |
1977 | list_del_init(&gl->gl_reclaim); | |
1978 | atomic_dec(&sdp->sd_reclaim_count); | |
190562bd | 1979 | spin_unlock(&sdp->sd_reclaim_lock); |
b3b94faa DT |
1980 | released = gfs2_glock_put(gl); |
1981 | gfs2_assert(sdp, !released); | |
190562bd SW |
1982 | } else { |
1983 | spin_unlock(&sdp->sd_reclaim_lock); | |
b3b94faa | 1984 | } |
b3b94faa DT |
1985 | |
1986 | if (gfs2_glmutex_trylock(gl)) { | |
b3b94faa DT |
1987 | if (queue_empty(gl, &gl->gl_holders) && |
1988 | gl->gl_state != LM_ST_UNLOCKED) | |
1989 | handle_callback(gl, LM_ST_UNLOCKED); | |
1990 | ||
1991 | gfs2_glmutex_unlock(gl); | |
1992 | } | |
1993 | ||
1994 | gfs2_glock_put(gl); | |
1995 | } | |
1996 | ||
1997 | /** | |
1998 | * gfs2_gl_hash_clear - Empty out the glock hash table | |
1999 | * @sdp: the filesystem | |
2000 | * @wait: wait until it's all gone | |
2001 | * | |
2002 | * Called when unmounting the filesystem, or when the inter-node lock manager | |
2003 | * requests DROPLOCKS because it is running out of capacity. | |
2004 | */ | |
2005 | ||
2006 | void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait) | |
2007 | { | |
2008 | unsigned long t; | |
2009 | unsigned int x; | |
2010 | int cont; | |
2011 | ||
2012 | t = jiffies; | |
2013 | ||
2014 | for (;;) { | |
2015 | cont = 0; | |
2016 | ||
2017 | for (x = 0; x < GFS2_GL_HASH_SIZE; x++) | |
37b2fa6a | 2018 | if (examine_bucket(clear_glock, sdp, x)) |
b3b94faa DT |
2019 | cont = 1; |
2020 | ||
2021 | if (!wait || !cont) | |
2022 | break; | |
2023 | ||
2024 | if (time_after_eq(jiffies, | |
2025 | t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) { | |
2026 | fs_warn(sdp, "Unmount seems to be stalled. " | |
2027 | "Dumping lock state...\n"); | |
2028 | gfs2_dump_lockstate(sdp); | |
2029 | t = jiffies; | |
2030 | } | |
2031 | ||
b3b94faa | 2032 | invalidate_inodes(sdp->sd_vfs); |
fd88de56 | 2033 | msleep(10); |
b3b94faa DT |
2034 | } |
2035 | } | |
2036 | ||
2037 | /* | |
2038 | * Diagnostic routines to help debug distributed deadlock | |
2039 | */ | |
2040 | ||
2041 | /** | |
2042 | * dump_holder - print information about a glock holder | |
2043 | * @str: a string naming the type of holder | |
2044 | * @gh: the glock holder | |
2045 | * | |
2046 | * Returns: 0 on success, -ENOBUFS when we run out of space | |
2047 | */ | |
2048 | ||
2049 | static int dump_holder(char *str, struct gfs2_holder *gh) | |
2050 | { | |
2051 | unsigned int x; | |
2052 | int error = -ENOBUFS; | |
2053 | ||
d92a8d48 SW |
2054 | printk(KERN_INFO " %s\n", str); |
2055 | printk(KERN_INFO " owner = %ld\n", | |
b3b94faa | 2056 | (gh->gh_owner) ? (long)gh->gh_owner->pid : -1); |
d92a8d48 SW |
2057 | printk(KERN_INFO " gh_state = %u\n", gh->gh_state); |
2058 | printk(KERN_INFO " gh_flags ="); | |
b3b94faa DT |
2059 | for (x = 0; x < 32; x++) |
2060 | if (gh->gh_flags & (1 << x)) | |
2061 | printk(" %u", x); | |
2062 | printk(" \n"); | |
d92a8d48 SW |
2063 | printk(KERN_INFO " error = %d\n", gh->gh_error); |
2064 | printk(KERN_INFO " gh_iflags ="); | |
b3b94faa DT |
2065 | for (x = 0; x < 32; x++) |
2066 | if (test_bit(x, &gh->gh_iflags)) | |
2067 | printk(" %u", x); | |
2068 | printk(" \n"); | |
d0dc80db | 2069 | print_symbol(KERN_INFO " initialized at: %s\n", gh->gh_ip); |
b3b94faa DT |
2070 | |
2071 | error = 0; | |
2072 | ||
2073 | return error; | |
2074 | } | |
2075 | ||
2076 | /** | |
2077 | * dump_inode - print information about an inode | |
2078 | * @ip: the inode | |
2079 | * | |
2080 | * Returns: 0 on success, -ENOBUFS when we run out of space | |
2081 | */ | |
2082 | ||
2083 | static int dump_inode(struct gfs2_inode *ip) | |
2084 | { | |
2085 | unsigned int x; | |
2086 | int error = -ENOBUFS; | |
2087 | ||
d92a8d48 SW |
2088 | printk(KERN_INFO " Inode:\n"); |
2089 | printk(KERN_INFO " num = %llu %llu\n", | |
382066da SW |
2090 | (unsigned long long)ip->i_num.no_formal_ino, |
2091 | (unsigned long long)ip->i_num.no_addr); | |
d92a8d48 | 2092 | printk(KERN_INFO " type = %u\n", IF2DT(ip->i_di.di_mode)); |
d92a8d48 | 2093 | printk(KERN_INFO " i_flags ="); |
b3b94faa DT |
2094 | for (x = 0; x < 32; x++) |
2095 | if (test_bit(x, &ip->i_flags)) | |
2096 | printk(" %u", x); | |
2097 | printk(" \n"); | |
b3b94faa DT |
2098 | |
2099 | error = 0; | |
2100 | ||
2101 | return error; | |
2102 | } | |
2103 | ||
2104 | /** | |
2105 | * dump_glock - print information about a glock | |
2106 | * @gl: the glock | |
2108 | * | |
2109 | * Returns: 0 on success, -ENOBUFS when we run out of space | |
2110 | */ | |
2111 | ||
2112 | static int dump_glock(struct gfs2_glock *gl) | |
2113 | { | |
2114 | struct gfs2_holder *gh; | |
2115 | unsigned int x; | |
2116 | int error = -ENOBUFS; | |
2117 | ||
2118 | spin_lock(&gl->gl_spin); | |
2119 | ||
85d1da67 | 2120 | printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type, |
382066da | 2121 | (unsigned long long)gl->gl_name.ln_number); |
d92a8d48 | 2122 | printk(KERN_INFO " gl_flags ="); |
85d1da67 | 2123 | for (x = 0; x < 32; x++) { |
b3b94faa DT |
2124 | if (test_bit(x, &gl->gl_flags)) |
2125 | printk(" %u", x); | |
85d1da67 | 2126 | } |
b3b94faa | 2127 | printk(" \n"); |
d92a8d48 SW |
2128 | printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount)); |
2129 | printk(KERN_INFO " gl_state = %u\n", gl->gl_state); | |
320dd101 SW |
2130 | printk(KERN_INFO " gl_owner = %s\n", gl->gl_owner->comm); |
2131 | print_symbol(KERN_INFO " gl_ip = %s\n", gl->gl_ip); | |
d92a8d48 SW |
2132 | printk(KERN_INFO " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no"); |
2133 | printk(KERN_INFO " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no"); | |
2134 | printk(KERN_INFO " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count)); | |
2135 | printk(KERN_INFO " object = %s\n", (gl->gl_object) ? "yes" : "no"); | |
2136 | printk(KERN_INFO " le = %s\n", | |
b3b94faa | 2137 | (list_empty(&gl->gl_le.le_list)) ? "no" : "yes"); |
d92a8d48 | 2138 | printk(KERN_INFO " reclaim = %s\n", |
b3b94faa DT |
2139 | (list_empty(&gl->gl_reclaim)) ? "no" : "yes"); |
2140 | if (gl->gl_aspace) | |
85d1da67 | 2141 | printk(KERN_INFO " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace, |
88721877 | 2142 | gl->gl_aspace->i_mapping->nrpages); |
b3b94faa | 2143 | else |
d92a8d48 SW |
2144 | printk(KERN_INFO " aspace = no\n"); |
2145 | printk(KERN_INFO " ail = %d\n", atomic_read(&gl->gl_ail_count)); | |
b3b94faa DT |
2146 | if (gl->gl_req_gh) { |
2147 | error = dump_holder("Request", gl->gl_req_gh); | |
2148 | if (error) | |
2149 | goto out; | |
2150 | } | |
2151 | list_for_each_entry(gh, &gl->gl_holders, gh_list) { | |
2152 | error = dump_holder("Holder", gh); | |
2153 | if (error) | |
2154 | goto out; | |
2155 | } | |
2156 | list_for_each_entry(gh, &gl->gl_waiters1, gh_list) { | |
2157 | error = dump_holder("Waiter1", gh); | |
2158 | if (error) | |
2159 | goto out; | |
2160 | } | |
2161 | list_for_each_entry(gh, &gl->gl_waiters2, gh_list) { | |
2162 | error = dump_holder("Waiter2", gh); | |
2163 | if (error) | |
2164 | goto out; | |
2165 | } | |
2166 | list_for_each_entry(gh, &gl->gl_waiters3, gh_list) { | |
2167 | error = dump_holder("Waiter3", gh); | |
2168 | if (error) | |
2169 | goto out; | |
2170 | } | |
5c676f6d | 2171 | if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) { |
b3b94faa DT |
2172 | if (!test_bit(GLF_LOCK, &gl->gl_flags) && |
2173 | list_empty(&gl->gl_holders)) { | |
5c676f6d | 2174 | error = dump_inode(gl->gl_object); |
b3b94faa DT |
2175 | if (error) |
2176 | goto out; | |
2177 | } else { | |
2178 | error = -ENOBUFS; | |
d92a8d48 | 2179 | printk(KERN_INFO " Inode: busy\n"); |
b3b94faa DT |
2180 | } |
2181 | } | |
2182 | ||
2183 | error = 0; | |
2184 | ||
a91ea69f | 2185 | out: |
b3b94faa | 2186 | spin_unlock(&gl->gl_spin); |
b3b94faa DT |
2187 | return error; |
2188 | } | |
2189 | ||
2190 | /** | |
2191 | * gfs2_dump_lockstate - print out the current lockstate | |
2192 | * @sdp: the filesystem | |
2193 | * | |
2194 | * Dumps the lock state of every glock belonging to @sdp to the console. | |
2196 | * | |
2197 | */ | |
2198 | ||
08bc2dbc | 2199 | static int gfs2_dump_lockstate(struct gfs2_sbd *sdp) |
b3b94faa | 2200 | { |
b3b94faa DT |
2201 | struct gfs2_glock *gl; |
2202 | unsigned int x; | |
2203 | int error = 0; | |
2204 | ||
2205 | for (x = 0; x < GFS2_GL_HASH_SIZE; x++) { | |
b3b94faa | 2206 | |
37b2fa6a | 2207 | read_lock(&gl_hash_locks[x]); |
b3b94faa | 2208 | |
37b2fa6a | 2209 | list_for_each_entry(gl, &gl_hash_table[x].hb_list, gl_list) { |
b3b94faa DT |
2210 | if (test_bit(GLF_PLUG, &gl->gl_flags)) |
2211 | continue; | |
85d1da67 SW |
2212 | if (gl->gl_sbd != sdp) |
2213 | continue; | |
b3b94faa DT |
2214 | |
2215 | error = dump_glock(gl); | |
2216 | if (error) | |
2217 | break; | |
2218 | } | |
2219 | ||
37b2fa6a | 2220 | read_unlock(&gl_hash_locks[x]); |
b3b94faa DT |
2221 | |
2222 | if (error) | |
2223 | break; | |
2224 | } | |
2225 | ||
2226 | ||
2227 | return error; | |
2228 | } | |
2229 | ||
85d1da67 SW |
2230 | int __init gfs2_glock_init(void) |
2231 | { | |
2232 | unsigned i; | |
2233 | for(i = 0; i < GFS2_GL_HASH_SIZE; i++) { | |
37b2fa6a SW |
2234 | rwlock_init(&gl_hash_locks[i]); |
2235 | INIT_LIST_HEAD(&gl_hash_table[i].hb_list); | |
85d1da67 SW |
2236 | } |
2237 | return 0; | |
2238 | } | |
2239 |