/* fs/gfs2/locking/dlm/lock.c */
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
12 static char junk_lvb
[GDLM_LVB_SIZE
];
14 static void queue_complete(struct gdlm_lock
*lp
)
16 struct gdlm_ls
*ls
= lp
->ls
;
18 clear_bit(LFL_ACTIVE
, &lp
->flags
);
20 spin_lock(&ls
->async_lock
);
21 list_add_tail(&lp
->clist
, &ls
->complete
);
22 spin_unlock(&ls
->async_lock
);
23 wake_up(&ls
->thread_wait
);
/* DLM completion AST callback: astarg is the gdlm_lock passed to dlm_lock(). */
static inline void gdlm_ast(void *astarg)
{
	queue_complete((struct gdlm_lock *) astarg);
}
31 static inline void gdlm_bast(void *astarg
, int mode
)
33 struct gdlm_lock
*lp
= astarg
;
34 struct gdlm_ls
*ls
= lp
->ls
;
37 printk(KERN_INFO
"lock_dlm: bast mode zero %x,%llx\n",
38 lp
->lockname
.ln_type
, lp
->lockname
.ln_number
);
42 spin_lock(&ls
->async_lock
);
44 list_add_tail(&lp
->blist
, &ls
->blocking
);
46 } else if (lp
->bast_mode
< mode
)
48 spin_unlock(&ls
->async_lock
);
49 wake_up(&ls
->thread_wait
);
52 void gdlm_queue_delayed(struct gdlm_lock
*lp
)
54 struct gdlm_ls
*ls
= lp
->ls
;
56 spin_lock(&ls
->async_lock
);
57 list_add_tail(&lp
->delay_list
, &ls
->delayed
);
58 spin_unlock(&ls
->async_lock
);
61 /* convert gfs lock-state to dlm lock-mode */
63 static int16_t make_mode(int16_t lmstate
)
75 gdlm_assert(0, "unknown LM state %d", lmstate
);
79 /* convert dlm lock-mode to gfs lock-state */
81 int16_t gdlm_make_lmstate(int16_t dlmmode
)
86 return LM_ST_UNLOCKED
;
88 return LM_ST_EXCLUSIVE
;
90 return LM_ST_DEFERRED
;
94 gdlm_assert(0, "unknown DLM mode %d", dlmmode
);
98 /* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
99 DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
101 static void check_cur_state(struct gdlm_lock
*lp
, unsigned int cur_state
)
103 int16_t cur
= make_mode(cur_state
);
104 if (lp
->cur
!= DLM_LOCK_IV
)
105 gdlm_assert(lp
->cur
== cur
, "%d, %d", lp
->cur
, cur
);
108 static inline unsigned int make_flags(struct gdlm_lock
*lp
,
109 unsigned int gfs_flags
,
110 int16_t cur
, int16_t req
)
112 unsigned int lkf
= 0;
114 if (gfs_flags
& LM_FLAG_TRY
)
115 lkf
|= DLM_LKF_NOQUEUE
;
117 if (gfs_flags
& LM_FLAG_TRY_1CB
) {
118 lkf
|= DLM_LKF_NOQUEUE
;
119 lkf
|= DLM_LKF_NOQUEUEBAST
;
122 if (gfs_flags
& LM_FLAG_PRIORITY
) {
123 lkf
|= DLM_LKF_NOORDER
;
124 lkf
|= DLM_LKF_HEADQUE
;
127 if (gfs_flags
& LM_FLAG_ANY
) {
128 if (req
== DLM_LOCK_PR
)
129 lkf
|= DLM_LKF_ALTCW
;
130 else if (req
== DLM_LOCK_CW
)
131 lkf
|= DLM_LKF_ALTPR
;
134 if (lp
->lksb
.sb_lkid
!= 0) {
135 lkf
|= DLM_LKF_CONVERT
;
137 /* Conversion deadlock avoidance by DLM */
139 if (!test_bit(LFL_FORCE_PROMOTE
, &lp
->flags
) &&
140 !(lkf
& DLM_LKF_NOQUEUE
) &&
141 cur
> DLM_LOCK_NL
&& req
> DLM_LOCK_NL
&& cur
!= req
)
142 lkf
|= DLM_LKF_CONVDEADLK
;
146 lkf
|= DLM_LKF_VALBLK
;
151 /* make_strname - convert GFS lock numbers to a string */
153 static inline void make_strname(struct lm_lockname
*lockname
,
154 struct gdlm_strname
*str
)
156 sprintf(str
->name
, "%8x%16llx", lockname
->ln_type
,
157 lockname
->ln_number
);
158 str
->namelen
= GDLM_STRNAME_BYTES
;
161 int gdlm_create_lp(struct gdlm_ls
*ls
, struct lm_lockname
*name
,
162 struct gdlm_lock
**lpp
)
164 struct gdlm_lock
*lp
;
166 lp
= kzalloc(sizeof(struct gdlm_lock
), GFP_KERNEL
);
170 lp
->lockname
= *name
;
172 lp
->cur
= DLM_LOCK_IV
;
174 lp
->hold_null
= NULL
;
175 init_completion(&lp
->ast_wait
);
176 INIT_LIST_HEAD(&lp
->clist
);
177 INIT_LIST_HEAD(&lp
->blist
);
178 INIT_LIST_HEAD(&lp
->delay_list
);
180 spin_lock(&ls
->async_lock
);
181 list_add(&lp
->all_list
, &ls
->all_locks
);
182 ls
->all_locks_count
++;
183 spin_unlock(&ls
->async_lock
);
189 void gdlm_delete_lp(struct gdlm_lock
*lp
)
191 struct gdlm_ls
*ls
= lp
->ls
;
193 spin_lock(&ls
->async_lock
);
194 if (!list_empty(&lp
->clist
))
195 list_del_init(&lp
->clist
);
196 if (!list_empty(&lp
->blist
))
197 list_del_init(&lp
->blist
);
198 if (!list_empty(&lp
->delay_list
))
199 list_del_init(&lp
->delay_list
);
200 gdlm_assert(!list_empty(&lp
->all_list
),
201 "%x,%llx", lp
->lockname
.ln_type
, lp
->lockname
.ln_number
);
202 list_del_init(&lp
->all_list
);
203 ls
->all_locks_count
--;
204 spin_unlock(&ls
->async_lock
);
209 int gdlm_get_lock(lm_lockspace_t
*lockspace
, struct lm_lockname
*name
,
212 struct gdlm_lock
*lp
;
215 error
= gdlm_create_lp((struct gdlm_ls
*) lockspace
, name
, &lp
);
217 *lockp
= (lm_lock_t
*) lp
;
221 void gdlm_put_lock(lm_lock_t
*lock
)
223 gdlm_delete_lp((struct gdlm_lock
*) lock
);
226 unsigned int gdlm_do_lock(struct gdlm_lock
*lp
)
228 struct gdlm_ls
*ls
= lp
->ls
;
229 struct gdlm_strname str
;
233 * When recovery is in progress, delay lock requests for submission
234 * once recovery is done. Requests for recovery (NOEXP) and unlocks
238 if (test_bit(DFL_BLOCK_LOCKS
, &ls
->flags
) &&
239 !test_bit(LFL_NOBLOCK
, &lp
->flags
) && lp
->req
!= DLM_LOCK_NL
) {
240 gdlm_queue_delayed(lp
);
245 * Submit the actual lock request.
248 if (test_bit(LFL_NOBAST
, &lp
->flags
))
251 make_strname(&lp
->lockname
, &str
);
253 set_bit(LFL_ACTIVE
, &lp
->flags
);
255 log_debug("lk %x,%llx id %x %d,%d %x", lp
->lockname
.ln_type
,
256 lp
->lockname
.ln_number
, lp
->lksb
.sb_lkid
,
257 lp
->cur
, lp
->req
, lp
->lkf
);
259 error
= dlm_lock(ls
->dlm_lockspace
, lp
->req
, &lp
->lksb
, lp
->lkf
,
260 str
.name
, str
.namelen
, 0, gdlm_ast
, (void *) lp
,
261 bast
? gdlm_bast
: NULL
);
263 if ((error
== -EAGAIN
) && (lp
->lkf
& DLM_LKF_NOQUEUE
)) {
264 lp
->lksb
.sb_status
= -EAGAIN
;
270 log_debug("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
271 "flags=%lx", ls
->fsname
, lp
->lockname
.ln_type
,
272 lp
->lockname
.ln_number
, error
, lp
->cur
, lp
->req
,
279 unsigned int gdlm_do_unlock(struct gdlm_lock
*lp
)
281 struct gdlm_ls
*ls
= lp
->ls
;
282 unsigned int lkf
= 0;
285 set_bit(LFL_DLM_UNLOCK
, &lp
->flags
);
286 set_bit(LFL_ACTIVE
, &lp
->flags
);
289 lkf
= DLM_LKF_VALBLK
;
291 log_debug("un %x,%llx %x %d %x", lp
->lockname
.ln_type
,
292 lp
->lockname
.ln_number
, lp
->lksb
.sb_lkid
, lp
->cur
, lkf
);
294 error
= dlm_unlock(ls
->dlm_lockspace
, lp
->lksb
.sb_lkid
, lkf
, NULL
, lp
);
297 log_debug("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
298 "flags=%lx", ls
->fsname
, lp
->lockname
.ln_type
,
299 lp
->lockname
.ln_number
, error
, lp
->cur
, lp
->req
,
306 unsigned int gdlm_lock(lm_lock_t
*lock
, unsigned int cur_state
,
307 unsigned int req_state
, unsigned int flags
)
309 struct gdlm_lock
*lp
= (struct gdlm_lock
*) lock
;
311 clear_bit(LFL_DLM_CANCEL
, &lp
->flags
);
312 if (flags
& LM_FLAG_NOEXP
)
313 set_bit(LFL_NOBLOCK
, &lp
->flags
);
315 check_cur_state(lp
, cur_state
);
316 lp
->req
= make_mode(req_state
);
317 lp
->lkf
= make_flags(lp
, flags
, lp
->cur
, lp
->req
);
319 return gdlm_do_lock(lp
);
322 unsigned int gdlm_unlock(lm_lock_t
*lock
, unsigned int cur_state
)
324 struct gdlm_lock
*lp
= (struct gdlm_lock
*) lock
;
326 clear_bit(LFL_DLM_CANCEL
, &lp
->flags
);
327 if (lp
->cur
== DLM_LOCK_IV
)
329 return gdlm_do_unlock(lp
);
332 void gdlm_cancel(lm_lock_t
*lock
)
334 struct gdlm_lock
*lp
= (struct gdlm_lock
*) lock
;
335 struct gdlm_ls
*ls
= lp
->ls
;
336 int error
, delay_list
= 0;
338 if (test_bit(LFL_DLM_CANCEL
, &lp
->flags
))
341 log_info("gdlm_cancel %x,%llx flags %lx",
342 lp
->lockname
.ln_type
, lp
->lockname
.ln_number
, lp
->flags
);
344 spin_lock(&ls
->async_lock
);
345 if (!list_empty(&lp
->delay_list
)) {
346 list_del_init(&lp
->delay_list
);
349 spin_unlock(&ls
->async_lock
);
352 set_bit(LFL_CANCEL
, &lp
->flags
);
353 set_bit(LFL_ACTIVE
, &lp
->flags
);
358 if (!test_bit(LFL_ACTIVE
, &lp
->flags
) ||
359 test_bit(LFL_DLM_UNLOCK
, &lp
->flags
)) {
360 log_info("gdlm_cancel skip %x,%llx flags %lx",
361 lp
->lockname
.ln_type
, lp
->lockname
.ln_number
,
366 /* the lock is blocked in the dlm */
368 set_bit(LFL_DLM_CANCEL
, &lp
->flags
);
369 set_bit(LFL_ACTIVE
, &lp
->flags
);
371 error
= dlm_unlock(ls
->dlm_lockspace
, lp
->lksb
.sb_lkid
, DLM_LKF_CANCEL
,
374 log_info("gdlm_cancel rv %d %x,%llx flags %lx", error
,
375 lp
->lockname
.ln_type
, lp
->lockname
.ln_number
, lp
->flags
);
378 clear_bit(LFL_DLM_CANCEL
, &lp
->flags
);
381 int gdlm_add_lvb(struct gdlm_lock
*lp
)
385 lvb
= kzalloc(GDLM_LVB_SIZE
, GFP_KERNEL
);
389 lp
->lksb
.sb_lvbptr
= lvb
;
394 void gdlm_del_lvb(struct gdlm_lock
*lp
)
398 lp
->lksb
.sb_lvbptr
= NULL
;
401 /* This can do a synchronous dlm request (requiring a lock_dlm thread to get
402 the completion) because gfs won't call hold_lvb() during a callback (from
403 the context of a lock_dlm thread). */
405 static int hold_null_lock(struct gdlm_lock
*lp
)
407 struct gdlm_lock
*lpn
= NULL
;
411 printk(KERN_INFO
"lock_dlm: lvb already held\n");
415 error
= gdlm_create_lp(lp
->ls
, &lp
->lockname
, &lpn
);
419 lpn
->lksb
.sb_lvbptr
= junk_lvb
;
422 lpn
->req
= DLM_LOCK_NL
;
423 lpn
->lkf
= DLM_LKF_VALBLK
| DLM_LKF_EXPEDITE
;
424 set_bit(LFL_NOBAST
, &lpn
->flags
);
425 set_bit(LFL_INLOCK
, &lpn
->flags
);
427 init_completion(&lpn
->ast_wait
);
429 wait_for_completion(&lpn
->ast_wait
);
430 error
= lp
->lksb
.sb_status
;
432 printk(KERN_INFO
"lock_dlm: hold_null_lock dlm error %d\n",
442 /* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
443 the completion) because gfs may call unhold_lvb() during a callback (from
444 the context of a lock_dlm thread) which could cause a deadlock since the
445 other lock_dlm thread could be engaged in recovery. */
447 static void unhold_null_lock(struct gdlm_lock
*lp
)
449 struct gdlm_lock
*lpn
= lp
->hold_null
;
451 gdlm_assert(lpn
, "%x,%llx",
452 lp
->lockname
.ln_type
, lp
->lockname
.ln_number
);
453 lpn
->lksb
.sb_lvbptr
= NULL
;
455 set_bit(LFL_UNLOCK_DELETE
, &lpn
->flags
);
457 lp
->hold_null
= NULL
;
460 /* Acquire a NL lock because gfs requires the value block to remain
461 intact on the resource while the lvb is "held" even if it's holding no locks
464 int gdlm_hold_lvb(lm_lock_t
*lock
, char **lvbp
)
466 struct gdlm_lock
*lp
= (struct gdlm_lock
*) lock
;
469 error
= gdlm_add_lvb(lp
);
475 error
= hold_null_lock(lp
);
482 void gdlm_unhold_lvb(lm_lock_t
*lock
, char *lvb
)
484 struct gdlm_lock
*lp
= (struct gdlm_lock
*) lock
;
486 unhold_null_lock(lp
);
490 void gdlm_sync_lvb(lm_lock_t
*lock
, char *lvb
)
492 struct gdlm_lock
*lp
= (struct gdlm_lock
*) lock
;
494 if (lp
->cur
!= DLM_LOCK_EX
)
497 init_completion(&lp
->ast_wait
);
498 set_bit(LFL_SYNC_LVB
, &lp
->flags
);
500 lp
->req
= DLM_LOCK_EX
;
501 lp
->lkf
= make_flags(lp
, 0, lp
->cur
, lp
->req
);
504 wait_for_completion(&lp
->ast_wait
);
507 void gdlm_submit_delayed(struct gdlm_ls
*ls
)
509 struct gdlm_lock
*lp
, *safe
;
511 spin_lock(&ls
->async_lock
);
512 list_for_each_entry_safe(lp
, safe
, &ls
->delayed
, delay_list
) {
513 list_del_init(&lp
->delay_list
);
514 list_add_tail(&lp
->delay_list
, &ls
->submit
);
516 spin_unlock(&ls
->async_lock
);
517 wake_up(&ls
->thread_wait
);
520 int gdlm_release_all_locks(struct gdlm_ls
*ls
)
522 struct gdlm_lock
*lp
, *safe
;
525 spin_lock(&ls
->async_lock
);
526 list_for_each_entry_safe(lp
, safe
, &ls
->all_locks
, all_list
) {
527 list_del_init(&lp
->all_list
);
529 if (lp
->lvb
&& lp
->lvb
!= junk_lvb
)
534 spin_unlock(&ls
->async_lock
);