/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Code which implements an OCFS2 specific interface to our DLM.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/quotaops.h>

#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>

#include "ocfs2_lockingver.h"

#include "extent_map.h"
#include "heartbeat.h"
#include "stackglue.h"

#include "buffer_head_io.h"
struct ocfs2_mask_waiter {
	struct list_head	mw_item;
	int			mw_status;
	struct completion	mw_complete;
	unsigned long		mw_mask;
	unsigned long		mw_goal;
#ifdef CONFIG_OCFS2_FS_STATS
	unsigned long long	mw_lock_start;
#endif
};
static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
/*
 * Return value from ->downconvert_worker functions.
 *
 * These control the precise actions of ocfs2_unblock_lock()
 * and ocfs2_process_blocked_lock()
 */
enum ocfs2_unblock_action {
	UNBLOCK_CONTINUE	= 0, /* Continue downconvert */
	UNBLOCK_CONTINUE_POST	= 1, /* Continue downconvert, fire
				      * ->post_unlock callback */
	UNBLOCK_STOP_POST	= 2, /* Do not downconvert, fire
				      * ->post_unlock() callback. */
};

struct ocfs2_unblock_ctl {
	enum ocfs2_unblock_action unblock_action;
};
static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking);

static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking);

static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres);

static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);

#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
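/*
 * Illustrative note: a call such as mlog_meta_lvb(0, lockres) expands to
 * ocfs2_dump_meta_lvb_info(0, __PRETTY_FUNCTION__, __LINE__, lockres),
 * so the LVB dump below is always tagged with the caller's function name
 * and line number.
 */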
/* This aids in debugging situations where a bad LVB might be involved. */
static void ocfs2_dump_meta_lvb_info(u64 level,
				     const char *function,
				     unsigned int line,
				     struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb =
		(struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);

	mlog(level, "LVB information for %s (called from %s:%u):\n",
	     lockres->l_name, function, line);
	mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
	     lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
	     be32_to_cpu(lvb->lvb_igeneration));
	mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
	     (unsigned long long)be64_to_cpu(lvb->lvb_isize),
	     be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
	     be16_to_cpu(lvb->lvb_imode));
	mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
	     "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
	     (long long)be64_to_cpu(lvb->lvb_iatime_packed),
	     (long long)be64_to_cpu(lvb->lvb_ictime_packed),
	     (long long)be64_to_cpu(lvb->lvb_imtime_packed),
	     be32_to_cpu(lvb->lvb_iattr));
}
/*
 * OCFS2 Lock Resource Operations
 *
 * These fine tune the behavior of the generic dlmglue locking infrastructure.
 *
 * The most basic of lock types can point ->l_priv to their respective
 * struct ocfs2_super and allow the default actions to manage things.
 *
 * Right now, each lock type also needs to implement an init function,
 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
 * should be called when the lock is no longer needed (i.e., object
 * destruction time).
 */
struct ocfs2_lock_res_ops {
	/*
	 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
	 * this callback if ->l_priv is not an ocfs2_super pointer.
	 */
	struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);

	/*
	 * Optionally called in the downconvert thread after a
	 * successful downconvert. The lockres will not be referenced
	 * after this callback is called, so it is safe to free
	 * memory, etc.
	 *
	 * The exact semantics of when this is called are controlled
	 * by ->downconvert_worker().
	 */
	void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);

	/*
	 * Allow a lock type to add checks to determine whether it is
	 * safe to downconvert a lock. Return 0 to re-queue the
	 * downconvert at a later time, nonzero to continue.
	 *
	 * For most locks, the default checks that there are no
	 * incompatible holders are sufficient.
	 *
	 * Called with the lockres spinlock held.
	 */
	int (*check_downconvert)(struct ocfs2_lock_res *, int);

	/*
	 * Allows a lock type to populate the lock value block. This
	 * is called on downconvert, and when we drop a lock.
	 *
	 * Locks that want to use this should set LOCK_TYPE_USES_LVB
	 * in the flags field.
	 *
	 * Called with the lockres spinlock held.
	 */
	void (*set_lvb)(struct ocfs2_lock_res *);

	/*
	 * Called from the downconvert thread when it is determined
	 * that a lock will be downconverted. This is called without
	 * any locks held so the function can do work that might
	 * schedule (syncing out data, etc).
	 *
	 * This should return any one of the ocfs2_unblock_action
	 * values, depending on what it wants the thread to do.
	 */
	int (*downconvert_worker)(struct ocfs2_lock_res *, int);

	/*
	 * LOCK_TYPE_* flags which describe the specific requirements
	 * of a lock type. Descriptions of each individual flag follow.
	 */
	int flags;
};

/*
 * Some locks want to "refresh" potentially stale data when a
 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
 * individual lockres l_flags member from the ast function. It is
 * expected that the locking wrapper will clear the
 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
 */
#define LOCK_TYPE_REQUIRES_REFRESH 0x1

/*
 * Indicate that a lock type makes use of the lock value block. The
 * ->set_lvb lock type callback must be defined.
 */
#define LOCK_TYPE_USES_LVB 0x2
static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
	.get_osb	= ocfs2_get_inode_osb,
};

static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.check_downconvert = ocfs2_check_meta_downconvert,
	.set_lvb	= ocfs2_set_meta_lvb,
	.downconvert_worker = ocfs2_data_convert_worker,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_super_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH,
};

static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
};

static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
	.get_osb	= ocfs2_get_dentry_osb,
	.post_unlock	= ocfs2_dentry_post_unlock,
	.downconvert_worker = ocfs2_dentry_convert_worker,
};

static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
	.get_osb	= ocfs2_get_inode_osb,
};

static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
	.get_osb	= ocfs2_get_file_osb,
};

static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
	.set_lvb	= ocfs2_set_qinfo_lvb,
	.get_osb	= ocfs2_get_qinfo_osb,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
};
static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
	return lockres->l_type == OCFS2_LOCK_TYPE_META ||
		lockres->l_type == OCFS2_LOCK_TYPE_RW ||
		lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}

static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!ocfs2_is_inode_lock(lockres));

	return (struct inode *) lockres->l_priv;
}

static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);

	return (struct ocfs2_dentry_lock *)lockres->l_priv;
}

static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);

	return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
}

static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{
	if (lockres->l_ops->get_osb)
		return lockres->l_ops->get_osb(lockres);

	return (struct ocfs2_super *)lockres->l_priv;
}
static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted);
static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int level);
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert);
#define ocfs2_log_dlm_error(_func, _err, _lockres) do {			\
	mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n",	\
	     _err, _func, _lockres->l_name);					\
} while (0)
static int ocfs2_downconvert_thread(void *arg);
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static int ocfs2_inode_lock_update(struct inode *inode,
				   struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
					      int new_level);
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
				  struct ocfs2_lock_res *lockres,
				  int new_level,
				  int lvb,
				  unsigned int generation);
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres);
static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
				  u64 blkno,
				  u32 generation,
				  char *name)
{
	int len;

	BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);

	len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
		       ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
		       (long long)blkno, generation);

	BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));

	mlog(0, "built lock resource with name: %s\n", name);
}
static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);

static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
				       struct ocfs2_dlm_debug *dlm_debug)
{
	mlog(0, "Add tracking for lockres %s\n", res->l_name);

	spin_lock(&ocfs2_dlm_tracking_lock);
	list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}

static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{
	spin_lock(&ocfs2_dlm_tracking_lock);
	if (!list_empty(&res->l_debug_list))
		list_del_init(&res->l_debug_list);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}
#ifdef CONFIG_OCFS2_FS_STATS
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
	res->l_lock_num_prmode = 0;
	res->l_lock_num_prmode_failed = 0;
	res->l_lock_total_prmode = 0;
	res->l_lock_max_prmode = 0;
	res->l_lock_num_exmode = 0;
	res->l_lock_num_exmode_failed = 0;
	res->l_lock_total_exmode = 0;
	res->l_lock_max_exmode = 0;
	res->l_lock_refresh = 0;
}
static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
				    struct ocfs2_mask_waiter *mw, int ret)
{
	unsigned long long *num, *sum;
	unsigned int *max, *failed;
	struct timespec ts = current_kernel_time();
	unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start;

	if (level == LKM_PRMODE) {
		num = &res->l_lock_num_prmode;
		sum = &res->l_lock_total_prmode;
		max = &res->l_lock_max_prmode;
		failed = &res->l_lock_num_prmode_failed;
	} else if (level == LKM_EXMODE) {
		num = &res->l_lock_num_exmode;
		sum = &res->l_lock_total_exmode;
		max = &res->l_lock_max_exmode;
		failed = &res->l_lock_num_exmode_failed;
	} else
		return;

	(*num)++;
	(*sum) += time;
	if (time > *max)
		*max = time;
	if (ret)
		(*failed)++;
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
	lockres->l_lock_refresh++;
}

static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
	struct timespec ts = current_kernel_time();
	mw->mw_lock_start = timespec_to_ns(&ts);
}
#else
static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
}
static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
					   int level, struct ocfs2_mask_waiter *mw, int ret)
{
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
}
#endif
static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *res,
				       enum ocfs2_lock_type type,
				       struct ocfs2_lock_res_ops *ops,
				       void *priv)
{
	res->l_type          = type;
	res->l_ops           = ops;
	res->l_priv          = priv;

	res->l_level         = DLM_LOCK_IV;
	res->l_requested     = DLM_LOCK_IV;
	res->l_blocking      = DLM_LOCK_IV;
	res->l_action        = OCFS2_AST_INVALID;
	res->l_unlock_action = OCFS2_UNLOCK_INVALID;

	res->l_flags         = OCFS2_LOCK_INITIALIZED;

	ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);

	ocfs2_init_lock_stats(res);
}
void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{
	/* This also clears out the lock status block */
	memset(res, 0, sizeof(struct ocfs2_lock_res));
	spin_lock_init(&res->l_lock);
	init_waitqueue_head(&res->l_event);
	INIT_LIST_HEAD(&res->l_blocked_list);
	INIT_LIST_HEAD(&res->l_mask_waiters);
}
void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
			       enum ocfs2_lock_type type,
			       unsigned int generation,
			       struct inode *inode)
{
	struct ocfs2_lock_res_ops *ops;

	switch(type) {
	case OCFS2_LOCK_TYPE_RW:
		ops = &ocfs2_inode_rw_lops;
		break;
	case OCFS2_LOCK_TYPE_META:
		ops = &ocfs2_inode_inode_lops;
		break;
	case OCFS2_LOCK_TYPE_OPEN:
		ops = &ocfs2_inode_open_lops;
		break;
	default:
		mlog_bug_on_msg(1, "type: %d\n", type);
		ops = NULL; /* thanks, gcc */
		break;
	}

	ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
			      generation, res->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
}
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	return OCFS2_SB(inode->i_sb);
}

static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_mem_dqinfo *info = lockres->l_priv;

	return OCFS2_SB(info->dqi_gi.dqi_sb);
}

static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_file_private *fp = lockres->l_priv;

	return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
}
static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
	__be64 inode_blkno_be;

	memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
	       sizeof(__be64));

	return be64_to_cpu(inode_blkno_be);
}

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_dentry_lock *dl = lockres->l_priv;

	return OCFS2_SB(dl->dl_inode->i_sb);
}
void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
				u64 parent, struct inode *inode)
{
	int len;
	u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
	__be64 inode_blkno_be = cpu_to_be64(inode_blkno);
	struct ocfs2_lock_res *lockres = &dl->dl_lockres;

	ocfs2_lock_res_init_once(lockres);

	/*
	 * Unfortunately, the standard lock naming scheme won't work
	 * here because we have two 16 byte values to use. Instead,
	 * we'll stuff the inode number as a binary value. We still
	 * want error prints to show something without garbling the
	 * display, so drop a null byte in there before the inode
	 * number. A future version of OCFS2 will likely use all
	 * binary lock names. The stringified names have been a
	 * tremendous aid in debugging, but now that the debugfs
	 * interface exists, we can mangle things there if need be.
	 *
	 * NOTE: We also drop the standard "pad" value (the total lock
	 * name size stays the same though - the last part is all
	 * zeros due to the memset in ocfs2_lock_res_init_once()
	 */
	len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
		       "%c%016llx",
		       ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
		       (long long)parent);

	BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));

	memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
	       sizeof(__be64));

	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
				   OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
				   dl);
}
static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
				      struct ocfs2_super *osb)
{
	/* Superblock lockres doesn't come from a slab so we call init
	 * once on it manually. */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
			      0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
				   &ocfs2_super_lops, osb);
}

static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
				       struct ocfs2_super *osb)
{
	/* Rename lockres doesn't come from a slab so we call init
	 * once on it manually. */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
				   &ocfs2_rename_lops, osb);
}
void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
			      struct ocfs2_file_private *fp)
{
	struct inode *inode = fp->fp_file->f_mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
			      inode->i_generation, lockres->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
				   OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
				   fp);
	lockres->l_flags |= OCFS2_LOCK_NOCACHE;
}

void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
			       struct ocfs2_mem_dqinfo *info)
{
	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
			      0, lockres->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
				   OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
				   info);
}
void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
	if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
		return;

	ocfs2_remove_lockres_tracking(res);

	mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
			"Lockres %s is on the blocked list\n",
			res->l_name);
	mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
			"Lockres %s has mask waiters pending\n",
			res->l_name);
	mlog_bug_on_msg(spin_is_locked(&res->l_lock),
			"Lockres %s is locked\n",
			res->l_name);
	mlog_bug_on_msg(res->l_ro_holders,
			"Lockres %s has %u ro holders\n",
			res->l_name, res->l_ro_holders);
	mlog_bug_on_msg(res->l_ex_holders,
			"Lockres %s has %u ex holders\n",
			res->l_name, res->l_ex_holders);

	/* Need to clear out the lock status block for the dlm */
	memset(&res->l_lksb, 0, sizeof(res->l_lksb));
}
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	BUG_ON(!lockres);

	switch(level) {
	case DLM_LOCK_EX:
		lockres->l_ex_holders++;
		break;
	case DLM_LOCK_PR:
		lockres->l_ro_holders++;
		break;
	default:
		BUG();
	}
}

static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	BUG_ON(!lockres);

	switch(level) {
	case DLM_LOCK_EX:
		BUG_ON(!lockres->l_ex_holders);
		lockres->l_ex_holders--;
		break;
	case DLM_LOCK_PR:
		BUG_ON(!lockres->l_ro_holders);
		lockres->l_ro_holders--;
		break;
	default:
		BUG();
	}
}
/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL. It *will* have to be adjusted when more
 * lock types are added. */
static inline int ocfs2_highest_compat_lock_level(int level)
{
	int new_level = DLM_LOCK_EX;

	if (level == DLM_LOCK_EX)
		new_level = DLM_LOCK_NL;
	else if (level == DLM_LOCK_PR)
		new_level = DLM_LOCK_PR;
	return new_level;
}
static void lockres_set_flags(struct ocfs2_lock_res *lockres,
			      unsigned long newflags)
{
	struct ocfs2_mask_waiter *mw, *tmp;

	assert_spin_locked(&lockres->l_lock);

	lockres->l_flags = newflags;

	list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			continue;

		list_del_init(&mw->mw_item);
		mw->mw_status = 0;
		complete(&mw->mw_complete);
	}
}

static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{
	lockres_set_flags(lockres, lockres->l_flags | or);
}

static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
				unsigned long clear)
{
	lockres_set_flags(lockres, lockres->l_flags & ~clear);
}
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);

	lockres->l_level = lockres->l_requested;
	if (lockres->l_level <=
	    ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
		lockres->l_blocking = DLM_LOCK_NL;
		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
	}
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));

	/* Convert from RO to EX doesn't really need anything as our
	 * information is already up to date. Convert from NL to
	 * *anything* however should mark ourselves as needing an
	 * update. */
	if (lockres->l_level == DLM_LOCK_NL &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{
	BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);

	if (lockres->l_requested > DLM_LOCK_NL &&
	    !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;
	lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
}
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
				     int level)
{
	int needs_downconvert = 0;

	assert_spin_locked(&lockres->l_lock);

	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);

	if (level > lockres->l_blocking) {
		/* only schedule a downconvert if we haven't already scheduled
		 * one that goes low enough to satisfy the level we're
		 * blocking. this also catches the case where we get
		 * duplicate BASTs */
		if (ocfs2_highest_compat_lock_level(level) <
		    ocfs2_highest_compat_lock_level(lockres->l_blocking))
			needs_downconvert = 1;

		lockres->l_blocking = level;
	}

	mlog_exit(needs_downconvert);
	return needs_downconvert;
}
/*
 * OCFS2_LOCK_PENDING and l_pending_gen.
 *
 * Why does OCFS2_LOCK_PENDING exist?  To close a race between setting
 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock().  See ocfs2_unblock_lock()
 * for more details on the race.
 *
 * OCFS2_LOCK_PENDING closes the race quite nicely.  However, it introduces
 * a race on itself.  In o2dlm, we can get the ast before ocfs2_dlm_lock()
 * returns.  The ast clears OCFS2_LOCK_BUSY, and must therefore clear
 * OCFS2_LOCK_PENDING at the same time.  When ocfs2_dlm_lock() returns,
 * the caller is going to try to clear PENDING again.  If nothing else is
 * happening, __lockres_clear_pending() sees PENDING is unset and does
 * nothing.
 *
 * But what if another path (eg downconvert thread) has just started a
 * new locking action?  The other path has re-set PENDING.  Our path
 * cannot clear PENDING, because that will re-open the original race
 * window.  In outline:
 *
 * ocfs2_cluster_lock()
 *  ...
 *  ocfs2_locking_ast()		ocfs2_downconvert_thread()
 *   clear PENDING		 ocfs2_unblock_lock()
 *				  ...
 *				  ocfs2_prepare_downconvert()
 *				  ...
 *
 * So as you can see, we now have a window where l_lock is not held,
 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
 *
 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
 * set by ocfs2_prepare_downconvert().  That wasn't nice.
 *
 * To solve this we introduce l_pending_gen.  A call to
 * lockres_clear_pending() will only do so when it is passed a generation
 * number that matches the lockres.  lockres_set_pending() will return the
 * current generation number.  When ocfs2_cluster_lock() goes to clear
 * PENDING, it passes the generation it got from set_pending().  In our
 * example above, the generation numbers will *not* match.  Thus,
 * ocfs2_cluster_lock() will not clear the PENDING set by
 * ocfs2_prepare_downconvert().
 */
/* Unlocked version for ocfs2_locking_ast() */
static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
				    unsigned int generation,
				    struct ocfs2_super *osb)
{
	assert_spin_locked(&lockres->l_lock);

	/*
	 * The ast and locking functions can race us here.  The winner
	 * will clear pending, the loser will not.
	 */
	if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
	    (lockres->l_pending_gen != generation))
		return;

	lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
	lockres->l_pending_gen++;

	/*
	 * The downconvert thread may have skipped us because we
	 * were PENDING.  Wake it up.
	 */
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
		ocfs2_wake_downconvert_thread(osb);
}

/* Locked version for callers of ocfs2_dlm_lock() */
static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
				  unsigned int generation,
				  struct ocfs2_super *osb)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	__lockres_clear_pending(lockres, generation, osb);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
{
	assert_spin_locked(&lockres->l_lock);
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));

	lockres_or_flags(lockres, OCFS2_LOCK_PENDING);

	return lockres->l_pending_gen;
}
static void ocfs2_blocking_ast(void *opaque, int level)
{
	struct ocfs2_lock_res *lockres = opaque;
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	int needs_downconvert;
	unsigned long flags;

	BUG_ON(level <= DLM_LOCK_NL);

	mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
	     lockres->l_name, level, lockres->l_level,
	     ocfs2_lock_type_string(lockres->l_type));

	/*
	 * We can skip the bast for locks which don't enable caching -
	 * they'll be dropped at the earliest possible time anyway.
	 */
	if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
		return;

	spin_lock_irqsave(&lockres->l_lock, flags);
	needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
	if (needs_downconvert)
		ocfs2_schedule_blocked_lock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);

	ocfs2_wake_downconvert_thread(osb);
}
static void ocfs2_locking_ast(void *opaque)
{
	struct ocfs2_lock_res *lockres = opaque;
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	unsigned long flags;
	int status;

	spin_lock_irqsave(&lockres->l_lock, flags);

	status = ocfs2_dlm_lock_status(&lockres->l_lksb);

	if (status == -EAGAIN) {
		lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
		goto out;
	}

	if (status) {
		mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
		     lockres->l_name, status);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		return;
	}

	switch(lockres->l_action) {
	case OCFS2_AST_ATTACH:
		ocfs2_generic_handle_attach_action(lockres);
		lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
		break;
	case OCFS2_AST_CONVERT:
		ocfs2_generic_handle_convert_action(lockres);
		break;
	case OCFS2_AST_DOWNCONVERT:
		ocfs2_generic_handle_downconvert_action(lockres);
		break;
	default:
		mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
		     "lockres flags = 0x%lx, unlock action: %u\n",
		     lockres->l_name, lockres->l_action, lockres->l_flags,
		     lockres->l_unlock_action);
		BUG();
	}

out:
	/* set it to something invalid so if we get called again we
	 * can catch it. */
	lockres->l_action = OCFS2_AST_INVALID;

	/* Did we try to cancel this lock?  Clear that state */
	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;

	/*
	 * We may have beaten the locking functions here.  We certainly
	 * know that dlm_lock() has been called :-)
	 * Because we can't have two lock calls in flight at once, we
	 * can use lockres->l_pending_gen.
	 */
	__lockres_clear_pending(lockres, lockres->l_pending_gen, osb);

	wake_up(&lockres->l_event);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	if (convert)
		lockres->l_action = OCFS2_AST_INVALID;
	else
		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);
}
/* Note: If we detect another process working on the lock (i.e.,
 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
 * to do the right thing in that case.
 */
static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags)
{
	int ret = 0;
	unsigned long flags;
	unsigned int gen;

	mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
	     dlm_flags);

	spin_lock_irqsave(&lockres->l_lock, flags);
	if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
	    (lockres->l_flags & OCFS2_LOCK_BUSY)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	lockres->l_action = OCFS2_AST_ATTACH;
	lockres->l_requested = level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	gen = lockres_set_pending(lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_dlm_lock(osb->cconn,
			     level,
			     &lockres->l_lksb,
			     dlm_flags,
			     lockres->l_name,
			     OCFS2_LOCK_ID_MAX_LEN - 1,
			     lockres);
	lockres_clear_pending(lockres, gen, osb);
	if (ret) {
		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
		ocfs2_recover_from_dlm_error(lockres, 1);
	}

	mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);

bail:
	return ret;
}
static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
					int flag)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = lockres->l_flags & flag;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
}

static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
}
/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));

	return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
}
static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
	INIT_LIST_HEAD(&mw->mw_item);
	init_completion(&mw->mw_complete);
	ocfs2_init_start_time(mw);
}

static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{
	wait_for_completion(&mw->mw_complete);
	/* Re-arm the completion in case we want to wait on it again */
	INIT_COMPLETION(mw->mw_complete);
	return mw->mw_status;
}
static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
				    struct ocfs2_mask_waiter *mw,
				    unsigned long mask,
				    unsigned long goal)
{
	BUG_ON(!list_empty(&mw->mw_item));

	assert_spin_locked(&lockres->l_lock);

	list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
	mw->mw_mask = mask;
	mw->mw_goal = goal;
}
/* returns 0 if the mw that was removed was already satisfied, -EBUSY
 * if the mask still hadn't reached its goal */
static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!list_empty(&mw->mw_item)) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			ret = -EBUSY;

		list_del_init(&mw->mw_item);
		init_completion(&mw->mw_complete);
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}
static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
					     struct ocfs2_lock_res *lockres)
{
	int ret;

	ret = wait_for_completion_interruptible(&mw->mw_complete);
	if (ret)
		lockres_remove_mask_waiter(lockres, mw);
	else
		ret = mw->mw_status;
	/* Re-arm the completion in case we want to wait on it again */
	INIT_COMPLETION(mw->mw_complete);

	return ret;
}
static int ocfs2_cluster_lock(struct ocfs2_super *osb,
			      struct ocfs2_lock_res *lockres,
			      int level,
			      u32 lkm_flags,
			      int arg_flags)
{
	struct ocfs2_mask_waiter mw;
	int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
	int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
	unsigned long flags;
	unsigned int gen;
	int noqueue_attempted = 0;

	ocfs2_init_mask_waiter(&mw);

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
		lkm_flags |= DLM_LKF_VALBLK;

again:
	wait = 0;

	if (catch_signals && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	spin_lock_irqsave(&lockres->l_lock, flags);

	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
			"Cluster lock called on freeing lockres %s! flags "
			"0x%lx\n", lockres->l_name, lockres->l_flags);

	/* We only compare against the currently granted level
	 * here. If the lock is blocked waiting on a downconvert,
	 * we'll get caught below. */
	if (lockres->l_flags & OCFS2_LOCK_BUSY &&
	    level > lockres->l_level) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		wait = 1;
		goto unlock;
	}

	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
		/* the lock is currently blocked on behalf of
		 * another node */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
		wait = 1;
		goto unlock;
	}

	if (level > lockres->l_level) {
		if (noqueue_attempted > 0) {
			ret = -EAGAIN;
			goto unlock;
		}
		if (lkm_flags & DLM_LKF_NOQUEUE)
			noqueue_attempted = 1;

		if (lockres->l_action != OCFS2_AST_INVALID)
			mlog(ML_ERROR, "lockres %s has action %u pending\n",
			     lockres->l_name, lockres->l_action);

		if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
			lockres->l_action = OCFS2_AST_ATTACH;
			lkm_flags &= ~DLM_LKF_CONVERT;
		} else {
			lockres->l_action = OCFS2_AST_CONVERT;
			lkm_flags |= DLM_LKF_CONVERT;
		}

		lockres->l_requested = level;
		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
		gen = lockres_set_pending(lockres);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		BUG_ON(level == DLM_LOCK_IV);
		BUG_ON(level == DLM_LOCK_NL);

		mlog(0, "lock %s, convert from %d to level = %d\n",
		     lockres->l_name, lockres->l_level, level);

		/* call dlm_lock to upgrade lock now */
		ret = ocfs2_dlm_lock(osb->cconn,
				     level,
				     &lockres->l_lksb,
				     lkm_flags,
				     lockres->l_name,
				     OCFS2_LOCK_ID_MAX_LEN - 1,
				     lockres);
		lockres_clear_pending(lockres, gen, osb);
		if (ret) {
			if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
			    (ret != -EAGAIN))
				ocfs2_log_dlm_error("ocfs2_dlm_lock",
						    ret, lockres);
			ocfs2_recover_from_dlm_error(lockres, 1);
			goto out;
		}

		mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
		     lockres->l_name);

		/* At this point we've gone inside the dlm and need to
		 * complete our work regardless. */
		catch_signals = 0;

		/* wait for busy to clear and carry on */
		goto again;
	}

	/* Ok, if we get here then we're good to go. */
	ocfs2_inc_holders(lockres, level);

	ret = 0;
unlock:
	spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
	/*
	 * This is helping work around a lock inversion between the page lock
	 * and dlm locks.  One path holds the page lock while calling aops
	 * which block acquiring dlm locks.  The voting thread holds dlm
	 * locks while acquiring page locks while down converting data locks.
	 * This block is helping an aop path notice the inversion and back
	 * off to unlock its page lock before trying the dlm lock again.
	 */
	if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
	    mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
		wait = 0;
		if (lockres_remove_mask_waiter(lockres, &mw))
			ret = -EAGAIN;
		else
			goto again;
	}
	if (wait) {
		ret = ocfs2_wait_for_mask(&mw);
		if (ret == 0)
			goto again;
		mlog_errno(ret);
	}
	ocfs2_update_lock_stats(lockres, level, &mw, ret);

	return ret;
}
static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int level)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ocfs2_dec_holders(lockres, level);
	ocfs2_downconvert_on_unlock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static int ocfs2_create_new_lock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int ex,
				 int local)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	unsigned long flags;
	u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
	lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ocfs2_lock_create(osb, lockres, level, lkm_flags);
}
/* Grants us an EX lock on the data and metadata resources, skipping
 * the normal cluster directory lookup. Use this ONLY on newly created
 * inodes which other nodes can't possibly see, and which haven't been
 * hashed in the inode hash yet. This can give us a good performance
 * increase as it'll skip the network broadcast normally associated
 * with creating a new lock resource. */
int ocfs2_create_new_inode_locks(struct inode *inode)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!ocfs2_inode_is_new(inode));

	mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);

	/* NOTE: That we don't increment any of the holder counts, nor
	 * do we add anything to a journal handle. Since this is
	 * supposed to be a new inode which the cluster doesn't know
	 * about yet, there is no need to.  As far as the LVB handling
	 * is concerned, this is basically like acquiring an EX lock
	 * on a resource which has an invalid one -- we'll set it
	 * valid when we release the EX. */

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	/*
	 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
	 * don't use a generation in their lock names.
	 */
	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
	if (ret)
		mlog_errno(ret);

bail:
	return ret;
}
int ocfs2_rw_lock(struct inode *inode, int write)
{
	int status, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu take %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb))
		return 0;

	lockres = &OCFS2_I(inode)->ip_rw_lockres;

	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;

	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
				    0);
	if (status < 0)
		mlog_errno(status);

	return status;
}
*inode
, int write
)
1490 int level
= write
? DLM_LOCK_EX
: DLM_LOCK_PR
;
1491 struct ocfs2_lock_res
*lockres
= &OCFS2_I(inode
)->ip_rw_lockres
;
1492 struct ocfs2_super
*osb
= OCFS2_SB(inode
->i_sb
);
1496 mlog(0, "inode %llu drop %s RW lock\n",
1497 (unsigned long long)OCFS2_I(inode
)->ip_blkno
,
1498 write
? "EXMODE" : "PRMODE");
1500 if (!ocfs2_mount_local(osb
))
1501 ocfs2_cluster_unlock(OCFS2_SB(inode
->i_sb
), lockres
, level
);
/*
 * ocfs2_open_lock always gets a PR mode lock.
 */
int ocfs2_open_lock(struct inode *inode)
{
	int status = 0;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu take PRMODE open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_open_lockres;

	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
				    DLM_LOCK_PR, 0, 0);
	if (status < 0)
		mlog_errno(status);

out:
	return status;
}
int ocfs2_try_open_lock(struct inode *inode, int write)
{
	int status = 0, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu try to take %s open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_open_lockres;

	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;

	/*
	 * The file system may already be holding a PRMODE/EXMODE open lock.
	 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
	 * other nodes and the -EAGAIN will indicate to the caller that
	 * this inode is still in use.
	 */
	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
				    level, DLM_LKF_NOQUEUE, 0);

out:
	return status;
}
/*
 * ocfs2_open_unlock unlocks PR and EX mode open locks.
 */
void ocfs2_open_unlock(struct inode *inode)
{
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu drop open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb))
		return;

	if (lockres->l_ro_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     DLM_LOCK_PR);
	if (lockres->l_ex_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     DLM_LOCK_EX);
}
static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
				     int level)
{
	int ret;
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	unsigned long flags;
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);

retry_cancel:
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
		ret = ocfs2_prepare_cancel_convert(osb, lockres);
		if (ret) {
			spin_unlock_irqrestore(&lockres->l_lock, flags);
			ret = ocfs2_cancel_convert(osb, lockres);
			if (ret < 0) {
				mlog_errno(ret);
				goto out;
			}
			goto retry_cancel;
		}
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_for_mask(&mw);
		goto retry_cancel;
	}

	ret = -ERESTARTSYS;
	/*
	 * We may still have gotten the lock, in which case there's no
	 * point to restarting the syscall.
	 */
	if (lockres->l_level == level)
		ret = 0;

	mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
	     lockres->l_flags, lockres->l_level, lockres->l_action);

	spin_unlock_irqrestore(&lockres->l_lock, flags);

out:
	return ret;
}
/*
 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
 * flock() calls. The locking approach this requires is sufficiently
 * different from all other cluster lock types that we implement a
 * separate path to the "low-level" dlm calls. In particular:
 *
 * - No optimization of lock levels is done - we take exactly
 *   what's been requested.
 *
 * - No lock caching is employed. We immediately downconvert to
 *   no-lock at unlock time. This also means flock locks never go on
 *   the blocking list.
 *
 * - Since userspace can trivially deadlock itself with flock, we make
 *   sure to allow cancellation of a misbehaving application's flock()
 *   request.
 *
 * - Access to any flock lockres doesn't require concurrency, so we
 *   can simplify the code by requiring the caller to guarantee
 *   serialization of dlmglue flock calls.
 */
int ocfs2_file_lock(struct file *file, int ex, int trylock)
{
	int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
	unsigned long flags;
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_lock_res *lockres = &fp->fp_flock;
	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);

	if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
	    (lockres->l_level > DLM_LOCK_NL)) {
		mlog(ML_ERROR,
		     "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
		     "level: %u\n", lockres->l_name, lockres->l_flags,
		     lockres->l_level);
		return -EINVAL;
	}

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		/*
		 * Get the lock at NLMODE to start - that way we
		 * can cancel the upconvert request if need be.
		 */
		ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_wait_for_mask(&mw);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		spin_lock_irqsave(&lockres->l_lock, flags);
	}

	lockres->l_action = OCFS2_AST_CONVERT;
	lkm_flags |= DLM_LKF_CONVERT;
	lockres->l_requested = level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);

	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
			     lockres);
	if (ret) {
		if (!trylock || (ret != -EAGAIN)) {
			ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
			ret = -EINVAL;
		}

		ocfs2_recover_from_dlm_error(lockres, 1);
		lockres_remove_mask_waiter(lockres, &mw);
		goto out;
	}

	ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
	if (ret == -ERESTARTSYS) {
		/*
		 * Userspace can cause deadlock itself with
		 * flock(). Current behavior locally is to allow the
		 * deadlock, but abort the system call if a signal is
		 * received. We follow this example, otherwise a
		 * poorly written program could sit in kernel until
		 * reboot.
		 *
		 * Handling this is a bit more complicated for Ocfs2
		 * though. We can't exit this function with an
		 * outstanding lock request, so a cancel convert is
		 * required. We intentionally overwrite 'ret' - if the
		 * cancel fails and the lock was granted, it's easier
		 * to just bubble success back up to the user.
		 */
		ret = ocfs2_flock_handle_signal(lockres, level);
	} else if (!ret && (level > lockres->l_level)) {
		/* Trylock failed asynchronously */
		BUG_ON(!trylock);
		ret = -EAGAIN;
	}

out:
	mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
	     lockres->l_name, ex, trylock, ret);

	return ret;
}
void ocfs2_file_unlock(struct file *file)
{
	int ret;
	unsigned int gen;
	unsigned long flags;
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_lock_res *lockres = &fp->fp_flock;
	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);

	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
		return;

	if (lockres->l_level == DLM_LOCK_NL)
		return;

	mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
	     lockres->l_name, lockres->l_flags, lockres->l_level,
	     lockres->l_action);

	spin_lock_irqsave(&lockres->l_lock, flags);
	/*
	 * Fake a blocking ast for the downconvert code.
	 */
	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
	lockres->l_blocking = DLM_LOCK_EX;

	gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
	if (ret) {
		mlog_errno(ret);
		return;
	}

	ret = ocfs2_wait_for_mask(&mw);
	if (ret)
		mlog_errno(ret);
}
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres)
{
	int kick = 0;

	/* If we know that another node is waiting on our lock, kick
	 * the downconvert thread pre-emptively when we reach a release
	 * condition. */
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
		switch(lockres->l_blocking) {
		case DLM_LOCK_EX:
			if (!lockres->l_ex_holders && !lockres->l_ro_holders)
				kick = 1;
			break;
		case DLM_LOCK_PR:
			if (!lockres->l_ex_holders)
				kick = 1;
			break;
		default:
			BUG();
		}
	}

	if (kick)
		ocfs2_wake_downconvert_thread(osb);
}
#define OCFS2_SEC_BITS   34
#define OCFS2_SEC_SHIFT  (64 - 34)
#define OCFS2_NSEC_MASK  ((1ULL << OCFS2_SEC_SHIFT) - 1)

/* LVB only has room for 64 bits of time here so we pack it for
 * now. */
static u64 ocfs2_pack_timespec(struct timespec *spec)
{
	u64 res;
	u64 sec = spec->tv_sec;
	u32 nsec = spec->tv_nsec;

	res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);

	return res;
}
/* Call this with the lockres locked. I am reasonably sure we don't
 * need ip_lock in this function as anyone who would be changing those
 * values is supposed to be blocked in ocfs2_inode_lock right now. */
static void __ocfs2_stuff_meta_lvb(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
	struct ocfs2_meta_lvb *lvb;

	lvb = (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);

	/*
	 * Invalidate the LVB of a deleted inode - this way other
	 * nodes are forced to go to disk and discover the new inode
	 * value.
	 */
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		lvb->lvb_version = 0;
		goto out;
	}

	lvb->lvb_version   = OCFS2_LVB_VERSION;
	lvb->lvb_isize	   = cpu_to_be64(i_size_read(inode));
	lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
	lvb->lvb_iuid      = cpu_to_be32(inode->i_uid);
	lvb->lvb_igid      = cpu_to_be32(inode->i_gid);
	lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
	lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
	lvb->lvb_iatime_packed  =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
	lvb->lvb_ictime_packed =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
	lvb->lvb_imtime_packed =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
	lvb->lvb_iattr    = cpu_to_be32(oi->ip_attr);
	lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
	lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);

out:
	mlog_meta_lvb(0, lockres);
}
static void ocfs2_unpack_timespec(struct timespec *spec,
				  u64 packed_time)
{
	spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
	spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
}
static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
	struct ocfs2_meta_lvb *lvb;

	mlog_meta_lvb(0, lockres);

	lvb = (struct ocfs2_meta_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);

	/* We're safe here without the lockres lock... */
	spin_lock(&oi->ip_lock);
	oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
	i_size_write(inode, be64_to_cpu(lvb->lvb_isize));

	oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
	oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
	ocfs2_set_inode_flags(inode);

	/* fast-symlinks are a special case */
	if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
		inode->i_blocks = 0;
	else
		inode->i_blocks = ocfs2_inode_sector_count(inode);

	inode->i_uid     = be32_to_cpu(lvb->lvb_iuid);
	inode->i_gid     = be32_to_cpu(lvb->lvb_igid);
	inode->i_mode    = be16_to_cpu(lvb->lvb_imode);
	inode->i_nlink   = be16_to_cpu(lvb->lvb_inlink);
	ocfs2_unpack_timespec(&inode->i_atime,
			      be64_to_cpu(lvb->lvb_iatime_packed));
	ocfs2_unpack_timespec(&inode->i_mtime,
			      be64_to_cpu(lvb->lvb_imtime_packed));
	ocfs2_unpack_timespec(&inode->i_ctime,
			      be64_to_cpu(lvb->lvb_ictime_packed));
	spin_unlock(&oi->ip_lock);
}
*inode
,
1952 struct ocfs2_lock_res
*lockres
)
1954 struct ocfs2_meta_lvb
*lvb
=
1955 (struct ocfs2_meta_lvb
*)ocfs2_dlm_lvb(&lockres
->l_lksb
);
1957 if (lvb
->lvb_version
== OCFS2_LVB_VERSION
1958 && be32_to_cpu(lvb
->lvb_igeneration
) == inode
->i_generation
)
/* Determine whether a lock resource needs to be refreshed, and
 * arbitrate who gets to refresh it.
 *
 *   0 means no refresh needed.
 *
 *   > 0 means you need to refresh this and you MUST call
 *   ocfs2_complete_lock_res_refresh afterwards. */
static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
{
	unsigned long flags;
	int status = 0;

refresh_check:
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_on_refreshing_lock(lockres);
		goto refresh_check;
	}

	/* Ok, I'll be the one to refresh this lock. */
	lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	status = 1;
bail:
	return status;
}
/* If status is non zero, I'll mark it as not being in refresh
 * anymore, but I won't clear the needs refresh flag. */
static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
						   int status)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
	if (!status)
		lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);
}
/* may or may not return a bh if it went to disk. */
static int ocfs2_inode_lock_update(struct inode *inode,
				   struct buffer_head **bh)
{
	int status = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
	struct ocfs2_dinode *fe;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_mount_local(osb))
		goto bail;

	spin_lock(&oi->ip_lock);
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		mlog(0, "Orphaned inode %llu was deleted while we "
		     "were waiting on a lock. ip_flags = 0x%x\n",
		     (unsigned long long)oi->ip_blkno, oi->ip_flags);
		spin_unlock(&oi->ip_lock);
		status = -ENOENT;
		goto bail;
	}
	spin_unlock(&oi->ip_lock);

	if (!ocfs2_should_refresh_lock_res(lockres))
		goto bail;

	/* This will discard any caching information we might have had
	 * for the inode metadata. */
	ocfs2_metadata_cache_purge(inode);

	ocfs2_extent_map_trunc(inode, 0);

	if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
		mlog(0, "Trusting LVB on inode %llu\n",
		     (unsigned long long)oi->ip_blkno);
		ocfs2_refresh_inode_from_lvb(inode);
	} else {
		/* Boo, we have to go to disk. */
		/* read bh, cast, ocfs2_refresh_inode */
		status = ocfs2_read_inode_block(inode, bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail_refresh;
		}
		fe = (struct ocfs2_dinode *) (*bh)->b_data;

		/* This is a good chance to make sure we're not
		 * locking an invalid object.  ocfs2_read_inode_block()
		 * already checked that the inode block is sane.
		 *
		 * We bug on a stale inode here because we checked
		 * above whether it was wiped from disk. The wiping
		 * node provides a guarantee that we receive that
		 * message and can mark the inode before dropping any
		 * locks associated with it. */
		mlog_bug_on_msg(inode->i_generation !=
				le32_to_cpu(fe->i_generation),
				"Invalid dinode %llu disk generation: %u "
				"inode->i_generation: %u\n",
				(unsigned long long)oi->ip_blkno,
				le32_to_cpu(fe->i_generation),
				inode->i_generation);
		mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
				!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
				"Stale dinode %llu dtime: %llu flags: 0x%x\n",
				(unsigned long long)oi->ip_blkno,
				(unsigned long long)le64_to_cpu(fe->i_dtime),
				le32_to_cpu(fe->i_flags));

		ocfs2_refresh_inode(inode, fe);
		ocfs2_track_lock_refresh(lockres);
	}

	status = 0;
bail_refresh:
	ocfs2_complete_lock_res_refresh(lockres, status);
bail:
	return status;
}
*inode
,
2105 struct buffer_head
**ret_bh
,
2106 struct buffer_head
*passed_bh
)
2111 /* Ok, the update went to disk for us, use the
2113 *ret_bh
= passed_bh
;
2119 status
= ocfs2_read_inode_block(inode
, ret_bh
);
/*
 * returns < 0 error if the callback will never be called, otherwise
 * the result of the lock will be communicated via the callback.
 */
int ocfs2_inode_lock_full(struct inode *inode,
			  struct buffer_head **ret_bh,
			  int ex,
			  int arg_flags)
{
	int status, level, acquired;
	u32 dlm_flags;
	struct ocfs2_lock_res *lockres = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *local_bh = NULL;

	mlog(0, "inode %llu, take %s META lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     ex ? "EXMODE" : "PRMODE");

	status = 0;
	acquired = 0;
	/* We'll allow faking a readonly metadata lock for
	 * rodevices. */
	if (ocfs2_is_hard_readonly(osb)) {
		if (ex)
			status = -EROFS;
		goto bail;
	}

	if (ocfs2_mount_local(osb))
		goto local;

	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
		ocfs2_wait_for_recovery(osb);

	lockres = &OCFS2_I(inode)->ip_inode_lockres;
	level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	dlm_flags = 0;
	if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
		dlm_flags |= DLM_LKF_NOQUEUE;

	status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
	if (status < 0) {
		if (status != -EAGAIN && status != -EIOCBRETRY)
			mlog_errno(status);
		goto bail;
	}

	/* Notify the error cleanup path to drop the cluster lock. */
	acquired = 1;

	/* We wait twice because a node may have died while we were in
	 * the lower dlm layers. The second time though, we've
	 * committed to owning this lock so we don't allow signals to
	 * abort the operation. */
	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
		ocfs2_wait_for_recovery(osb);

local:
	/*
	 * We only see this flag if we're being called from
	 * ocfs2_read_locked_inode(). It means we're locking an inode
	 * which hasn't been populated yet, so clear the refresh flag
	 * and let the caller handle it.
	 */
	if (inode->i_state & I_NEW) {
		status = 0;
		if (lockres)
			ocfs2_complete_lock_res_refresh(lockres, 0);
		goto bail;
	}

	/* This is fun. The caller may want a bh back, or it may
	 * not. ocfs2_inode_lock_update definitely wants one in, but
	 * may or may not read one, depending on what's in the
	 * LVB. The result of all of this is that we've *only* gone to
	 * disk if we have to, so the complexity is worthwhile. */
	status = ocfs2_inode_lock_update(inode, &local_bh);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail;
	}

	if (ret_bh) {
		status = ocfs2_assign_bh(inode, ret_bh, local_bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

bail:
	if (status < 0) {
		if (ret_bh && (*ret_bh)) {
			brelse(*ret_bh);
			*ret_bh = NULL;
		}
		if (acquired)
			ocfs2_inode_unlock(inode, ex);
	}

	if (local_bh)
		brelse(local_bh);

	return status;
}
/*
 * This is working around a lock inversion between tasks acquiring DLM
 * locks while holding a page lock and the downconvert thread which
 * blocks dlm lock acquisition while acquiring page locks.
 *
 * ** These _with_page variants are only intended to be called from aop
 * methods that hold page locks and return a very specific *positive* error
 * code that aop methods pass up to the VFS -- test for errors with != 0. **
 *
 * The DLM is called such that it returns -EAGAIN if it would have
 * blocked waiting for the downconvert thread. In that case we unlock
 * our page so the downconvert thread can make progress. Once we've
 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
 * that called us can bubble that back up into the VFS, which will then
 * immediately retry the aop call.
 *
 * We do a blocking lock and immediate unlock before returning, though, so that
 * the lock has a great chance of being cached on this node by the time the VFS
 * calls back to retry the aop. This has the potential to livelock as nodes
 * ping locks back and forth, but that's a risk we're willing to take in order
 * to keep the lock inversion workaround simple.
 */
int ocfs2_inode_lock_with_page(struct inode *inode,
			       struct buffer_head **ret_bh,
			       int ex,
			       struct page *page)
{
	int ret;

	ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
	if (ret == -EAGAIN) {
		unlock_page(page);
		if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
			ocfs2_inode_unlock(inode, ex);
		ret = AOP_TRUNCATED_PAGE;
	}

	return ret;
}
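
/*
 * Illustrative sketch (not from the original file): how an
 * address_space operation might consume the _with_page variant. The
 * aop below is hypothetical; the point is only that the *positive*
 * AOP_TRUNCATED_PAGE return is passed straight back to the VFS and
 * that errors are tested with != 0 rather than < 0.
 */
#if 0	/* example only */
static int example_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		/* May be AOP_TRUNCATED_PAGE; the VFS will retry. */
		if (ret < 0)
			mlog_errno(ret);
		return ret;
	}

	/* ... do the actual read with the cluster lock held ... */

	ocfs2_inode_unlock(inode, 0);
	return 0;
}
#endif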
int ocfs2_inode_lock_atime(struct inode *inode,
			   struct vfsmount *vfsmnt,
			   int *level)
{
	ret = ocfs2_inode_lock(inode, NULL, 0);

	/*
	 * If we should update atime, we will get EX lock,
	 * otherwise we just get PR lock.
	 */
	if (ocfs2_should_update_atime(inode, vfsmnt)) {
		struct buffer_head *bh = NULL;

		ocfs2_inode_unlock(inode, 0);
		ret = ocfs2_inode_lock(inode, &bh, 1);

		if (ocfs2_should_update_atime(inode, vfsmnt))
			ocfs2_update_inode_atime(inode, bh);

void ocfs2_inode_unlock(struct inode *inode,
			int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog(0, "inode %llu drop %s META lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     ex ? "EXMODE" : "PRMODE");

	if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
	    !ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
int ocfs2_super_lock(struct ocfs2_super *osb,
		     int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;

	if (ocfs2_is_hard_readonly(osb))

	if (ocfs2_mount_local(osb))

	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);

	/* The super block lock path is really in the best position to
	 * know when resources covered by the lock need to be
	 * refreshed, so we do it here. Of course, making sense of
	 * everything is up to the caller :) */
	status = ocfs2_should_refresh_lock_res(lockres);

		status = ocfs2_refresh_slot_info(osb);

		ocfs2_complete_lock_res_refresh(lockres, status);

		ocfs2_track_lock_refresh(lockres);

void ocfs2_super_unlock(struct ocfs2_super *osb,
			int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, level);
}

int ocfs2_rename_lock(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;

	if (ocfs2_is_hard_readonly(osb))

	if (ocfs2_mount_local(osb))

	status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);

void ocfs2_rename_unlock(struct ocfs2_super *osb)
{
	struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
}
int ocfs2_dentry_lock(struct dentry *dentry, int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	if (ocfs2_is_hard_readonly(osb))

	if (ocfs2_mount_local(osb))

	ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);

void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
}
/* Reference counting of the dlm debug structure. We want this because
 * open references on the debug inodes can live on after a mount, so
 * we can't rely on the ocfs2_super to always exist. */
static void ocfs2_dlm_debug_free(struct kref *kref)
{
	struct ocfs2_dlm_debug *dlm_debug;

	dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);

void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
{
	kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
}

static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
{
	kref_get(&debug->d_refcnt);
}

struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
{
	struct ocfs2_dlm_debug *dlm_debug;

	dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
	if (!dlm_debug)
		mlog_errno(-ENOMEM);

	kref_init(&dlm_debug->d_refcnt);
	INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
	dlm_debug->d_locking_state = NULL;
/* Access to this is arbitrated for us via seq_file->sem. */
struct ocfs2_dlm_seq_priv {
	struct ocfs2_dlm_debug *p_dlm_debug;
	struct ocfs2_lock_res p_iter_res;
	struct ocfs2_lock_res p_tmp_res;
};

static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
						 struct ocfs2_dlm_seq_priv *priv)
{
	struct ocfs2_lock_res *iter, *ret = NULL;
	struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;

	assert_spin_locked(&ocfs2_dlm_tracking_lock);

	list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
		/* discover the head of the list */
		if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
			mlog(0, "End of list found, %p\n", ret);

		/* We track our "dummy" iteration lockres' by a NULL
		 * l_ops field. */
		if (iter->l_ops != NULL) {
static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
{
	struct ocfs2_dlm_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter;

	spin_lock(&ocfs2_dlm_tracking_lock);
	iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);

		/* Since lockres' have the lifetime of their container
		 * (which can be inodes, ocfs2_supers, etc) we want to
		 * copy this out to a temporary lockres while still
		 * under the spinlock. Obviously after this we can't
		 * trust any pointers on the copy returned, but that's
		 * ok as the information we want isn't typically held
		 * in it. */
		priv->p_tmp_res = *iter;
		iter = &priv->p_tmp_res;

	spin_unlock(&ocfs2_dlm_tracking_lock);

static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
{
}

static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ocfs2_dlm_seq_priv *priv = m->private;
	struct ocfs2_lock_res *iter = v;
	struct ocfs2_lock_res *dummy = &priv->p_iter_res;

	spin_lock(&ocfs2_dlm_tracking_lock);
	iter = ocfs2_dlm_next_res(iter, priv);
	list_del_init(&dummy->l_debug_list);

		list_add(&dummy->l_debug_list, &iter->l_debug_list);
		priv->p_tmp_res = *iter;
		iter = &priv->p_tmp_res;

	spin_unlock(&ocfs2_dlm_tracking_lock);
/* So that debugfs.ocfs2 can determine which format is being used */
#define OCFS2_DLM_DEBUG_STR_VERSION 2
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
	struct ocfs2_lock_res *lockres = v;

	seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);

	if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
		seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
			   lockres->l_name,
			   (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
	else
		seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);

	seq_printf(m, "%d\t"

		   lockres->l_unlock_action,
		   lockres->l_ro_holders,
		   lockres->l_ex_holders,
		   lockres->l_requested,
		   lockres->l_blocking);

	/* Dump the raw LVB */
	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
	for(i = 0; i < DLM_LVB_LEN; i++)
		seq_printf(m, "0x%x\t", lvb[i]);

#ifdef CONFIG_OCFS2_FS_STATS
# define lock_num_prmode(_l)		(_l)->l_lock_num_prmode
# define lock_num_exmode(_l)		(_l)->l_lock_num_exmode
# define lock_num_prmode_failed(_l)	(_l)->l_lock_num_prmode_failed
# define lock_num_exmode_failed(_l)	(_l)->l_lock_num_exmode_failed
# define lock_total_prmode(_l)		(_l)->l_lock_total_prmode
# define lock_total_exmode(_l)		(_l)->l_lock_total_exmode
# define lock_max_prmode(_l)		(_l)->l_lock_max_prmode
# define lock_max_exmode(_l)		(_l)->l_lock_max_exmode
# define lock_refresh(_l)		(_l)->l_lock_refresh
#else
# define lock_num_prmode(_l)		(0ULL)
# define lock_num_exmode(_l)		(0ULL)
# define lock_num_prmode_failed(_l)	(0)
# define lock_num_exmode_failed(_l)	(0)
# define lock_total_prmode(_l)		(0ULL)
# define lock_total_exmode(_l)		(0ULL)
# define lock_max_prmode(_l)		(0)
# define lock_max_exmode(_l)		(0)
# define lock_refresh(_l)		(0)
#endif
	/* The following seq_print was added in version 2 of this output */
	seq_printf(m, "%llu\t"

		   lock_num_prmode(lockres),
		   lock_num_exmode(lockres),
		   lock_num_prmode_failed(lockres),
		   lock_num_exmode_failed(lockres),
		   lock_total_prmode(lockres),
		   lock_total_exmode(lockres),
		   lock_max_prmode(lockres),
		   lock_max_exmode(lockres),
		   lock_refresh(lockres));

	seq_printf(m, "\n");
static const struct seq_operations ocfs2_dlm_seq_ops = {
	.start =	ocfs2_dlm_seq_start,
	.stop =		ocfs2_dlm_seq_stop,
	.next =		ocfs2_dlm_seq_next,
	.show =		ocfs2_dlm_seq_show,
};

static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = (struct seq_file *) file->private_data;
	struct ocfs2_dlm_seq_priv *priv = seq->private;
	struct ocfs2_lock_res *res = &priv->p_iter_res;

	ocfs2_remove_lockres_tracking(res);
	ocfs2_put_dlm_debug(priv->p_dlm_debug);
	return seq_release_private(inode, file);
}

static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
{
	struct ocfs2_dlm_seq_priv *priv;
	struct seq_file *seq;
	struct ocfs2_super *osb;

	priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);

	osb = inode->i_private;
	ocfs2_get_dlm_debug(osb->osb_dlm_debug);
	priv->p_dlm_debug = osb->osb_dlm_debug;
	INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);

	ret = seq_open(file, &ocfs2_dlm_seq_ops);

	seq = (struct seq_file *) file->private_data;
	seq->private = priv;

	ocfs2_add_lockres_tracking(&priv->p_iter_res,
				   osb->osb_dlm_debug);

static const struct file_operations ocfs2_dlm_debug_fops = {
	.open =		ocfs2_dlm_debug_open,
	.release =	ocfs2_dlm_debug_release,
	.llseek =	seq_lseek,
};
static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
{
	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;

	dlm_debug->d_locking_state = debugfs_create_file("locking_state",
							 S_IFREG|S_IRUSR,
							 osb->osb_debug_root,
							 osb,
							 &ocfs2_dlm_debug_fops);
	if (!dlm_debug->d_locking_state) {
		mlog(ML_ERROR,
		     "Unable to create locking state debugfs file.\n");

	ocfs2_get_dlm_debug(dlm_debug);

static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
{
	struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;

	debugfs_remove(dlm_debug->d_locking_state);
	ocfs2_put_dlm_debug(dlm_debug);
}
int ocfs2_dlm_init(struct ocfs2_super *osb)
{
	struct ocfs2_cluster_connection *conn = NULL;

	if (ocfs2_mount_local(osb)) {

	status = ocfs2_dlm_init_debug(osb);

	/* launch downconvert thread */
	osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
	if (IS_ERR(osb->dc_task)) {
		status = PTR_ERR(osb->dc_task);
		osb->dc_task = NULL;

	/* for now, uuid == domain */
	status = ocfs2_cluster_connect(osb->osb_cluster_stack,
				       osb->uuid_str,
				       strlen(osb->uuid_str),
				       ocfs2_do_node_down, osb,
				       &conn);

	status = ocfs2_cluster_this_node(&osb->node_num);
		mlog(ML_ERROR,
		     "could not find this host's node number\n");
		ocfs2_cluster_disconnect(conn, 0);

	ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
	ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);

	ocfs2_dlm_shutdown_debug(osb);

	kthread_stop(osb->dc_task);

void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
			int hangup_pending)
{
	ocfs2_drop_osb_locks(osb);

	/*
	 * Now that we have dropped all locks and ocfs2_dismount_volume()
	 * has disabled recovery, the DLM won't be talking to us. It's
	 * safe to tear things down before disconnecting the cluster.
	 */
	kthread_stop(osb->dc_task);
	osb->dc_task = NULL;

	ocfs2_lock_res_free(&osb->osb_super_lockres);
	ocfs2_lock_res_free(&osb->osb_rename_lockres);

	ocfs2_cluster_disconnect(osb->cconn, hangup_pending);

	ocfs2_dlm_shutdown_debug(osb);
}
static void ocfs2_unlock_ast(void *opaque, int error)
{
	struct ocfs2_lock_res *lockres = opaque;
	unsigned long flags;

	mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
	     lockres->l_unlock_action);

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (error) {
		mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
		     "unlock_action %d\n", error, lockres->l_name,
		     lockres->l_unlock_action);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

	switch(lockres->l_unlock_action) {
	case OCFS2_UNLOCK_CANCEL_CONVERT:
		mlog(0, "Cancel convert success for %s\n", lockres->l_name);
		lockres->l_action = OCFS2_AST_INVALID;
		break;
	case OCFS2_UNLOCK_DROP_LOCK:
		lockres->l_level = DLM_LOCK_IV;
		break;

	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
	wake_up(&lockres->l_event);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static int ocfs2_drop_lock(struct ocfs2_super *osb,
			   struct ocfs2_lock_res *lockres)
{
	unsigned long flags;

	/* We didn't get anywhere near actually using this lockres. */
	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
		lkm_flags |= DLM_LKF_VALBLK;

	spin_lock_irqsave(&lockres->l_lock, flags);

	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
			"lockres %s, flags 0x%lx\n",
			lockres->l_name, lockres->l_flags);

	while (lockres->l_flags & OCFS2_LOCK_BUSY) {
		mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
		     "%u, unlock_action = %u\n",
		     lockres->l_name, lockres->l_flags, lockres->l_action,
		     lockres->l_unlock_action);

		spin_unlock_irqrestore(&lockres->l_lock, flags);

		/* XXX: Today we just wait on any busy
		 * locks... Perhaps we need to cancel converts in the
		 * future? */
		ocfs2_wait_on_busy_lock(lockres);

		spin_lock_irqsave(&lockres->l_lock, flags);
	}

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
		if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
		    lockres->l_level == DLM_LOCK_EX &&
		    !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
			lockres->l_ops->set_lvb(lockres);
	}

	if (lockres->l_flags & OCFS2_LOCK_BUSY)
		mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
		     lockres->l_name);
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
		mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);

	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);

	lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);

	/* make sure we never get here while waiting for an ast to
	 * complete. */
	BUG_ON(lockres->l_action != OCFS2_AST_INVALID);

	/* is this necessary? */
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	mlog(0, "lock %s\n", lockres->l_name);

	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags,
			       lockres);
		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
		mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
		ocfs2_dlm_dump_lksb(&lockres->l_lksb);

	mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
	     lockres->l_name);

	ocfs2_wait_on_busy_lock(lockres);
/* Mark the lockres as being dropped. It will no longer be
 * queued if blocking, but we still may have to wait on it
 * being dequeued from the downconvert thread before we can consider
 * it safe to drop.
 *
 * You can *not* attempt to call cluster_lock on this lockres anymore. */
void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_mask_waiter mw;
	unsigned long flags;

	ocfs2_init_mask_waiter(&mw);

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres->l_flags |= OCFS2_LOCK_FREEING;
	while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		mlog(0, "Waiting on lockres %s\n", lockres->l_name);

		status = ocfs2_wait_for_mask(&mw);

		spin_lock_irqsave(&lockres->l_lock, flags);
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
			       struct ocfs2_lock_res *lockres)
{
	ocfs2_mark_lockres_freeing(lockres);
	ret = ocfs2_drop_lock(osb, lockres);

static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
{
	ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
	ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
}

int ocfs2_drop_inode_locks(struct inode *inode)
{
	/* No need to call ocfs2_mark_lockres_freeing here -
	 * ocfs2_clear_inode has done it for us. */

	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_open_lockres);

	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_inode_lockres);
	if (err < 0 && !status)
		status = err;

	err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
			      &OCFS2_I(inode)->ip_rw_lockres);
	if (err < 0 && !status)
		status = err;
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
					      int new_level)
{
	assert_spin_locked(&lockres->l_lock);

	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);

	if (lockres->l_level <= new_level) {
		mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n",
		     lockres->l_level, new_level);

	mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
	     lockres->l_name, new_level, lockres->l_blocking);

	lockres->l_action = OCFS2_AST_DOWNCONVERT;
	lockres->l_requested = new_level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	return lockres_set_pending(lockres);
}

static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
				  struct ocfs2_lock_res *lockres,
				  int new_level,
				  int lvb,
				  unsigned int generation)
{
	u32 dlm_flags = DLM_LKF_CONVERT;

	if (lvb)
		dlm_flags |= DLM_LKF_VALBLK;

	ret = ocfs2_dlm_lock(osb->cconn,
			     new_level,
			     &lockres->l_lksb,
			     dlm_flags,
			     lockres->l_name,
			     OCFS2_LOCK_ID_MAX_LEN - 1,
			     lockres);
	lockres_clear_pending(lockres, generation, osb);
		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
		ocfs2_recover_from_dlm_error(lockres, 1);

/* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres)
{
	assert_spin_locked(&lockres->l_lock);

	mlog(0, "lock %s\n", lockres->l_name);

	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
		/* If we're already trying to cancel a lock conversion
		 * then just drop the spinlock and allow the caller to
		 * requeue this lock. */
		mlog(0, "Lockres %s, skip convert\n", lockres->l_name);

	/* were we in a convert when we got the bast fire? */
	BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
	       lockres->l_action != OCFS2_AST_DOWNCONVERT);
	/* set things up for the unlockast to know to just
	 * clear out the ast_action and unset busy, etc. */
	lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;

	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
			"lock %s, invalid flags: 0x%lx\n",
			lockres->l_name, lockres->l_flags);

static int ocfs2_cancel_convert(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres)
{
	mlog(0, "lock %s\n", lockres->l_name);

	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
			       DLM_LKF_CANCEL, lockres);
		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
		ocfs2_recover_from_dlm_error(lockres, 0);

	mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name);
static int ocfs2_unblock_lock(struct ocfs2_super *osb,
			      struct ocfs2_lock_res *lockres,
			      struct ocfs2_unblock_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);

	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));

	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
		/*
		 * This is a *big* race. The OCFS2_LOCK_PENDING flag
		 * exists entirely for one reason - another thread has set
		 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
		 *
		 * If we do ocfs2_cancel_convert() before the other thread
		 * calls dlm_lock(), our cancel will do nothing. We will
		 * get no ast, and we will have no way of knowing the
		 * cancel failed. Meanwhile, the other thread will call
		 * into dlm_lock() and wait...forever.
		 *
		 * Why forever? Because another node has asked for the
		 * lock first; that's why we're here in unblock_lock().
		 *
		 * The solution is OCFS2_LOCK_PENDING. When PENDING is
		 * set, we just requeue the unblock. Only when the other
		 * thread has called dlm_lock() and cleared PENDING will
		 * we then cancel their request.
		 *
		 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
		 * at the same time they set OCFS2_LOCK_BUSY. They must
		 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
		 */
		if (lockres->l_flags & OCFS2_LOCK_PENDING)

		ret = ocfs2_prepare_cancel_convert(osb, lockres);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

			ret = ocfs2_cancel_convert(osb, lockres);

	/* if we're blocking an exclusive and we have *any* holders,
	 * then requeue. */
	if ((lockres->l_blocking == DLM_LOCK_EX)
	    && (lockres->l_ex_holders || lockres->l_ro_holders))

	/* If it's a PR we're blocking, then only
	 * requeue if we've got any EX holders */
	if (lockres->l_blocking == DLM_LOCK_PR &&
	    lockres->l_ex_holders)

	/*
	 * Can we get a lock in this state if the holder counts are
	 * zero? The meta data unblock code used to check this.
	 */
	if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
	    && (lockres->l_flags & OCFS2_LOCK_REFRESHING))

	new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);

	if (lockres->l_ops->check_downconvert
	    && !lockres->l_ops->check_downconvert(lockres, new_level))

	/* If we get here, then we know that there are no more
	 * incompatible holders (and anyone asking for an incompatible
	 * lock is blocked). We can now downconvert the lock */
	if (!lockres->l_ops->downconvert_worker)

	/* Some lockres types want to do a bit of work before
	 * downconverting a lock. Allow that here. The worker function
	 * may sleep, so we save off a copy of what we're blocking as
	 * it may change while we're not holding the spin lock. */
	blocking = lockres->l_blocking;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);

	if (ctl->unblock_action == UNBLOCK_STOP_POST)

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (blocking != lockres->l_blocking) {
		/* If this changed underneath us, then we can't drop
		 * it just yet. */

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
		if (lockres->l_level == DLM_LOCK_EX)
			set_lvb = 1;

		/*
		 * We only set the lvb if the lock has been fully
		 * refreshed - otherwise we risk setting stale
		 * data. Otherwise, there's no need to actually clear
		 * out the lvb here as its value is still valid.
		 */
		if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
			lockres->l_ops->set_lvb(lockres);
	}

	gen = ocfs2_prepare_downconvert(lockres, new_level);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
	ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
				     gen);

	spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking)
{
	struct inode *inode;
	struct address_space *mapping;

	inode = ocfs2_lock_res_inode(lockres);
	mapping = inode->i_mapping;

	if (!S_ISREG(inode->i_mode))

	/*
	 * We need this before the filemap_fdatawrite() so that it can
	 * transfer the dirty bit from the PTE to the
	 * page. Unfortunately this means that even for EX->PR
	 * downconverts, we'll lose our mappings and have to build
	 * them up again later.
	 */
	unmap_mapping_range(mapping, 0, 0, 0);

	if (filemap_fdatawrite(mapping)) {
		mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	sync_mapping_buffers(mapping);
	if (blocking == DLM_LOCK_EX) {
		truncate_inode_pages(mapping, 0);
	} else {
		/* We only need to wait on the I/O if we're not also
		 * truncating pages because truncate_inode_pages waits
		 * for us above. We don't truncate pages if we're
		 * blocking anything < EXMODE because we want to keep
		 * them around in that case. */
		filemap_fdatawait(mapping);
	}

	return UNBLOCK_CONTINUE;
}

static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);
	int checkpointed = ocfs2_inode_fully_checkpointed(inode);

	BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
	BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);

	ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb));

static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	__ocfs2_stuff_meta_lvb(inode);
/*
 * Does the final reference drop on our dentry lock. Right now this
 * happens in the downconvert thread, but we could choose to simplify the
 * dlmglue API and push these off to the ocfs2_wq in the future.
 */
static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres)
{
	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);

	ocfs2_dentry_lock_put(osb, dl);
}

/*
 * d_delete() matching dentries before the lock downconvert.
 *
 * At this point, any process waiting to destroy the
 * dentry_lock due to last ref count is stopped by the
 * OCFS2_LOCK_QUEUED flag.
 *
 * We have two potential problems
 *
 * 1) If we do the last reference drop on our dentry_lock (via dput)
 *    we'll wind up in ocfs2_release_dentry_lock(), waiting on
 *    the downconvert to finish. Instead we take an elevated
 *    reference and push the drop until after we've completed our
 *    unblock processing.
 *
 * 2) There might be another process with a final reference,
 *    waiting on us to finish processing. If this is the case, we
 *    detect it and exit out - there are no more dentries anyway.
 */
static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking)
{
	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
	struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
	struct dentry *dentry;
	unsigned long flags;

	/*
	 * This node is blocking another node from getting a read
	 * lock. This happens when we've renamed within a
	 * directory. We've forced the other nodes to d_delete(), but
	 * we never actually dropped our lock because it's still
	 * valid. The downconvert code will retain a PR for this node,
	 * so there's no further work to do.
	 */
	if (blocking == DLM_LOCK_PR)
		return UNBLOCK_CONTINUE;

	/*
	 * Mark this inode as potentially orphaned. The code in
	 * ocfs2_delete_inode() will figure out whether it actually
	 * needs to be freed or not.
	 */
	spin_lock(&oi->ip_lock);
	oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
	spin_unlock(&oi->ip_lock);

	/*
	 * Yuck. We need to make sure however that the check of
	 * OCFS2_LOCK_FREEING and the extra reference are atomic with
	 * respect to a reference decrement or the setting of that
	 * flag.
	 */
	spin_lock_irqsave(&lockres->l_lock, flags);
	spin_lock(&dentry_attach_lock);
	if (!(lockres->l_flags & OCFS2_LOCK_FREEING)

	spin_unlock(&dentry_attach_lock);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	mlog(0, "extra_ref = %d\n", extra_ref);

	/*
	 * We have a process waiting on us in ocfs2_dentry_iput(),
	 * which means we can't have any more outstanding
	 * aliases. There's no need to do any more work.
	 */
		return UNBLOCK_CONTINUE;

	spin_lock(&dentry_attach_lock);

		dentry = ocfs2_find_local_alias(dl->dl_inode,
						dl->dl_parent_blkno, 1);

		spin_unlock(&dentry_attach_lock);

		mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
		     dentry->d_name.name);

		/*
		 * The following dcache calls may do an
		 * iput(). Normally we don't want that from the
		 * downconverting thread, but in this case it's ok
		 * because the requesting node already has an
		 * exclusive lock on the inode, so it can't be queued
		 * for a downconvert.
		 */

		spin_lock(&dentry_attach_lock);

	spin_unlock(&dentry_attach_lock);

	/*
	 * If we are the last holder of this dentry lock, there is no
	 * reason to downconvert so skip straight to the unlock.
	 */
	if (dl->dl_count == 1)
		return UNBLOCK_STOP_POST;

	return UNBLOCK_CONTINUE_POST;
}
static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_qinfo_lvb *lvb;
	struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
					    oinfo->dqi_gi.dqi_type);

	lvb = (struct ocfs2_qinfo_lvb *)ocfs2_dlm_lvb(&lockres->l_lksb);
	lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
	lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
	lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
	lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
	lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
	lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
	lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);

void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;

	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(osb, lockres, level);
}
static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
{
	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
					    oinfo->dqi_gi.dqi_type);
	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
	struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
	struct buffer_head *bh;
	struct ocfs2_global_disk_dqinfo *gdinfo;

	if (lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
		info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
		info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
		oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
		oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
		oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
		oinfo->dqi_gi.dqi_free_entry =
					be32_to_cpu(lvb->lvb_free_entry);
	} else {
		bh = ocfs2_read_quota_block(oinfo->dqi_gqinode, 0, &status);

		gdinfo = (struct ocfs2_global_disk_dqinfo *)
					(bh->b_data + OCFS2_GLOBAL_INFO_OFF);
		info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
		info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
		oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
		oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
		oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
		oinfo->dqi_gi.dqi_free_entry =
					le32_to_cpu(gdinfo->dqi_free_entry);
	}
	ocfs2_track_lock_refresh(lockres);

/* Lock quota info; this function expects at least a shared lock on the
 * quota file so that we can safely refresh quota info from disk. */
int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;

	/* On RO devices, locking really isn't needed... */
	if (ocfs2_is_hard_readonly(osb)) {

	if (ocfs2_mount_local(osb))

	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);

	if (!ocfs2_should_refresh_lock_res(lockres))

	/* OK, we have the lock but we need to refresh the quota info */
	status = ocfs2_refresh_qinfo(oinfo);
	if (status)
		ocfs2_qinfo_unlock(oinfo, ex);
	ocfs2_complete_lock_res_refresh(lockres, status);
/*
 * This is the filesystem locking protocol. It provides the lock handling
 * hooks for the underlying DLM. It has a maximum version number.
 * The version number allows interoperability with systems running at
 * the same major number and an equal or smaller minor number.
 *
 * Whenever the filesystem does new things with locks (adds or removes a
 * lock, orders them differently, does different things underneath a lock),
 * the version must be changed. The protocol is negotiated when joining
 * the dlm domain. A node may join the domain if its major version is
 * identical to all other nodes and its minor version is greater than
 * or equal to all other nodes. When its minor version is greater than
 * the other nodes, it will run at the minor version specified by the
 * other nodes.
 *
 * If a locking change is made that will not be compatible with older
 * versions, the major number must be increased and the minor version set
 * to zero. If a change merely adds a behavior that can be disabled when
 * speaking to older versions, the minor version must be increased. If a
 * change adds a fully backwards compatible change (eg, LVB changes that
 * are just ignored by older versions), the version does not need to be
 * updated.
 */
static struct ocfs2_locking_protocol lproto = {
	.lp_max_version = {
		.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
		.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
	},
	.lp_lock_ast		= ocfs2_locking_ast,
	.lp_blocking_ast	= ocfs2_blocking_ast,
	.lp_unlock_ast		= ocfs2_unlock_ast,
};

void ocfs2_set_locking_protocol(void)
{
	ocfs2_stack_glue_set_locking_protocol(&lproto);
}
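
/*
 * Illustrative sketch (not from the original file) of the join rule
 * described above: a node is compatible with a running domain when the
 * major versions match and its own minor version is at least the
 * domain's, in which case it runs at the domain's (smaller) minor
 * version. The helper is hypothetical; only the rule itself comes from
 * the comment above.
 */
#if 0	/* example only */
static int example_proto_compatible(u8 my_major, u8 my_minor,
				    u8 domain_major, u8 domain_minor,
				    u8 *running_minor)
{
	if (my_major != domain_major)
		return 0;	/* must not join */
	if (my_minor < domain_minor)
		return 0;	/* older minor may be missing behavior */

	/* Run at the lowest common minor version. */
	*running_minor = domain_minor;
	return 1;
}
#endif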
static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *lockres)
{
	struct ocfs2_unblock_ctl ctl = {0, 0,};
	unsigned long flags;

	/* Our reference to the lockres in this function can be
	 * considered valid until we remove the OCFS2_LOCK_QUEUED
	 * flag. */

	BUG_ON(!lockres->l_ops);

	mlog(0, "lockres %s blocked.\n", lockres->l_name);

	/* Detect whether a lock has been marked as going away while
	 * the downconvert thread was processing other things. A lock can
	 * still be marked with OCFS2_LOCK_FREEING after this check,
	 * but short circuiting here will still save us some
	 * performance. */
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (lockres->l_flags & OCFS2_LOCK_FREEING)

	spin_unlock_irqrestore(&lockres->l_lock, flags);

	status = ocfs2_unblock_lock(osb, lockres, &ctl);

	spin_lock_irqsave(&lockres->l_lock, flags);

	if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
		lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
	} else
		ocfs2_schedule_blocked_lock(osb, lockres);

	mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
	     ctl.requeue ? "yes" : "no");
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	if (ctl.unblock_action != UNBLOCK_CONTINUE
	    && lockres->l_ops->post_unlock)
		lockres->l_ops->post_unlock(osb, lockres);
}

static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres)
{
	assert_spin_locked(&lockres->l_lock);

	if (lockres->l_flags & OCFS2_LOCK_FREEING) {
		/* Do not schedule a lock for downconvert when it's on
		 * the way to destruction - any nodes wanting access
		 * to the resource will get it soon. */
		mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
		     lockres->l_name, lockres->l_flags);

	lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);

	spin_lock(&osb->dc_task_lock);
	if (list_empty(&lockres->l_blocked_list)) {
		list_add_tail(&lockres->l_blocked_list,
			      &osb->blocked_lock_list);
		osb->blocked_lock_count++;
	}
	spin_unlock(&osb->dc_task_lock);
}
static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
{
	unsigned long processed;
	struct ocfs2_lock_res *lockres;

	spin_lock(&osb->dc_task_lock);
	/* grab this early so we know to try again if a state change and
	 * wake happens part-way through our work */
	osb->dc_work_sequence = osb->dc_wake_sequence;

	processed = osb->blocked_lock_count;

		BUG_ON(list_empty(&osb->blocked_lock_list));

		lockres = list_entry(osb->blocked_lock_list.next,
				     struct ocfs2_lock_res, l_blocked_list);
		list_del_init(&lockres->l_blocked_list);
		osb->blocked_lock_count--;
		spin_unlock(&osb->dc_task_lock);

		ocfs2_process_blocked_lock(osb, lockres);

		spin_lock(&osb->dc_task_lock);

	spin_unlock(&osb->dc_task_lock);
}

static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
{
	spin_lock(&osb->dc_task_lock);
	if (list_empty(&osb->blocked_lock_list))

	spin_unlock(&osb->dc_task_lock);

static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
{
	int should_wake = 0;

	spin_lock(&osb->dc_task_lock);
	if (osb->dc_work_sequence != osb->dc_wake_sequence)
		should_wake = 1;
	spin_unlock(&osb->dc_task_lock);

static int ocfs2_downconvert_thread(void *arg)
{
	struct ocfs2_super *osb = arg;

	/* only quit once we've been asked to stop and there is no more
	 * work to do */
	while (!(kthread_should_stop() &&
		 ocfs2_downconvert_thread_lists_empty(osb))) {

		wait_event_interruptible(osb->dc_event,
					 ocfs2_downconvert_thread_should_wake(osb) ||
					 kthread_should_stop());

		mlog(0, "downconvert_thread: awoken\n");

		ocfs2_downconvert_thread_do_work(osb);
	}

	osb->dc_task = NULL;

void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
{
	spin_lock(&osb->dc_task_lock);
	/* make sure the downconvert thread gets a swipe at whatever changes
	 * the caller may have made to the blocked lock state */
	osb->dc_wake_sequence++;
	spin_unlock(&osb->dc_task_lock);
	wake_up(&osb->dc_event);
}