/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmglue.c
 *
 * Code which implements an OCFS2 specific interface to our DLM.
 *
 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/crc32.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <cluster/heartbeat.h>
#include <cluster/nodemanager.h>
#include <cluster/tcp.h>

#include <dlm/dlmapi.h>

#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "slot_map.h"
#include "super.h"
#include "uptodate.h"
#include "vote.h"

#include "buffer_head_io.h"

struct ocfs2_mask_waiter {
	struct list_head mw_item;
	int mw_status;
	struct completion mw_complete;
	unsigned long mw_mask;
	unsigned long mw_goal;
};

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);

/*
 * Return value from ->downconvert_worker functions.
 *
 * These control the precise actions of ocfs2_unblock_lock()
 * and ocfs2_process_blocked_lock()
 *
 */
enum ocfs2_unblock_action {
	UNBLOCK_CONTINUE	= 0, /* Continue downconvert */
	UNBLOCK_CONTINUE_POST	= 1, /* Continue downconvert, fire
				      * ->post_unlock callback */
	UNBLOCK_STOP_POST	= 2, /* Do not downconvert, fire
				      * ->post_unlock() callback. */
};

struct ocfs2_unblock_ctl {
	int requeue;
	enum ocfs2_unblock_action unblock_action;
};

static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking);

static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking);

static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres);


#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)

/* This aids in debugging situations where a bad LVB might be involved. */
static void ocfs2_dump_meta_lvb_info(u64 level,
				     const char *function,
				     unsigned int line,
				     struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;

	mlog(level, "LVB information for %s (called from %s:%u):\n",
	     lockres->l_name, function, line);
	mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
	     lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
	     be32_to_cpu(lvb->lvb_igeneration));
	mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
	     (unsigned long long)be64_to_cpu(lvb->lvb_isize),
	     be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
	     be16_to_cpu(lvb->lvb_imode));
	mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
	     "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
	     (long long)be64_to_cpu(lvb->lvb_iatime_packed),
	     (long long)be64_to_cpu(lvb->lvb_ictime_packed),
	     (long long)be64_to_cpu(lvb->lvb_imtime_packed),
	     be32_to_cpu(lvb->lvb_iattr));
}


/*
 * OCFS2 Lock Resource Operations
 *
 * These fine tune the behavior of the generic dlmglue locking infrastructure.
 *
 * The most basic of lock types can point ->l_priv to their respective
 * struct ocfs2_super and allow the default actions to manage things.
 *
 * Right now, each lock type also needs to implement an init function,
 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
 * should be called when the lock is no longer needed (i.e., object
 * destruction time).
 */
struct ocfs2_lock_res_ops {
	/*
	 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
	 * this callback if ->l_priv is not an ocfs2_super pointer
	 */
	struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);

	/*
	 * Optionally called in the downconvert (or "vote") thread
	 * after a successful downconvert. The lockres will not be
	 * referenced after this callback is called, so it is safe to
	 * free memory, etc.
	 *
	 * The exact semantics of when this is called are controlled
	 * by ->downconvert_worker()
	 */
	void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);

	/*
	 * Allow a lock type to add checks to determine whether it is
	 * safe to downconvert a lock. Return 0 to re-queue the
	 * downconvert at a later time, nonzero to continue.
	 *
	 * For most locks, the default checks that there are no
	 * incompatible holders are sufficient.
	 *
	 * Called with the lockres spinlock held.
	 */
	int (*check_downconvert)(struct ocfs2_lock_res *, int);

	/*
	 * Allows a lock type to populate the lock value block. This
	 * is called on downconvert, and when we drop a lock.
	 *
	 * Locks that want to use this should set LOCK_TYPE_USES_LVB
	 * in the flags field.
	 *
	 * Called with the lockres spinlock held.
	 */
	void (*set_lvb)(struct ocfs2_lock_res *);

	/*
	 * Called from the downconvert thread when it is determined
	 * that a lock will be downconverted. This is called without
	 * any locks held so the function can do work that might
	 * schedule (syncing out data, etc).
	 *
	 * This should return any one of the ocfs2_unblock_action
	 * values, depending on what it wants the thread to do.
	 */
	int (*downconvert_worker)(struct ocfs2_lock_res *, int);

	/*
	 * LOCK_TYPE_* flags which describe the specific requirements
	 * of a lock type. Descriptions of each individual flag follow.
	 */
	int flags;
};

/*
 * Some locks want to "refresh" potentially stale data when a
 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
 * individual lockres l_flags member from the ast function. It is
 * expected that the locking wrapper will clear the
 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
 */
#define LOCK_TYPE_REQUIRES_REFRESH 0x1

/*
 * Indicate that a lock type makes use of the lock value block. The
 * ->set_lvb lock type callback must be defined.
 */
#define LOCK_TYPE_USES_LVB 0x2

static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
	.get_osb = ocfs2_get_inode_osb,
	.flags = 0,
};

static struct ocfs2_lock_res_ops ocfs2_inode_meta_lops = {
	.get_osb = ocfs2_get_inode_osb,
	.check_downconvert = ocfs2_check_meta_downconvert,
	.set_lvb = ocfs2_set_meta_lvb,
	.flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_inode_data_lops = {
	.get_osb = ocfs2_get_inode_osb,
	.downconvert_worker = ocfs2_data_convert_worker,
	.flags = 0,
};

static struct ocfs2_lock_res_ops ocfs2_super_lops = {
	.flags = LOCK_TYPE_REQUIRES_REFRESH,
};

static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
	.flags = 0,
};

static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
	.get_osb = ocfs2_get_dentry_osb,
	.post_unlock = ocfs2_dentry_post_unlock,
	.downconvert_worker = ocfs2_dentry_convert_worker,
	.flags = 0,
};

static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
	.get_osb = ocfs2_get_inode_osb,
	.flags = 0,
};

static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
	return lockres->l_type == OCFS2_LOCK_TYPE_META ||
		lockres->l_type == OCFS2_LOCK_TYPE_DATA ||
		lockres->l_type == OCFS2_LOCK_TYPE_RW ||
		lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}

static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!ocfs2_is_inode_lock(lockres));

	return (struct inode *) lockres->l_priv;
}

static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);

	return (struct ocfs2_dentry_lock *)lockres->l_priv;
}

static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{
	if (lockres->l_ops->get_osb)
		return lockres->l_ops->get_osb(lockres);

	return (struct ocfs2_super *)lockres->l_priv;
}

static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     int dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted);
static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int level);
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert);
#define ocfs2_log_dlm_error(_func, _stat, _lockres) do {	\
	mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on "	\
	     "resource %s: %s\n", dlm_errname(_stat), _func,	\
	     _lockres->l_name, dlm_errmsg(_stat));		\
} while (0)
static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres);
static int ocfs2_meta_lock_update(struct inode *inode,
				  struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);

static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
				  u64 blkno,
				  u32 generation,
				  char *name)
{
	int len;

	mlog_entry_void();

	BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);

	len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
		       ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
		       (long long)blkno, generation);

	BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));

	mlog(0, "built lock resource with name: %s\n", name);

	mlog_exit_void();
}

static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);

static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
				       struct ocfs2_dlm_debug *dlm_debug)
{
	mlog(0, "Add tracking for lockres %s\n", res->l_name);

	spin_lock(&ocfs2_dlm_tracking_lock);
	list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}

static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{
	spin_lock(&ocfs2_dlm_tracking_lock);
	if (!list_empty(&res->l_debug_list))
		list_del_init(&res->l_debug_list);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}

static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *res,
				       enum ocfs2_lock_type type,
				       struct ocfs2_lock_res_ops *ops,
				       void *priv)
{
	res->l_type = type;
	res->l_ops = ops;
	res->l_priv = priv;

	res->l_level = LKM_IVMODE;
	res->l_requested = LKM_IVMODE;
	res->l_blocking = LKM_IVMODE;
	res->l_action = OCFS2_AST_INVALID;
	res->l_unlock_action = OCFS2_UNLOCK_INVALID;

	res->l_flags = OCFS2_LOCK_INITIALIZED;

	ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
}

void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{
	/* This also clears out the lock status block */
	memset(res, 0, sizeof(struct ocfs2_lock_res));
	spin_lock_init(&res->l_lock);
	init_waitqueue_head(&res->l_event);
	INIT_LIST_HEAD(&res->l_blocked_list);
	INIT_LIST_HEAD(&res->l_mask_waiters);
}

void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
			       enum ocfs2_lock_type type,
			       unsigned int generation,
			       struct inode *inode)
{
	struct ocfs2_lock_res_ops *ops;

	switch(type) {
	case OCFS2_LOCK_TYPE_RW:
		ops = &ocfs2_inode_rw_lops;
		break;
	case OCFS2_LOCK_TYPE_META:
		ops = &ocfs2_inode_meta_lops;
		break;
	case OCFS2_LOCK_TYPE_DATA:
		ops = &ocfs2_inode_data_lops;
		break;
	case OCFS2_LOCK_TYPE_OPEN:
		ops = &ocfs2_inode_open_lops;
		break;
	default:
		mlog_bug_on_msg(1, "type: %d\n", type);
		ops = NULL; /* thanks, gcc */
		break;
	}

	ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
			      generation, res->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
}

static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	return OCFS2_SB(inode->i_sb);
}

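/* Dentry lock names embed the target inode's block number as raw
 * big-endian bytes (see ocfs2_dentry_lock_res_init() below); this
 * pulls it back out of l_name and returns it in cpu byte order. */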
static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
	__be64 inode_blkno_be;

	memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
	       sizeof(__be64));

	return be64_to_cpu(inode_blkno_be);
}

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_dentry_lock *dl = lockres->l_priv;

	return OCFS2_SB(dl->dl_inode->i_sb);
}

void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
				u64 parent, struct inode *inode)
{
	int len;
	u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
	__be64 inode_blkno_be = cpu_to_be64(inode_blkno);
	struct ocfs2_lock_res *lockres = &dl->dl_lockres;

	ocfs2_lock_res_init_once(lockres);

	/*
	 * Unfortunately, the standard lock naming scheme won't work
	 * here because we have two 16 byte values to use. Instead,
	 * we'll stuff the inode number as a binary value. We still
	 * want error prints to show something without garbling the
	 * display, so drop a null byte in there before the inode
	 * number. A future version of OCFS2 will likely use all
	 * binary lock names. The stringified names have been a
	 * tremendous aid in debugging, but now that the debugfs
	 * interface exists, we can mangle things there if need be.
	 *
	 * NOTE: We also drop the standard "pad" value (the total lock
	 * name size stays the same though - the last part is all
	 * zeros due to the memset in ocfs2_lock_res_init_once()
	 */
	len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
		       "%c%016llx",
		       ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
		       (long long)parent);

	BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));

	memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
	       sizeof(__be64));

	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
				   OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
				   dl);
}

static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
				      struct ocfs2_super *osb)
{
	/* Superblock lockres doesn't come from a slab so we call init
	 * once on it manually. */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
			      0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
				   &ocfs2_super_lops, osb);
}

static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
				       struct ocfs2_super *osb)
{
	/* Rename lockres doesn't come from a slab so we call init
	 * once on it manually. */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
				   &ocfs2_rename_lops, osb);
}

void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
	mlog_entry_void();

	if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
		return;

	ocfs2_remove_lockres_tracking(res);

	mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
			"Lockres %s is on the blocked list\n",
			res->l_name);
	mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
			"Lockres %s has mask waiters pending\n",
			res->l_name);
	mlog_bug_on_msg(spin_is_locked(&res->l_lock),
			"Lockres %s is locked\n",
			res->l_name);
	mlog_bug_on_msg(res->l_ro_holders,
			"Lockres %s has %u ro holders\n",
			res->l_name, res->l_ro_holders);
	mlog_bug_on_msg(res->l_ex_holders,
			"Lockres %s has %u ex holders\n",
			res->l_name, res->l_ex_holders);

	/* Need to clear out the lock status block for the dlm */
	memset(&res->l_lksb, 0, sizeof(res->l_lksb));

	res->l_flags = 0UL;
	mlog_exit_void();
}

static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	mlog_entry_void();

	BUG_ON(!lockres);

	switch(level) {
	case LKM_EXMODE:
		lockres->l_ex_holders++;
		break;
	case LKM_PRMODE:
		lockres->l_ro_holders++;
		break;
	default:
		BUG();
	}

	mlog_exit_void();
}

static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	mlog_entry_void();

	BUG_ON(!lockres);

	switch(level) {
	case LKM_EXMODE:
		BUG_ON(!lockres->l_ex_holders);
		lockres->l_ex_holders--;
		break;
	case LKM_PRMODE:
		BUG_ON(!lockres->l_ro_holders);
		lockres->l_ro_holders--;
		break;
	default:
		BUG();
	}
	mlog_exit_void();
}

/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL. It *will* have to be adjusted when more
 * lock types are added. */
static inline int ocfs2_highest_compat_lock_level(int level)
{
	int new_level = LKM_EXMODE;

	if (level == LKM_EXMODE)
		new_level = LKM_NLMODE;
	else if (level == LKM_PRMODE)
		new_level = LKM_PRMODE;
	return new_level;
}

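/* Install a new set of flags and complete any mask waiters whose
 * mask/goal condition is now satisfied. Must be called with the
 * lockres spinlock held. */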
static void lockres_set_flags(struct ocfs2_lock_res *lockres,
			      unsigned long newflags)
{
	struct ocfs2_mask_waiter *mw, *tmp;

	assert_spin_locked(&lockres->l_lock);

	lockres->l_flags = newflags;

	list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			continue;

		list_del_init(&mw->mw_item);
		mw->mw_status = 0;
		complete(&mw->mw_complete);
	}
}
static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{
	lockres_set_flags(lockres, lockres->l_flags | or);
}
static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
				unsigned long clear)
{
	lockres_set_flags(lockres, lockres->l_flags & ~clear);
}

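/* A downconvert we requested has been granted: drop l_level to the
 * requested level and clear BLOCKED if the new level no longer
 * conflicts with the level another node is waiting on. Called from
 * the ast with the lockres spinlock held. */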
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
	mlog_entry_void();

	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
	BUG_ON(lockres->l_blocking <= LKM_NLMODE);

	lockres->l_level = lockres->l_requested;
	if (lockres->l_level <=
	    ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
		lockres->l_blocking = LKM_NLMODE;
		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
	}
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);

	mlog_exit_void();
}

static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
	mlog_entry_void();

	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));

	/* Convert from RO to EX doesn't really need anything as our
	 * information is already up to date. Convert from NL to
	 * *anything* however should mark ourselves as needing an
	 * update */
	if (lockres->l_level == LKM_NLMODE &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);

	mlog_exit_void();
}

static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{
	mlog_entry_void();

	/* Note the parenthesization: we check the BUSY bit, not !l_flags. */
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);

	if (lockres->l_requested > LKM_NLMODE &&
	    !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;
	lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);

	mlog_exit_void();
}

static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
				     int level)
{
	int needs_downconvert = 0;
	mlog_entry_void();

	assert_spin_locked(&lockres->l_lock);

	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);

	if (level > lockres->l_blocking) {
		/* only schedule a downconvert if we haven't already scheduled
		 * one that goes low enough to satisfy the level we're
		 * blocking. this also catches the case where we get
		 * duplicate BASTs */
		if (ocfs2_highest_compat_lock_level(level) <
		    ocfs2_highest_compat_lock_level(lockres->l_blocking))
			needs_downconvert = 1;

		lockres->l_blocking = level;
	}

	mlog_exit(needs_downconvert);
	return needs_downconvert;
}

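/* Blocking ast: fired by the dlm when another node requests a level
 * that conflicts with one we hold. Record the blocking level and, if
 * a (further) downconvert is required, hand the lockres to the vote
 * thread. */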
static void ocfs2_blocking_ast(void *opaque, int level)
{
	struct ocfs2_lock_res *lockres = opaque;
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	int needs_downconvert;
	unsigned long flags;

	BUG_ON(level <= LKM_NLMODE);

	mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
	     lockres->l_name, level, lockres->l_level,
	     ocfs2_lock_type_string(lockres->l_type));

	spin_lock_irqsave(&lockres->l_lock, flags);
	needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
	if (needs_downconvert)
		ocfs2_schedule_blocked_lock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);

	ocfs2_kick_vote_thread(osb);
}

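/* Regular ast: fired when an attach or convert we requested has
 * completed. Dispatch on l_action to fold the result into our lock
 * state, then wake anyone waiting on l_event. */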
static void ocfs2_locking_ast(void *opaque)
{
	struct ocfs2_lock_res *lockres = opaque;
	struct dlm_lockstatus *lksb = &lockres->l_lksb;
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);

	if (lksb->status != DLM_NORMAL) {
		mlog(ML_ERROR, "lockres %s: lksb status value of %u!\n",
		     lockres->l_name, lksb->status);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		return;
	}

	switch(lockres->l_action) {
	case OCFS2_AST_ATTACH:
		ocfs2_generic_handle_attach_action(lockres);
		lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
		break;
	case OCFS2_AST_CONVERT:
		ocfs2_generic_handle_convert_action(lockres);
		break;
	case OCFS2_AST_DOWNCONVERT:
		ocfs2_generic_handle_downconvert_action(lockres);
		break;
	default:
		mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
		     "lockres flags = 0x%lx, unlock action: %u\n",
		     lockres->l_name, lockres->l_action, lockres->l_flags,
		     lockres->l_unlock_action);
		BUG();
	}

	/* set it to something invalid so if we get called again we
	 * can catch it. */
	lockres->l_action = OCFS2_AST_INVALID;

	wake_up(&lockres->l_event);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert)
{
	unsigned long flags;

	mlog_entry_void();
	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	if (convert)
		lockres->l_action = OCFS2_AST_INVALID;
	else
		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);
	mlog_exit_void();
}

/* Note: If we detect another process working on the lock (i.e.,
 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
 * to do the right thing in that case.
 */
static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     int dlm_flags)
{
	int ret = 0;
	enum dlm_status status = DLM_NORMAL;
	unsigned long flags;

	mlog_entry_void();

	mlog(0, "lock %s, level = %d, flags = %d\n", lockres->l_name, level,
	     dlm_flags);

	spin_lock_irqsave(&lockres->l_lock, flags);
	if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
	    (lockres->l_flags & OCFS2_LOCK_BUSY)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	lockres->l_action = OCFS2_AST_ATTACH;
	lockres->l_requested = level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	status = dlmlock(osb->dlm,
			 level,
			 &lockres->l_lksb,
			 dlm_flags,
			 lockres->l_name,
			 OCFS2_LOCK_ID_MAX_LEN - 1,
			 ocfs2_locking_ast,
			 lockres,
			 ocfs2_blocking_ast);
	if (status != DLM_NORMAL) {
		ocfs2_log_dlm_error("dlmlock", status, lockres);
		ret = -EINVAL;
		ocfs2_recover_from_dlm_error(lockres, 1);
	}

	mlog(0, "lock %s, successful return from dlmlock\n", lockres->l_name);

bail:
	mlog_exit(ret);
	return ret;
}

static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
					int flag)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = lockres->l_flags & flag;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
}

static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
}

/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));

	return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
}

static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
	INIT_LIST_HEAD(&mw->mw_item);
	init_completion(&mw->mw_complete);
}

static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{
	wait_for_completion(&mw->mw_complete);
	/* Re-arm the completion in case we want to wait on it again */
	INIT_COMPLETION(mw->mw_complete);
	return mw->mw_status;
}

static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
				    struct ocfs2_mask_waiter *mw,
				    unsigned long mask,
				    unsigned long goal)
{
	BUG_ON(!list_empty(&mw->mw_item));

	assert_spin_locked(&lockres->l_lock);

	list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
	mw->mw_mask = mask;
	mw->mw_goal = goal;
}

/* returns 0 if the mw that was removed was already satisfied, -EBUSY
 * if the mask still hadn't reached its goal */
static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!list_empty(&mw->mw_item)) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			ret = -EBUSY;

		list_del_init(&mw->mw_item);
		init_completion(&mw->mw_complete);
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

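/* Core cluster lock acquisition: create the dlm lock on first use,
 * then convert it up to the requested level as needed. Sleeps while
 * the lock is busy or blocked on behalf of another node, unless
 * OCFS2_LOCK_NONBLOCK is passed in arg_flags, in which case -EAGAIN
 * is returned instead. */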
static int ocfs2_cluster_lock(struct ocfs2_super *osb,
			      struct ocfs2_lock_res *lockres,
			      int level,
			      int lkm_flags,
			      int arg_flags)
{
	struct ocfs2_mask_waiter mw;
	enum dlm_status status;
	int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
	int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
	unsigned long flags;

	mlog_entry_void();

	ocfs2_init_mask_waiter(&mw);

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
		lkm_flags |= LKM_VALBLK;

again:
	wait = 0;

	if (catch_signals && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	spin_lock_irqsave(&lockres->l_lock, flags);

	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
			"Cluster lock called on freeing lockres %s! flags "
			"0x%lx\n", lockres->l_name, lockres->l_flags);

	/* We only compare against the currently granted level
	 * here. If the lock is blocked waiting on a downconvert,
	 * we'll get caught below. */
	if (lockres->l_flags & OCFS2_LOCK_BUSY &&
	    level > lockres->l_level) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		wait = 1;
		goto unlock;
	}

	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
		/* lock has not been created yet. */
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		goto again;
	}

	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
		/* the lock is currently blocked on behalf of
		 * another node */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
		wait = 1;
		goto unlock;
	}

	if (level > lockres->l_level) {
		if (lockres->l_action != OCFS2_AST_INVALID)
			mlog(ML_ERROR, "lockres %s has action %u pending\n",
			     lockres->l_name, lockres->l_action);

		lockres->l_action = OCFS2_AST_CONVERT;
		lockres->l_requested = level;
		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		BUG_ON(level == LKM_IVMODE);
		BUG_ON(level == LKM_NLMODE);

		mlog(0, "lock %s, convert from %d to level = %d\n",
		     lockres->l_name, lockres->l_level, level);

		/* call dlm_lock to upgrade lock now */
		status = dlmlock(osb->dlm,
				 level,
				 &lockres->l_lksb,
				 lkm_flags|LKM_CONVERT,
				 lockres->l_name,
				 OCFS2_LOCK_ID_MAX_LEN - 1,
				 ocfs2_locking_ast,
				 lockres,
				 ocfs2_blocking_ast);
		if (status != DLM_NORMAL) {
			if ((lkm_flags & LKM_NOQUEUE) &&
			    (status == DLM_NOTQUEUED))
				ret = -EAGAIN;
			else {
				ocfs2_log_dlm_error("dlmlock", status,
						    lockres);
				ret = -EINVAL;
			}
			ocfs2_recover_from_dlm_error(lockres, 1);
			goto out;
		}

		mlog(0, "lock %s, successful return from dlmlock\n",
		     lockres->l_name);

		/* At this point we've gone inside the dlm and need to
		 * complete our work regardless. */
		catch_signals = 0;

		/* wait for busy to clear and carry on */
		goto again;
	}

	/* Ok, if we get here then we're good to go. */
	ocfs2_inc_holders(lockres, level);

	ret = 0;
unlock:
	spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
	/*
	 * This is helping work around a lock inversion between the page lock
	 * and dlm locks. One path holds the page lock while calling aops
	 * which block acquiring dlm locks. The voting thread holds dlm
	 * locks while acquiring page locks while down converting data locks.
	 * This block is helping an aop path notice the inversion and back
	 * off to unlock its page lock before trying the dlm lock again.
	 */
	if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
	    mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
		wait = 0;
		if (lockres_remove_mask_waiter(lockres, &mw))
			ret = -EAGAIN;
		else
			goto again;
	}
	if (wait) {
		ret = ocfs2_wait_for_mask(&mw);
		if (ret == 0)
			goto again;
		mlog_errno(ret);
	}

	mlog_exit(ret);
	return ret;
}

static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int level)
{
	unsigned long flags;

	mlog_entry_void();
	spin_lock_irqsave(&lockres->l_lock, flags);
	ocfs2_dec_holders(lockres, level);
	ocfs2_vote_on_unlock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
	mlog_exit_void();
}

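/* Create a lock at EX or PR directly on a lockres that has never
 * been attached. Passing LKM_LOCAL lets the dlm skip the network
 * broadcast normally associated with creating a new lock resource,
 * which is safe only for resources no other node can know about. */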
static int ocfs2_create_new_lock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int ex,
				 int local)
{
	int level = ex ? LKM_EXMODE : LKM_PRMODE;
	unsigned long flags;
	int lkm_flags = local ? LKM_LOCAL : 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
	lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ocfs2_lock_create(osb, lockres, level, lkm_flags);
}

/* Grants us an EX lock on the data and metadata resources, skipping
 * the normal cluster directory lookup. Use this ONLY on newly created
 * inodes which other nodes can't possibly see, and which haven't been
 * hashed in the inode hash yet. This can give us a good performance
 * increase as it'll skip the network broadcast normally associated
 * with creating a new lock resource. */
int ocfs2_create_new_inode_locks(struct inode *inode)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!inode);
	BUG_ON(!ocfs2_inode_is_new(inode));

	mlog_entry_void();

	mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);

	/* NOTE: we don't increment any of the holder counts, nor
	 * do we add anything to a journal handle. Since this is
	 * supposed to be a new inode which the cluster doesn't know
	 * about yet, there is no need to. As far as the LVB handling
	 * is concerned, this is basically like acquiring an EX lock
	 * on a resource which has an invalid one -- we'll set it
	 * valid when we release the EX. */

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	/*
	 * We don't want to use LKM_LOCAL on a meta data lock as they
	 * don't use a generation in their lock names.
	 */
	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_meta_lockres, 1, 0);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_data_lockres, 1, 1);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

bail:
	mlog_exit(ret);
	return ret;
}

int ocfs2_rw_lock(struct inode *inode, int write)
{
	int status, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!inode);

	mlog_entry_void();

	mlog(0, "inode %llu take %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb))
		return 0;

	lockres = &OCFS2_I(inode)->ip_rw_lockres;

	level = write ? LKM_EXMODE : LKM_PRMODE;

	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
				    0);
	if (status < 0)
		mlog_errno(status);

	mlog_exit(status);
	return status;
}

void ocfs2_rw_unlock(struct inode *inode, int write)
{
	int level = write ? LKM_EXMODE : LKM_PRMODE;
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry_void();

	mlog(0, "inode %llu drop %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);

	mlog_exit_void();
}

/*
 * ocfs2_open_lock always gets a PR mode lock.
 */
int ocfs2_open_lock(struct inode *inode)
{
	int status = 0;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!inode);

	mlog_entry_void();

	mlog(0, "inode %llu take PRMODE open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_open_lockres;

	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
				    LKM_PRMODE, 0, 0);
	if (status < 0)
		mlog_errno(status);

out:
	mlog_exit(status);
	return status;
}

int ocfs2_try_open_lock(struct inode *inode, int write)
{
	int status = 0, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!inode);

	mlog_entry_void();

	mlog(0, "inode %llu try to take %s open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_open_lockres;

	level = write ? LKM_EXMODE : LKM_PRMODE;

	/*
	 * The file system may already be holding a PRMODE/EXMODE open
	 * lock. Since we pass LKM_NOQUEUE, the request won't block
	 * waiting on other nodes and the -EAGAIN will indicate to the
	 * caller that this inode is still in use.
	 */
	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
				    level, LKM_NOQUEUE, 0);

out:
	mlog_exit(status);
	return status;
}

/*
 * ocfs2_open_unlock unlocks PR and EX mode open locks.
 */
void ocfs2_open_unlock(struct inode *inode)
{
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry_void();

	mlog(0, "inode %llu drop open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb))
		goto out;

	if (lockres->l_ro_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     LKM_PRMODE);
	if (lockres->l_ex_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     LKM_EXMODE);

out:
	mlog_exit_void();
}

int ocfs2_data_lock_full(struct inode *inode,
			 int write,
			 int arg_flags)
{
	int status = 0, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!inode);

	mlog_entry_void();

	mlog(0, "inode %llu take %s DATA lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	/* We'll allow faking a readonly data lock for
	 * rodevices. */
	if (ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb))) {
		if (write) {
			status = -EROFS;
			mlog_errno(status);
		}
		goto out;
	}

	if (ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_data_lockres;

	level = write ? LKM_EXMODE : LKM_PRMODE;

	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level,
				    0, arg_flags);
	if (status < 0 && status != -EAGAIN)
		mlog_errno(status);

out:
	mlog_exit(status);
	return status;
}

/* see ocfs2_meta_lock_with_page() */
int ocfs2_data_lock_with_page(struct inode *inode,
			      int write,
			      struct page *page)
{
	int ret;

	ret = ocfs2_data_lock_full(inode, write, OCFS2_LOCK_NONBLOCK);
	if (ret == -EAGAIN) {
		unlock_page(page);
		if (ocfs2_data_lock(inode, write) == 0)
			ocfs2_data_unlock(inode, write);
		ret = AOP_TRUNCATED_PAGE;
	}

	return ret;
}

static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres)
{
	int kick = 0;

	mlog_entry_void();

	/* If we know that another node is waiting on our lock, kick
	 * the vote thread pre-emptively when we reach a release
	 * condition. */
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
		switch(lockres->l_blocking) {
		case LKM_EXMODE:
			if (!lockres->l_ex_holders && !lockres->l_ro_holders)
				kick = 1;
			break;
		case LKM_PRMODE:
			if (!lockres->l_ex_holders)
				kick = 1;
			break;
		default:
			BUG();
		}
	}

	if (kick)
		ocfs2_kick_vote_thread(osb);

	mlog_exit_void();
}

void ocfs2_data_unlock(struct inode *inode,
		       int write)
{
	int level = write ? LKM_EXMODE : LKM_PRMODE;
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_data_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry_void();

	mlog(0, "inode %llu drop %s DATA lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
	    !ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);

	mlog_exit_void();
}

#define OCFS2_SEC_BITS   34
#define OCFS2_SEC_SHIFT  (64 - 34)
#define OCFS2_NSEC_MASK  ((1ULL << OCFS2_SEC_SHIFT) - 1)

/* LVB only has room for 64 bits of time here so we pack it for
 * now. */
static u64 ocfs2_pack_timespec(struct timespec *spec)
{
	u64 res;
	u64 sec = spec->tv_sec;
	u32 nsec = spec->tv_nsec;

	res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);

	return res;
}

/* Call this with the lockres locked. I am reasonably sure we don't
 * need ip_lock in this function as anyone who would be changing those
 * values is supposed to be blocked in ocfs2_meta_lock right now. */
static void __ocfs2_stuff_meta_lvb(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
	struct ocfs2_meta_lvb *lvb;

	mlog_entry_void();

	lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;

	/*
	 * Invalidate the LVB of a deleted inode - this way other
	 * nodes are forced to go to disk and discover the new inode
	 * status.
	 */
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		lvb->lvb_version = 0;
		goto out;
	}

	lvb->lvb_version = OCFS2_LVB_VERSION;
	lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
	lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
	lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
	lvb->lvb_igid = cpu_to_be32(inode->i_gid);
	lvb->lvb_imode = cpu_to_be16(inode->i_mode);
	lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
	lvb->lvb_iatime_packed =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
	lvb->lvb_ictime_packed =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
	lvb->lvb_imtime_packed =
		cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
	lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
	lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
	lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);

out:
	mlog_meta_lvb(0, lockres);

	mlog_exit_void();
}

static void ocfs2_unpack_timespec(struct timespec *spec,
				  u64 packed_time)
{
	spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
	spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
}

static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
	struct ocfs2_meta_lvb *lvb;

	mlog_entry_void();

	mlog_meta_lvb(0, lockres);

	lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;

	/* We're safe here without the lockres lock... */
	spin_lock(&oi->ip_lock);
	oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
	i_size_write(inode, be64_to_cpu(lvb->lvb_isize));

	oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
	oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
	ocfs2_set_inode_flags(inode);

	/* fast-symlinks are a special case */
	if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
		inode->i_blocks = 0;
	else
		inode->i_blocks = ocfs2_inode_sector_count(inode);

	inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
	inode->i_gid = be32_to_cpu(lvb->lvb_igid);
	inode->i_mode = be16_to_cpu(lvb->lvb_imode);
	inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
	ocfs2_unpack_timespec(&inode->i_atime,
			      be64_to_cpu(lvb->lvb_iatime_packed));
	ocfs2_unpack_timespec(&inode->i_mtime,
			      be64_to_cpu(lvb->lvb_imtime_packed));
	ocfs2_unpack_timespec(&inode->i_ctime,
			      be64_to_cpu(lvb->lvb_ictime_packed));
	spin_unlock(&oi->ip_lock);

	mlog_exit_void();
}

static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
					      struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;

	if (lvb->lvb_version == OCFS2_LVB_VERSION
	    && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
		return 1;
	return 0;
}

/* Determine whether a lock resource needs to be refreshed, and
 * arbitrate who gets to refresh it.
 *
 * 0 means no refresh needed.
 *
 * > 0 means you need to refresh this and you MUST call
 * ocfs2_complete_lock_res_refresh afterwards. */
static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
{
	unsigned long flags;
	int status = 0;

	mlog_entry_void();

refresh_check:
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_on_refreshing_lock(lockres);
		goto refresh_check;
	}

	/* Ok, I'll be the one to refresh this lock. */
	lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	status = 1;
bail:
	mlog_exit(status);
	return status;
}

/* If status is non-zero, I'll mark it as not being in refresh
 * anymore, but I won't clear the needs refresh flag. */
static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
						   int status)
{
	unsigned long flags;
	mlog_entry_void();

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
	if (!status)
		lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);

	mlog_exit_void();
}

/* may or may not return a bh if it went to disk. */
static int ocfs2_meta_lock_update(struct inode *inode,
				  struct buffer_head **bh)
{
	int status = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
	struct ocfs2_dinode *fe;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry_void();

	if (ocfs2_mount_local(osb))
		goto bail;

	spin_lock(&oi->ip_lock);
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		mlog(0, "Orphaned inode %llu was deleted while we "
		     "were waiting on a lock. ip_flags = 0x%x\n",
		     (unsigned long long)oi->ip_blkno, oi->ip_flags);
		spin_unlock(&oi->ip_lock);
		status = -ENOENT;
		goto bail;
	}
	spin_unlock(&oi->ip_lock);

	if (!ocfs2_should_refresh_lock_res(lockres))
		goto bail;

	/* This will discard any caching information we might have had
	 * for the inode metadata. */
	ocfs2_metadata_cache_purge(inode);

	ocfs2_extent_map_trunc(inode, 0);

	if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
		mlog(0, "Trusting LVB on inode %llu\n",
		     (unsigned long long)oi->ip_blkno);
		ocfs2_refresh_inode_from_lvb(inode);
	} else {
		/* Boo, we have to go to disk. */
		/* read bh, cast, ocfs2_refresh_inode */
		status = ocfs2_read_block(OCFS2_SB(inode->i_sb), oi->ip_blkno,
					  bh, OCFS2_BH_CACHED, inode);
		if (status < 0) {
			mlog_errno(status);
			goto bail_refresh;
		}
		fe = (struct ocfs2_dinode *) (*bh)->b_data;

		/* This is a good chance to make sure we're not
		 * locking an invalid object.
		 *
		 * We bug on a stale inode here because we checked
		 * above whether it was wiped from disk. The wiping
		 * node provides a guarantee that we receive that
		 * message and can mark the inode before dropping any
		 * locks associated with it. */
		if (!OCFS2_IS_VALID_DINODE(fe)) {
			OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
			status = -EIO;
			goto bail_refresh;
		}
		mlog_bug_on_msg(inode->i_generation !=
				le32_to_cpu(fe->i_generation),
				"Invalid dinode %llu disk generation: %u "
				"inode->i_generation: %u\n",
				(unsigned long long)oi->ip_blkno,
				le32_to_cpu(fe->i_generation),
				inode->i_generation);
		mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
				!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
				"Stale dinode %llu dtime: %llu flags: 0x%x\n",
				(unsigned long long)oi->ip_blkno,
				(unsigned long long)le64_to_cpu(fe->i_dtime),
				le32_to_cpu(fe->i_flags));

		ocfs2_refresh_inode(inode, fe);
	}

	status = 0;
bail_refresh:
	ocfs2_complete_lock_res_refresh(lockres, status);
bail:
	mlog_exit(status);
	return status;
}

static int ocfs2_assign_bh(struct inode *inode,
			   struct buffer_head **ret_bh,
			   struct buffer_head *passed_bh)
{
	int status;

	if (passed_bh) {
		/* Ok, the update went to disk for us, use the
		 * returned bh. */
		*ret_bh = passed_bh;
		get_bh(*ret_bh);

		return 0;
	}

	status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
				  OCFS2_I(inode)->ip_blkno,
				  ret_bh,
				  OCFS2_BH_CACHED,
				  inode);
	if (status < 0)
		mlog_errno(status);

	return status;
}

/*
 * returns < 0 error if the callback will never be called, otherwise
 * the result of the lock will be communicated via the callback.
 */
int ocfs2_meta_lock_full(struct inode *inode,
			 struct buffer_head **ret_bh,
			 int ex,
			 int arg_flags)
{
	int status, level, dlm_flags, acquired;
	struct ocfs2_lock_res *lockres = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *local_bh = NULL;

	BUG_ON(!inode);

	mlog_entry_void();

	mlog(0, "inode %llu, take %s META lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     ex ? "EXMODE" : "PRMODE");

	status = 0;
	acquired = 0;
	/* We'll allow faking a readonly metadata lock for
	 * rodevices. */
	if (ocfs2_is_hard_readonly(osb)) {
		if (ex)
			status = -EROFS;
		goto bail;
	}

	if (ocfs2_mount_local(osb))
		goto local;

	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
		wait_event(osb->recovery_event,
			   ocfs2_node_map_is_empty(osb, &osb->recovery_map));

	lockres = &OCFS2_I(inode)->ip_meta_lockres;
	level = ex ? LKM_EXMODE : LKM_PRMODE;
	dlm_flags = 0;
	if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
		dlm_flags |= LKM_NOQUEUE;

	status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
	if (status < 0) {
		if (status != -EAGAIN && status != -EIOCBRETRY)
			mlog_errno(status);
		goto bail;
	}

	/* Notify the error cleanup path to drop the cluster lock. */
	acquired = 1;

	/* We wait twice because a node may have died while we were in
	 * the lower dlm layers. The second time though, we've
	 * committed to owning this lock so we don't allow signals to
	 * abort the operation. */
	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
		wait_event(osb->recovery_event,
			   ocfs2_node_map_is_empty(osb, &osb->recovery_map));

local:
	/*
	 * We only see this flag if we're being called from
	 * ocfs2_read_locked_inode(). It means we're locking an inode
	 * which hasn't been populated yet, so clear the refresh flag
	 * and let the caller handle it.
	 */
	if (inode->i_state & I_NEW) {
		status = 0;
		if (lockres)
			ocfs2_complete_lock_res_refresh(lockres, 0);
		goto bail;
	}

	/* This is fun. The caller may want a bh back, or it may
	 * not. ocfs2_meta_lock_update definitely wants one in, but
	 * may or may not read one, depending on what's in the
	 * LVB. The result of all of this is that we've *only* gone to
	 * disk if we have to, so the complexity is worthwhile. */
	status = ocfs2_meta_lock_update(inode, &local_bh);
	if (status < 0) {
		if (status != -ENOENT)
			mlog_errno(status);
		goto bail;
	}

	if (ret_bh) {
		status = ocfs2_assign_bh(inode, ret_bh, local_bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

bail:
	if (status < 0) {
		if (ret_bh && (*ret_bh)) {
			brelse(*ret_bh);
			*ret_bh = NULL;
		}
		if (acquired)
			ocfs2_meta_unlock(inode, ex);
	}

	if (local_bh)
		brelse(local_bh);

	mlog_exit(status);
	return status;
}

1839 /*
1840 * This is working around a lock inversion between tasks acquiring DLM locks
1841 * while holding a page lock and the vote thread which blocks dlm lock acquiry
1842 * while acquiring page locks.
1843 *
1844 * ** These _with_page variantes are only intended to be called from aop
1845 * methods that hold page locks and return a very specific *positive* error
1846 * code that aop methods pass up to the VFS -- test for errors with != 0. **
1847 *
1848 * The DLM is called such that it returns -EAGAIN if it would have blocked
1849 * waiting for the vote thread. In that case we unlock our page so the vote
1850 * thread can make progress. Once we've done this we have to return
1851 * AOP_TRUNCATED_PAGE so the aop method that called us can bubble that back up
1852 * into the VFS who will then immediately retry the aop call.
1853 *
1854 * We do a blocking lock and immediate unlock before returning, though, so that
1855 * the lock has a great chance of being cached on this node by the time the VFS
1856 * calls back to retry the aop. This has a potential to livelock as nodes
1857 * ping locks back and forth, but that's a risk we're willing to take to avoid
1858 * the lock inversion simply.
1859 */
1860 int ocfs2_meta_lock_with_page(struct inode *inode,
1861 struct buffer_head **ret_bh,
1862 int ex,
1863 struct page *page)
1864 {
1865 int ret;
1866
1867 ret = ocfs2_meta_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
1868 if (ret == -EAGAIN) {
1869 unlock_page(page);
1870 if (ocfs2_meta_lock(inode, ret_bh, ex) == 0)
1871 ocfs2_meta_unlock(inode, ex);
1872 ret = AOP_TRUNCATED_PAGE;
1873 }
1874
1875 return ret;
1876 }
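/*
 * A usage sketch (hypothetical, not code from this file): an aop
 * method that holds a locked page would call the _with_page variant
 * and propagate its *positive* return code unchanged:
 *
 *	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
 *	if (ret != 0) {
 *		if (ret != AOP_TRUNCATED_PAGE)
 *			mlog_errno(ret);
 *		return ret;
 *	}
 *	... work under the meta lock, then ocfs2_meta_unlock(inode, 0) ...
 *
 * Note the "!= 0" test from the comment above - AOP_TRUNCATED_PAGE is
 * positive, so a "ret < 0" check would silently miss it.
 */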
1877
1878 int ocfs2_meta_lock_atime(struct inode *inode,
1879 struct vfsmount *vfsmnt,
1880 int *level)
1881 {
1882 int ret;
1883
1884 mlog_entry_void();
1885 ret = ocfs2_meta_lock(inode, NULL, 0);
1886 if (ret < 0) {
1887 mlog_errno(ret);
1888 return ret;
1889 }
1890
1891 /*
1892 * If we should update atime, we will get EX lock,
1893 * otherwise we just get PR lock.
1894 */
1895 if (ocfs2_should_update_atime(inode, vfsmnt)) {
1896 struct buffer_head *bh = NULL;
1897
1898 ocfs2_meta_unlock(inode, 0);
1899 ret = ocfs2_meta_lock(inode, &bh, 1);
1900 if (ret < 0) {
1901 mlog_errno(ret);
1902 return ret;
1903 }
1904 *level = 1;
1905 if (ocfs2_should_update_atime(inode, vfsmnt))
1906 ocfs2_update_inode_atime(inode, bh);
1907 if (bh)
1908 brelse(bh);
1909 } else
1910 *level = 0;
1911
1912 mlog_exit(ret);
1913 return ret;
1914 }
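/*
 * Caller sketch (hypothetical): the level that comes back decides the
 * unlock, so callers hold onto it:
 *
 *	int level = 0;
 *
 *	ret = ocfs2_meta_lock_atime(inode, vfsmnt, &level);
 *	if (ret < 0)
 *		return ret;
 *	... read under the lock ...
 *	ocfs2_meta_unlock(inode, level);
 */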
1915
1916 void ocfs2_meta_unlock(struct inode *inode,
1917 int ex)
1918 {
1919 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1920 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
1921 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1922
1923 mlog_entry_void();
1924
1925 mlog(0, "inode %llu drop %s META lock\n",
1926 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1927 ex ? "EXMODE" : "PRMODE");
1928
1929 if (!ocfs2_is_hard_readonly(osb) &&
1930 !ocfs2_mount_local(osb))
1931 ocfs2_cluster_unlock(osb, lockres, level);
1932
1933 mlog_exit_void();
1934 }
1935
1936 int ocfs2_super_lock(struct ocfs2_super *osb,
1937 int ex)
1938 {
1939 int status = 0;
1940 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1941 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1942 struct buffer_head *bh;
1943 struct ocfs2_slot_info *si = osb->slot_info;
1944
1945 mlog_entry_void();
1946
1947 if (ocfs2_is_hard_readonly(osb))
1948 return -EROFS;
1949
1950 if (ocfs2_mount_local(osb))
1951 goto bail;
1952
1953 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
1954 if (status < 0) {
1955 mlog_errno(status);
1956 goto bail;
1957 }
1958
1959 /* The super block lock path is really in the best position to
1960 * know when resources covered by the lock need to be
1961 * refreshed, so we do it here. Of course, making sense of
1962 * everything is up to the caller :) */
1963 status = ocfs2_should_refresh_lock_res(lockres);
1964 if (status < 0) {
1965 mlog_errno(status);
1966 goto bail;
1967 }
1968 if (status) {
1969 bh = si->si_bh;
1970 status = ocfs2_read_block(osb, bh->b_blocknr, &bh, 0,
1971 si->si_inode);
1972 if (status == 0)
1973 ocfs2_update_slot_info(si);
1974
1975 ocfs2_complete_lock_res_refresh(lockres, status);
1976
1977 if (status < 0)
1978 mlog_errno(status);
1979 }
1980 bail:
1981 mlog_exit(status);
1982 return status;
1983 }
1984
1985 void ocfs2_super_unlock(struct ocfs2_super *osb,
1986 int ex)
1987 {
1988 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1989 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1990
1991 if (!ocfs2_mount_local(osb))
1992 ocfs2_cluster_unlock(osb, lockres, level);
1993 }
1994
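/*
 * The rename lock is a single cluster-wide resource taken EX around
 * directory moves so that concurrent renames on different nodes can't
 * create directory loops.
 */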
1995 int ocfs2_rename_lock(struct ocfs2_super *osb)
1996 {
1997 int status;
1998 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
1999
2000 if (ocfs2_is_hard_readonly(osb))
2001 return -EROFS;
2002
2003 if (ocfs2_mount_local(osb))
2004 return 0;
2005
2006 status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0);
2007 if (status < 0)
2008 mlog_errno(status);
2009
2010 return status;
2011 }
2012
2013 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2014 {
2015 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2016
2017 if (!ocfs2_mount_local(osb))
2018 ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
2019 }
2020
2021 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2022 {
2023 int ret;
2024 int level = ex ? LKM_EXMODE : LKM_PRMODE;
2025 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2026 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2027
2028 BUG_ON(!dl);
2029
2030 if (ocfs2_is_hard_readonly(osb))
2031 return -EROFS;
2032
2033 if (ocfs2_mount_local(osb))
2034 return 0;
2035
2036 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2037 if (ret < 0)
2038 mlog_errno(ret);
2039
2040 return ret;
2041 }
2042
2043 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2044 {
2045 int level = ex ? LKM_EXMODE : LKM_PRMODE;
2046 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2047 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2048
2049 if (!ocfs2_mount_local(osb))
2050 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2051 }
2052
2053 /* Reference counting of the dlm debug structure. We want this because
2054 * open references on the debug inodes can live on after an unmount, so
2055 * we can't rely on the ocfs2_super to always exist. */
2056 static void ocfs2_dlm_debug_free(struct kref *kref)
2057 {
2058 struct ocfs2_dlm_debug *dlm_debug;
2059
2060 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
2061
2062 kfree(dlm_debug);
2063 }
2064
2065 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
2066 {
2067 if (dlm_debug)
2068 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2069 }
2070
2071 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
2072 {
2073 kref_get(&debug->d_refcnt);
2074 }
2075
2076 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
2077 {
2078 struct ocfs2_dlm_debug *dlm_debug;
2079
2080 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
2081 if (!dlm_debug) {
2082 mlog_errno(-ENOMEM);
2083 goto out;
2084 }
2085
2086 kref_init(&dlm_debug->d_refcnt);
2087 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
2088 dlm_debug->d_locking_state = NULL;
2089 out:
2090 return dlm_debug;
2091 }
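/*
 * Lifetime sketch: ocfs2_new_dlm_debug() returns with one reference
 * held by kref_init(). Additional users pair ocfs2_get_dlm_debug()
 * with ocfs2_put_dlm_debug(); the final put frees the structure via
 * ocfs2_dlm_debug_free():
 *
 *	dlm_debug = ocfs2_new_dlm_debug();	refcount 1
 *	ocfs2_get_dlm_debug(dlm_debug);		refcount 2
 *	ocfs2_put_dlm_debug(dlm_debug);		refcount 1
 *	ocfs2_put_dlm_debug(dlm_debug);		freed
 */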
2092
2093 /* Access to this is arbitrated for us via seq_file->sem. */
2094 struct ocfs2_dlm_seq_priv {
2095 struct ocfs2_dlm_debug *p_dlm_debug;
2096 struct ocfs2_lock_res p_iter_res;
2097 struct ocfs2_lock_res p_tmp_res;
2098 };
2099
2100 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
2101 struct ocfs2_dlm_seq_priv *priv)
2102 {
2103 struct ocfs2_lock_res *iter, *ret = NULL;
2104 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
2105
2106 assert_spin_locked(&ocfs2_dlm_tracking_lock);
2107
2108 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
2109 /* we've wrapped around to the list head - end of list */
2110 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
2111 mlog(0, "End of list found, %p\n", ret);
2112 break;
2113 }
2114
2115 /* We track our "dummy" iteration lockres' by a NULL
2116 * l_ops field. */
2117 if (iter->l_ops != NULL) {
2118 ret = iter;
2119 break;
2120 }
2121 }
2122
2123 return ret;
2124 }
2125
2126 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2127 {
2128 struct ocfs2_dlm_seq_priv *priv = m->private;
2129 struct ocfs2_lock_res *iter;
2130
2131 spin_lock(&ocfs2_dlm_tracking_lock);
2132 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2133 if (iter) {
2134 /* Since a lockres has the lifetime of its container
2135 * (which can be an inode, an ocfs2_super, etc.) we want to
2136 * copy this out to a temporary lockres while still
2137 * under the spinlock. Obviously after this we can't
2138 * trust any pointers on the copy returned, but that's
2139 * ok as the information we want isn't typically held
2140 * in them. */
2141 priv->p_tmp_res = *iter;
2142 iter = &priv->p_tmp_res;
2143 }
2144 spin_unlock(&ocfs2_dlm_tracking_lock);
2145
2146 return iter;
2147 }
2148
2149 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2150 {
2151 }
2152
2153 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2154 {
2155 struct ocfs2_dlm_seq_priv *priv = m->private;
2156 struct ocfs2_lock_res *iter = v;
2157 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2158
2159 spin_lock(&ocfs2_dlm_tracking_lock);
2160 iter = ocfs2_dlm_next_res(iter, priv);
2161 list_del_init(&dummy->l_debug_list);
2162 if (iter) {
2163 list_add(&dummy->l_debug_list, &iter->l_debug_list);
2164 priv->p_tmp_res = *iter;
2165 iter = &priv->p_tmp_res;
2166 }
2167 spin_unlock(&ocfs2_dlm_tracking_lock);
2168
2169 return iter;
2170 }
2171
2172 /* So that debugfs.ocfs2 can determine which format is being used */
2173 #define OCFS2_DLM_DEBUG_STR_VERSION 1
2174 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2175 {
2176 int i;
2177 char *lvb;
2178 struct ocfs2_lock_res *lockres = v;
2179
2180 if (!lockres)
2181 return -EINVAL;
2182
2183 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2184
2185 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2186 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2187 lockres->l_name,
2188 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2189 else
2190 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2191
2192 seq_printf(m, "%d\t"
2193 "0x%lx\t"
2194 "0x%x\t"
2195 "0x%x\t"
2196 "%u\t"
2197 "%u\t"
2198 "%d\t"
2199 "%d\t",
2200 lockres->l_level,
2201 lockres->l_flags,
2202 lockres->l_action,
2203 lockres->l_unlock_action,
2204 lockres->l_ro_holders,
2205 lockres->l_ex_holders,
2206 lockres->l_requested,
2207 lockres->l_blocking);
2208
2209 /* Dump the raw LVB; the cast keeps signed chars from sign-extending */
2210 lvb = lockres->l_lksb.lvb;
2211 for (i = 0; i < DLM_LVB_LEN; i++)
2212 seq_printf(m, "0x%x\t", (unsigned char)lvb[i]);
2213
2214 /* End the line */
2215 seq_printf(m, "\n");
2216 return 0;
2217 }
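/*
 * Each lockres dumps as one tab-separated record built from the
 * format strings above:
 *
 *	version name level flags action unlock_action ro_holders
 *	ex_holders requested blocking lvb[0] .. lvb[DLM_LVB_LEN - 1]
 *
 * debugfs.ocfs2 keys off the leading version field when parsing.
 */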
2218
2219 static struct seq_operations ocfs2_dlm_seq_ops = {
2220 .start = ocfs2_dlm_seq_start,
2221 .stop = ocfs2_dlm_seq_stop,
2222 .next = ocfs2_dlm_seq_next,
2223 .show = ocfs2_dlm_seq_show,
2224 };
2225
2226 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2227 {
2228 struct seq_file *seq = (struct seq_file *) file->private_data;
2229 struct ocfs2_dlm_seq_priv *priv = seq->private;
2230 struct ocfs2_lock_res *res = &priv->p_iter_res;
2231
2232 ocfs2_remove_lockres_tracking(res);
2233 ocfs2_put_dlm_debug(priv->p_dlm_debug);
2234 return seq_release_private(inode, file);
2235 }
2236
2237 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2238 {
2239 int ret;
2240 struct ocfs2_dlm_seq_priv *priv;
2241 struct seq_file *seq;
2242 struct ocfs2_super *osb;
2243
2244 priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
2245 if (!priv) {
2246 ret = -ENOMEM;
2247 mlog_errno(ret);
2248 goto out;
2249 }
2250 osb = inode->i_private;
2251 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2252 priv->p_dlm_debug = osb->osb_dlm_debug;
2253 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2254
2255 ret = seq_open(file, &ocfs2_dlm_seq_ops);
2256 if (ret) {
2257 kfree(priv);
2258 mlog_errno(ret);
2259 goto out;
2260 }
2261
2262 seq = (struct seq_file *) file->private_data;
2263 seq->private = priv;
2264
2265 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2266 priv->p_dlm_debug);
2267
2268 out:
2269 return ret;
2270 }
2271
2272 static const struct file_operations ocfs2_dlm_debug_fops = {
2273 .open = ocfs2_dlm_debug_open,
2274 .release = ocfs2_dlm_debug_release,
2275 .read = seq_read,
2276 .llseek = seq_lseek,
2277 };
2278
2279 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2280 {
2281 int ret = 0;
2282 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2283
2284 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2285 S_IFREG|S_IRUSR,
2286 osb->osb_debug_root,
2287 osb,
2288 &ocfs2_dlm_debug_fops);
2289 if (!dlm_debug->d_locking_state) {
2290 ret = -EINVAL;
2291 mlog(ML_ERROR,
2292 "Unable to create locking state debugfs file.\n");
2293 goto out;
2294 }
2295
2296 ocfs2_get_dlm_debug(dlm_debug);
2297 out:
2298 return ret;
2299 }
2300
2301 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2302 {
2303 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2304
2305 if (dlm_debug) {
2306 debugfs_remove(dlm_debug->d_locking_state);
2307 ocfs2_put_dlm_debug(dlm_debug);
2308 }
2309 }
2310
2311 int ocfs2_dlm_init(struct ocfs2_super *osb)
2312 {
2313 int status = 0;
2314 u32 dlm_key;
2315 struct dlm_ctxt *dlm = NULL;
2316
2317 mlog_entry_void();
2318
2319 if (ocfs2_mount_local(osb))
2320 goto local;
2321
2322 status = ocfs2_dlm_init_debug(osb);
2323 if (status < 0) {
2324 mlog_errno(status);
2325 goto bail;
2326 }
2327
2328 /* launch vote thread */
2329 osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote");
2330 if (IS_ERR(osb->vote_task)) {
2331 status = PTR_ERR(osb->vote_task);
2332 osb->vote_task = NULL;
2333 mlog_errno(status);
2334 goto bail;
2335 }
2336
2337 /* used by the dlm code to make message headers unique; each
2338 * node in this domain must agree on this. */
2339 dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
2340
2341 /* for now, uuid == domain */
2342 dlm = dlm_register_domain(osb->uuid_str, dlm_key);
2343 if (IS_ERR(dlm)) {
2344 status = PTR_ERR(dlm);
2345 mlog_errno(status);
2346 goto bail;
2347 }
2348
2349 dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
2350
2351 local:
2352 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
2353 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
2354
2355 osb->dlm = dlm;
2356
2357 status = 0;
2358 bail:
2359 if (status < 0) {
2360 ocfs2_dlm_shutdown_debug(osb);
2361 if (osb->vote_task)
2362 kthread_stop(osb->vote_task);
2363 }
2364
2365 mlog_exit(status);
2366 return status;
2367 }
2368
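/*
 * Teardown order matters here: stop eviction callbacks first, drop
 * the osb-level locks, stop the vote thread so nothing else gets
 * queued, free the lockres memory, and only then leave the dlm domain
 * and tear down the debugfs state.
 */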
2369 void ocfs2_dlm_shutdown(struct ocfs2_super *osb)
2370 {
2371 mlog_entry_void();
2372
2373 dlm_unregister_eviction_cb(&osb->osb_eviction_cb);
2374
2375 ocfs2_drop_osb_locks(osb);
2376
2377 if (osb->vote_task) {
2378 kthread_stop(osb->vote_task);
2379 osb->vote_task = NULL;
2380 }
2381
2382 ocfs2_lock_res_free(&osb->osb_super_lockres);
2383 ocfs2_lock_res_free(&osb->osb_rename_lockres);
2384
2385 dlm_unregister_domain(osb->dlm);
2386 osb->dlm = NULL;
2387
2388 ocfs2_dlm_shutdown_debug(osb);
2389
2390 mlog_exit_void();
2391 }
2392
2393 static void ocfs2_unlock_ast(void *opaque, enum dlm_status status)
2394 {
2395 struct ocfs2_lock_res *lockres = opaque;
2396 unsigned long flags;
2397
2398 mlog_entry_void();
2399
2400 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
2401 lockres->l_unlock_action);
2402
2403 spin_lock_irqsave(&lockres->l_lock, flags);
2404 /* We tried to cancel a convert request, but it was already
2405 * granted. All we want to do here is clear our unlock
2406 * state. The wake_up call done at the bottom is redundant
2407 * (ocfs2_prepare_cancel_convert doesn't sleep on this) but doesn't
2408 * hurt anything anyway */
2409 if (status == DLM_CANCELGRANT &&
2410 lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2411 mlog(0, "Got cancelgrant for %s\n", lockres->l_name);
2412
2413 /* We don't clear the busy flag in this case as it
2414 * should have been cleared by the ast which the dlm
2415 * has called. */
2416 goto complete_unlock;
2417 }
2418
2419 if (status != DLM_NORMAL) {
2420 mlog(ML_ERROR, "Dlm passes status %d for lock %s, "
2421 "unlock_action %d\n", status, lockres->l_name,
2422 lockres->l_unlock_action);
2423 spin_unlock_irqrestore(&lockres->l_lock, flags);
2424 return;
2425 }
2426
2427 switch(lockres->l_unlock_action) {
2428 case OCFS2_UNLOCK_CANCEL_CONVERT:
2429 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
2430 lockres->l_action = OCFS2_AST_INVALID;
2431 break;
2432 case OCFS2_UNLOCK_DROP_LOCK:
2433 lockres->l_level = LKM_IVMODE;
2434 break;
2435 default:
2436 BUG();
2437 }
2438
2439 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
2440 complete_unlock:
2441 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
2442 spin_unlock_irqrestore(&lockres->l_lock, flags);
2443
2444 wake_up(&lockres->l_event);
2445
2446 mlog_exit_void();
2447 }
2448
2449 static int ocfs2_drop_lock(struct ocfs2_super *osb,
2450 struct ocfs2_lock_res *lockres)
2451 {
2452 enum dlm_status status;
2453 unsigned long flags;
2454 int lkm_flags = 0;
2455
2456 /* We didn't get anywhere near actually using this lockres. */
2457 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
2458 goto out;
2459
2460 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
2461 lkm_flags |= LKM_VALBLK;
2462
2463 spin_lock_irqsave(&lockres->l_lock, flags);
2464
2465 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
2466 "lockres %s, flags 0x%lx\n",
2467 lockres->l_name, lockres->l_flags);
2468
2469 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
2470 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
2471 "%u, unlock_action = %u\n",
2472 lockres->l_name, lockres->l_flags, lockres->l_action,
2473 lockres->l_unlock_action);
2474
2475 spin_unlock_irqrestore(&lockres->l_lock, flags);
2476
2477 /* XXX: Today we just wait on any busy
2478 * locks... Perhaps we need to cancel converts in the
2479 * future? */
2480 ocfs2_wait_on_busy_lock(lockres);
2481
2482 spin_lock_irqsave(&lockres->l_lock, flags);
2483 }
2484
2485 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2486 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
2487 lockres->l_level == LKM_EXMODE &&
2488 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2489 lockres->l_ops->set_lvb(lockres);
2490 }
2491
2492 if (lockres->l_flags & OCFS2_LOCK_BUSY)
2493 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
2494 lockres->l_name);
2495 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
2496 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
2497
2498 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
2499 spin_unlock_irqrestore(&lockres->l_lock, flags);
2500 goto out;
2501 }
2502
2503 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
2504
2505 /* make sure we never get here while waiting for an ast to
2506 * fire. */
2507 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
2508
2509 /* is this necessary? */
2510 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2511 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
2512 spin_unlock_irqrestore(&lockres->l_lock, flags);
2513
2514 mlog(0, "lock %s\n", lockres->l_name);
2515
2516 status = dlmunlock(osb->dlm, &lockres->l_lksb, lkm_flags,
2517 ocfs2_unlock_ast, lockres);
2518 if (status != DLM_NORMAL) {
2519 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2520 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
2521 dlm_print_one_lock(lockres->l_lksb.lockid);
2522 BUG();
2523 }
2524 mlog(0, "lock %s, successfull return from dlmunlock\n",
2525 lockres->l_name);
2526
2527 ocfs2_wait_on_busy_lock(lockres);
2528 out:
2529 mlog_exit(0);
2530 return 0;
2531 }
2532
2533 /* Mark the lockres as being dropped. It will no longer be
2534 * queued if blocking, but we still may have to wait on it
2535 * being dequeued from the vote thread before we can consider
2536 * it safe to drop.
2537 *
2538 * You can *not* attempt to call cluster_lock on this lockres anymore. */
2539 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
2540 {
2541 int status;
2542 struct ocfs2_mask_waiter mw;
2543 unsigned long flags;
2544
2545 ocfs2_init_mask_waiter(&mw);
2546
2547 spin_lock_irqsave(&lockres->l_lock, flags);
2548 lockres->l_flags |= OCFS2_LOCK_FREEING;
2549 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
2550 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
2551 spin_unlock_irqrestore(&lockres->l_lock, flags);
2552
2553 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
2554
2555 status = ocfs2_wait_for_mask(&mw);
2556 if (status)
2557 mlog_errno(status);
2558
2559 spin_lock_irqsave(&lockres->l_lock, flags);
2560 }
2561 spin_unlock_irqrestore(&lockres->l_lock, flags);
2562 }
2563
2564 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
2565 struct ocfs2_lock_res *lockres)
2566 {
2567 int ret;
2568
2569 ocfs2_mark_lockres_freeing(lockres);
2570 ret = ocfs2_drop_lock(osb, lockres);
2571 if (ret)
2572 mlog_errno(ret);
2573 }
2574
2575 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
2576 {
2577 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
2578 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
2579 }
2580
2581 int ocfs2_drop_inode_locks(struct inode *inode)
2582 {
2583 int status, err;
2584
2585 mlog_entry_void();
2586
2587 /* No need to call ocfs2_mark_lockres_freeing here -
2588 * ocfs2_clear_inode has done it for us. */
2589
2590 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2591 &OCFS2_I(inode)->ip_open_lockres);
2592 if (err < 0)
2593 mlog_errno(err);
2594
2595 status = err;
2596
2597 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2598 &OCFS2_I(inode)->ip_data_lockres);
2599 if (err < 0)
2600 mlog_errno(err);
2601 if (err < 0 && !status)
2602 status = err;
2603
2604 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2605 &OCFS2_I(inode)->ip_meta_lockres);
2606 if (err < 0)
2607 mlog_errno(err);
2608 if (err < 0 && !status)
2609 status = err;
2610
2611 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2612 &OCFS2_I(inode)->ip_rw_lockres);
2613 if (err < 0)
2614 mlog_errno(err);
2615 if (err < 0 && !status)
2616 status = err;
2617
2618 mlog_exit(status);
2619 return status;
2620 }
2621
2622 static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
2623 int new_level)
2624 {
2625 assert_spin_locked(&lockres->l_lock);
2626
2627 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
2628
2629 if (lockres->l_level <= new_level) {
2630 mlog(ML_ERROR, "lockres->l_level (%u) <= new_level (%u)\n",
2631 lockres->l_level, new_level);
2632 BUG();
2633 }
2634
2635 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
2636 lockres->l_name, new_level, lockres->l_blocking);
2637
2638 lockres->l_action = OCFS2_AST_DOWNCONVERT;
2639 lockres->l_requested = new_level;
2640 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2641 }
2642
2643 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
2644 struct ocfs2_lock_res *lockres,
2645 int new_level,
2646 int lvb)
2647 {
2648 int ret, dlm_flags = LKM_CONVERT;
2649 enum dlm_status status;
2650
2651 mlog_entry_void();
2652
2653 if (lvb)
2654 dlm_flags |= LKM_VALBLK;
2655
2656 status = dlmlock(osb->dlm,
2657 new_level,
2658 &lockres->l_lksb,
2659 dlm_flags,
2660 lockres->l_name,
2661 OCFS2_LOCK_ID_MAX_LEN - 1,
2662 ocfs2_locking_ast,
2663 lockres,
2664 ocfs2_blocking_ast);
2665 if (status != DLM_NORMAL) {
2666 ocfs2_log_dlm_error("dlmlock", status, lockres);
2667 ret = -EINVAL;
2668 ocfs2_recover_from_dlm_error(lockres, 1);
2669 goto bail;
2670 }
2671
2672 ret = 0;
2673 bail:
2674 mlog_exit(ret);
2675 return ret;
2676 }
2677
2678 /* returns 1 when the caller should unlock and call dlmunlock */
2679 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
2680 struct ocfs2_lock_res *lockres)
2681 {
2682 assert_spin_locked(&lockres->l_lock);
2683
2684 mlog_entry_void();
2685 mlog(0, "lock %s\n", lockres->l_name);
2686
2687 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2688 /* If we're already trying to cancel a lock conversion
2689 * then just drop the spinlock and allow the caller to
2690 * requeue this lock. */
2691
2692 mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
2693 return 0;
2694 }
2695
2696 /* were we in a convert when the bast fired? */
2697 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
2698 lockres->l_action != OCFS2_AST_DOWNCONVERT);
2699 /* set things up for the unlockast to know to just
2700 * clear out the ast_action and unset busy, etc. */
2701 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
2702
2703 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
2704 "lock %s, invalid flags: 0x%lx\n",
2705 lockres->l_name, lockres->l_flags);
2706
2707 return 1;
2708 }
2709
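/*
 * The cancel path is split in two so the decision can be made while
 * holding lockres->l_lock but the dlm call happens outside of it. A
 * caller sketch, mirroring ocfs2_unblock_lock() below:
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	ret = ocfs2_prepare_cancel_convert(osb, lockres);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *	if (ret)
 *		ret = ocfs2_cancel_convert(osb, lockres);
 */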
2710 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
2711 struct ocfs2_lock_res *lockres)
2712 {
2713 int ret;
2714 enum dlm_status status;
2715
2716 mlog_entry_void();
2717 mlog(0, "lock %s\n", lockres->l_name);
2718
2719 ret = 0;
2720 status = dlmunlock(osb->dlm,
2721 &lockres->l_lksb,
2722 LKM_CANCEL,
2723 ocfs2_unlock_ast,
2724 lockres);
2725 if (status != DLM_NORMAL) {
2726 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2727 ret = -EINVAL;
2728 ocfs2_recover_from_dlm_error(lockres, 0);
2729 }
2730
2731 mlog(0, "lock %s return from dlmunlock\n", lockres->l_name);
2732
2733 mlog_exit(ret);
2734 return ret;
2735 }
2736
2737 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
2738 struct ocfs2_lock_res *lockres,
2739 struct ocfs2_unblock_ctl *ctl)
2740 {
2741 unsigned long flags;
2742 int blocking;
2743 int new_level;
2744 int ret = 0;
2745 int set_lvb = 0;
2746
2747 mlog_entry_void();
2748
2749 spin_lock_irqsave(&lockres->l_lock, flags);
2750
2751 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
2752
2753 recheck:
2754 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
2755 ctl->requeue = 1;
2756 ret = ocfs2_prepare_cancel_convert(osb, lockres);
2757 spin_unlock_irqrestore(&lockres->l_lock, flags);
2758 if (ret) {
2759 ret = ocfs2_cancel_convert(osb, lockres);
2760 if (ret < 0)
2761 mlog_errno(ret);
2762 }
2763 goto leave;
2764 }
2765
2766 /* if we're blocking an exclusive and we have *any* holders,
2767 * then requeue. */
2768 if ((lockres->l_blocking == LKM_EXMODE)
2769 && (lockres->l_ex_holders || lockres->l_ro_holders))
2770 goto leave_requeue;
2771
2772 /* If it's a PR we're blocking, then only
2773 * requeue if we've got any EX holders */
2774 if (lockres->l_blocking == LKM_PRMODE &&
2775 lockres->l_ex_holders)
2776 goto leave_requeue;
2777
2778 /*
2779 * Can we get a lock in this state if the holder counts are
2780 * zero? The metadata unblock code used to check this.
2781 */
2782 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
2783 && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
2784 goto leave_requeue;
2785
2786 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
2787
2788 if (lockres->l_ops->check_downconvert
2789 && !lockres->l_ops->check_downconvert(lockres, new_level))
2790 goto leave_requeue;
2791
2792 /* If we get here, then we know that there are no more
2793 * incompatible holders (and anyone asking for an incompatible
2794 * lock is blocked). We can now downconvert the lock */
2795 if (!lockres->l_ops->downconvert_worker)
2796 goto downconvert;
2797
2798 /* Some lockres types want to do a bit of work before
2799 * downconverting a lock. Allow that here. The worker function
2800 * may sleep, so we save off a copy of what we're blocking as
2801 * it may change while we're not holding the spin lock. */
2802 blocking = lockres->l_blocking;
2803 spin_unlock_irqrestore(&lockres->l_lock, flags);
2804
2805 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
2806
2807 if (ctl->unblock_action == UNBLOCK_STOP_POST)
2808 goto leave;
2809
2810 spin_lock_irqsave(&lockres->l_lock, flags);
2811 if (blocking != lockres->l_blocking) {
2812 /* If this changed underneath us, then we can't drop
2813 * it just yet. */
2814 goto recheck;
2815 }
2816
2817 downconvert:
2818 ctl->requeue = 0;
2819
2820 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2821 if (lockres->l_level == LKM_EXMODE)
2822 set_lvb = 1;
2823
2824 /*
2825 * We only set the lvb if the lock has been fully
2826 * refreshed - otherwise we risk writing out stale
2827 * data. If we don't set it, there's no need to clear
2828 * the lvb here either, as its value is still valid.
2829 */
2830 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2831 lockres->l_ops->set_lvb(lockres);
2832 }
2833
2834 ocfs2_prepare_downconvert(lockres, new_level);
2835 spin_unlock_irqrestore(&lockres->l_lock, flags);
2836 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
2837 leave:
2838 mlog_exit(ret);
2839 return ret;
2840
2841 leave_requeue:
2842 spin_unlock_irqrestore(&lockres->l_lock, flags);
2843 ctl->requeue = 1;
2844
2845 mlog_exit(0);
2846 return 0;
2847 }
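/*
 * Decision summary for ocfs2_unblock_lock() above:
 *
 *	lock busy			cancel the convert, requeue
 *	blocking EX, any holders	requeue
 *	blocking PR, EX holders		requeue
 *	refresh in flight		requeue (LOCK_TYPE_REQUIRES_REFRESH)
 *	->check_downconvert fails	requeue
 *	otherwise			run ->downconvert_worker if set,
 *					stuff the LVB when safe, downconvert
 */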
2848
2849 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
2850 int blocking)
2851 {
2852 struct inode *inode;
2853 struct address_space *mapping;
2854
2855 inode = ocfs2_lock_res_inode(lockres);
2856 mapping = inode->i_mapping;
2857
2858 /*
2859 * We need this before the filemap_fdatawrite() so that it can
2860 * transfer the dirty bit from the PTE to the
2861 * page. Unfortunately this means that even for EX->PR
2862 * downconverts, we'll lose our mappings and have to build
2863 * them up again.
2864 */
2865 unmap_mapping_range(mapping, 0, 0, 0);
2866
2867 if (filemap_fdatawrite(mapping)) {
2868 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
2869 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2870 }
2871 sync_mapping_buffers(mapping);
2872 if (blocking == LKM_EXMODE) {
2873 truncate_inode_pages(mapping, 0);
2874 } else {
2875 /* We only need to wait on the I/O if we're not also
2876 * truncating pages because truncate_inode_pages waits
2877 * for us above. We don't truncate pages if we're
2878 * blocking anything < EXMODE because we want to keep
2879 * them around in that case. */
2880 filemap_fdatawait(mapping);
2881 }
2882
2883 return UNBLOCK_CONTINUE;
2884 }
2885
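/*
 * We may not drop from EX until all journaled metadata covered by
 * this lock has been checkpointed - a node granted the lock next will
 * read the inode straight off disk. If we're not checkpointed yet,
 * kick the checkpoint thread and return 0 so the caller requeues us.
 */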
2886 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
2887 int new_level)
2888 {
2889 struct inode *inode = ocfs2_lock_res_inode(lockres);
2890 int checkpointed = ocfs2_inode_fully_checkpointed(inode);
2891
2892 BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE);
2893 BUG_ON(lockres->l_level != LKM_EXMODE && !checkpointed);
2894
2895 if (checkpointed)
2896 return 1;
2897
2898 ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb));
2899 return 0;
2900 }
2901
2902 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
2903 {
2904 struct inode *inode = ocfs2_lock_res_inode(lockres);
2905
2906 __ocfs2_stuff_meta_lvb(inode);
2907 }
2908
2909 /*
2910 * Does the final reference drop on our dentry lock. Right now this
2911 * happens in the vote thread, but we could choose to simplify the
2912 * dlmglue API and push these off to the ocfs2_wq in the future.
2913 */
2914 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
2915 struct ocfs2_lock_res *lockres)
2916 {
2917 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
2918 ocfs2_dentry_lock_put(osb, dl);
2919 }
2920
2921 /*
2922 * d_delete() matching dentries before the lock downconvert.
2923 *
2924 * At this point, any process waiting to destroy the
2925 * dentry_lock due to last ref count is stopped by the
2926 * OCFS2_LOCK_QUEUED flag.
2927 *
2928 * We have two potential problems
2929 *
2930 * 1) If we do the last reference drop on our dentry_lock (via dput)
2931 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
2932 * the downconvert to finish. Instead we take an elevated
2933 * reference and push the drop until after we've completed our
2934 * unblock processing.
2935 *
2936 * 2) There might be another process with a final reference,
2937 * waiting on us to finish processing. If this is the case, we
2938 * detect it and exit out - there are no more dentries anyway.
2939 */
2940 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
2941 int blocking)
2942 {
2943 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
2944 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
2945 struct dentry *dentry;
2946 unsigned long flags;
2947 int extra_ref = 0;
2948
2949 /*
2950 * This node is blocking another node from getting a read
2951 * lock. This happens when we've renamed within a
2952 * directory. We've forced the other nodes to d_delete(), but
2953 * we never actually dropped our lock because it's still
2954 * valid. The downconvert code will retain a PR for this node,
2955 * so there's no further work to do.
2956 */
2957 if (blocking == LKM_PRMODE)
2958 return UNBLOCK_CONTINUE;
2959
2960 /*
2961 * Mark this inode as potentially orphaned. The code in
2962 * ocfs2_delete_inode() will figure out whether it actually
2963 * needs to be freed or not.
2964 */
2965 spin_lock(&oi->ip_lock);
2966 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
2967 spin_unlock(&oi->ip_lock);
2968
2969 /*
2970 * Yuck. We need to make sure however that the check of
2971 * OCFS2_LOCK_FREEING and the extra reference are atomic with
2972 * respect to a reference decrement or the setting of that
2973 * flag.
2974 */
2975 spin_lock_irqsave(&lockres->l_lock, flags);
2976 spin_lock(&dentry_attach_lock);
2977 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
2978 && dl->dl_count) {
2979 dl->dl_count++;
2980 extra_ref = 1;
2981 }
2982 spin_unlock(&dentry_attach_lock);
2983 spin_unlock_irqrestore(&lockres->l_lock, flags);
2984
2985 mlog(0, "extra_ref = %d\n", extra_ref);
2986
2987 /*
2988 * We have a process waiting on us in ocfs2_dentry_iput(),
2989 * which means we can't have any more outstanding
2990 * aliases. There's no need to do any more work.
2991 */
2992 if (!extra_ref)
2993 return UNBLOCK_CONTINUE;
2994
2995 spin_lock(&dentry_attach_lock);
2996 while (1) {
2997 dentry = ocfs2_find_local_alias(dl->dl_inode,
2998 dl->dl_parent_blkno, 1);
2999 if (!dentry)
3000 break;
3001 spin_unlock(&dentry_attach_lock);
3002
3003 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
3004 dentry->d_name.name);
3005
3006 /*
3007 * The following dcache calls may do an
3008 * iput(). Normally we don't want that from the
3009 * downconverting thread, but in this case it's ok
3010 * because the requesting node already has an
3011 * exclusive lock on the inode, so it can't be queued
3012 * for a downconvert.
3013 */
3014 d_delete(dentry);
3015 dput(dentry);
3016
3017 spin_lock(&dentry_attach_lock);
3018 }
3019 spin_unlock(&dentry_attach_lock);
3020
3021 /*
3022 * If we are the last holder of this dentry lock, there is no
3023 * reason to downconvert so skip straight to the unlock.
3024 */
3025 if (dl->dl_count == 1)
3026 return UNBLOCK_STOP_POST;
3027
3028 return UNBLOCK_CONTINUE_POST;
3029 }
3030
3031 void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3032 struct ocfs2_lock_res *lockres)
3033 {
3034 int status;
3035 struct ocfs2_unblock_ctl ctl = {0, 0,};
3036 unsigned long flags;
3037
3038 /* Our reference to the lockres in this function can be
3039 * considered valid until we remove the OCFS2_LOCK_QUEUED
3040 * flag. */
3041
3042 mlog_entry_void();
3043
3044 BUG_ON(!lockres);
3045 BUG_ON(!lockres->l_ops);
3046
3047 mlog(0, "lockres %s blocked.\n", lockres->l_name);
3048
3049 /* Detect whether a lock has been marked as going away while
3050 * the vote thread was processing other things. A lock can
3051 * still be marked with OCFS2_LOCK_FREEING after this check,
3052 * but short circuiting here will still save us some
3053 * work. */
3054 spin_lock_irqsave(&lockres->l_lock, flags);
3055 if (lockres->l_flags & OCFS2_LOCK_FREEING)
3056 goto unqueue;
3057 spin_unlock_irqrestore(&lockres->l_lock, flags);
3058
3059 status = ocfs2_unblock_lock(osb, lockres, &ctl);
3060 if (status < 0)
3061 mlog_errno(status);
3062
3063 spin_lock_irqsave(&lockres->l_lock, flags);
3064 unqueue:
3065 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
3066 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
3067 } else
3068 ocfs2_schedule_blocked_lock(osb, lockres);
3069
3070 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
3071 ctl.requeue ? "yes" : "no");
3072 spin_unlock_irqrestore(&lockres->l_lock, flags);
3073
3074 if (ctl.unblock_action != UNBLOCK_CONTINUE
3075 && lockres->l_ops->post_unlock)
3076 lockres->l_ops->post_unlock(osb, lockres);
3077
3078 mlog_exit_void();
3079 }
3080
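/*
 * Put the lockres on the osb's blocked list for the vote thread to
 * (re)process. Called with lockres->l_lock held; a lockres marked
 * OCFS2_LOCK_FREEING is deliberately never queued.
 */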
3081 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3082 struct ocfs2_lock_res *lockres)
3083 {
3084 mlog_entry_void();
3085
3086 assert_spin_locked(&lockres->l_lock);
3087
3088 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
3089 /* Do not schedule a lock for downconvert when it's on
3090 * the way to destruction - any nodes wanting access
3091 * to the resource will get it soon. */
3092 mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
3093 lockres->l_name, lockres->l_flags);
3094 return;
3095 }
3096
3097 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
3098
3099 spin_lock(&osb->vote_task_lock);
3100 if (list_empty(&lockres->l_blocked_list)) {
3101 list_add_tail(&lockres->l_blocked_list,
3102 &osb->blocked_lock_list);
3103 osb->blocked_lock_count++;
3104 }
3105 spin_unlock(&osb->vote_task_lock);
3106
3107 mlog_exit_void();
3108 }