/* The left rotation "pivots" around the link from node to node->right, and
* - node will be linked to node->right's left child, and
- * - node->right's left child will be linked to node's right child. */
+ * - node->right's left child will be linked to node's right child.
+ */
static void __rotate_left(struct interval_node *node,
struct interval_node **root)
{
/* The right rotation "pivots" around the link from node to node->left, and
* - node will be linked to node->left's right child, and
- * - node->left's right child will be linked to node's left child. */
+ * - node->left's right child will be linked to node's left child.
+ */
static void __rotate_right(struct interval_node *node,
struct interval_node **root)
{
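/* Illustrative aside (not from the Lustre tree): a stand-alone sketch of
 * what the rotations above do on a plain parent-linked binary tree. The
 * struct demo_node and its field names are assumptions for illustration;
 * the real struct interval_node differs and additionally maintains
 * per-subtree interval maxima. The right rotation is the exact mirror
 * image (swap "left" and "right" throughout).
 */
struct demo_node {
        struct demo_node *left, *right, *parent;
};

static void demo_rotate_left(struct demo_node *node, struct demo_node **root)
{
        struct demo_node *pivot = node->right;

        /* pivot's left child becomes node's right child */
        node->right = pivot->left;
        if (pivot->left)
                pivot->left->parent = node;

        /* pivot takes node's place under node's parent (or as the root) */
        pivot->parent = node->parent;
        if (!node->parent)
                *root = pivot;
        else if (node == node->parent->left)
                node->parent->left = pivot;
        else
                node->parent->right = pivot;

        /* finally, node becomes pivot's left child */
        pivot->left = node;
        node->parent = pivot;
}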
* is the "highest lock". This function returns the new KMS value.
* Caller must hold lr_lock already.
*
- * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
+ * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes!
+ */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
struct ldlm_resource *res = lock->l_resource;
/* don't let another thread in ldlm_extent_shift_kms race in
* just after we finish and take our lock into account in its
- * calculation of the kms */
+ * calculation of the kms
+ */
lock->l_flags |= LDLM_FL_KMS_IGNORE;
list_for_each(tmp, &res->lr_granted) {
return old_kms;
/* This extent _has_ to be smaller than old_kms (checked above)
- * so kms can only ever be smaller or the same as old_kms. */
+ * so kms can only ever be smaller or the same as old_kms.
+ */
if (lck->l_policy_data.l_extent.end + 1 > kms)
kms = lck->l_policy_data.l_extent.end + 1;
}
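/* Illustrative aside (not from the Lustre tree): a minimal sketch of the KMS
 * rule above. Each granted extent [x,y] protects a known minimum size of
 * y + 1 bytes, and the result can never exceed old_kms. The demo_extent type
 * is an assumption for illustration; the real loop walks lr_granted and
 * skips the lock that set LDLM_FL_KMS_IGNORE.
 */
struct demo_extent { __u64 start, end; };

static __u64 demo_shift_kms(const struct demo_extent *granted, int n,
                            __u64 old_kms)
{
        __u64 kms = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (granted[i].end + 1 >= old_kms)
                        return old_kms; /* a remaining lock still covers old_kms */
                if (granted[i].end + 1 > kms)
                        kms = granted[i].end + 1;
        }
        return kms;     /* e.g. a single lock on [0, 4095] gives kms = 4096 */
}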
res->lr_itree[idx].lit_size++;
- /* even though we use interval tree to manage the extent lock, we also
- * add the locks into grant list, for debug purpose, .. */
+ /* Even though we use an interval tree to manage the extent lock, we also
+ * add the locks into the grant list, for debugging purposes, ..
+ */
ldlm_resource_add_lock(res, &res->lr_granted, lock);
}
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
/* when reaching here, it is under lock_res_and_lock(). Thus,
- need call the nolock version of ldlm_lock_decref_internal*/
+ * we need to call the nolock version of ldlm_lock_decref_internal
+ */
ldlm_lock_decref_internal_nolock(lock, mode);
}
*err = ELDLM_OK;
/* No blocking ASTs are sent to the clients for
- * Posix file & record locks */
+ * POSIX file & record locks
+ */
req->l_blocking_ast = NULL;
reprocess:
if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
- /* This loop determines where this processes locks start
- * in the resource lr_granted list. */
+ /* This loop determines where this process's locks start
+ * in the resource lr_granted list.
+ */
list_for_each(tmp, &res->lr_granted) {
lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
lockmode_verify(mode);
/* This loop determines if there are existing locks
- * that conflict with the new lock request. */
+ * that conflict with the new lock request.
+ */
list_for_each(tmp, &res->lr_granted) {
lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
}
/* Scan the locks owned by this process that overlap this request.
- * We may have to merge or split existing locks. */
-
+ * We may have to merge or split existing locks.
+ */
if (!ownlocks)
ownlocks = &res->lr_granted;
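/* Worked example of the merge/split cases handled below (byte ranges
 * assumed for illustration, standard POSIX flock semantics): if this
 * process holds a write lock on [100, 199] and now requests a read lock on
 * [150, 159], the old lock must be split into [100, 149] and [160, 199];
 * if instead the new lock has the same mode and spans [200, 299], it
 * adjoins [100, 199] (200 == 199 + 1) and the two merge into [100, 299].
 */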
/* If the modes are the same then we need to process
* locks that overlap OR adjoin the new lock. The extra
* logic condition is necessary to deal with arithmetic
- * overflow and underflow. */
+ * overflow and underflow.
+ */
if ((new->l_policy_data.l_flock.start >
(lock->l_policy_data.l_flock.end + 1))
&& (lock->l_policy_data.l_flock.end !=
* with the request but this would complicate the reply
* processing since updates to req get reflected in the
* reply. The client side replays the lock request so
- * it must see the original lock data in the reply. */
+ * it must see the original lock data in the reply.
+ */
/* XXX - if ldlm_lock_new() can sleep we should
* release the lr_lock, allocate the new lock,
- * and restart processing this lock. */
+ * and restart processing this lock.
+ */
if (!new2) {
unlock_res_and_lock(req);
new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
if (*flags != LDLM_FL_WAIT_NOREPROC) {
- /* The only one possible case for client-side calls flock
- * policy function is ldlm_flock_completion_ast inside which
- * carries LDLM_FL_WAIT_NOREPROC flag. */
+ /* The only possible case in which the client side calls the flock
+ * policy function is from inside ldlm_flock_completion_ast, which
+ * carries the LDLM_FL_WAIT_NOREPROC flag.
+ */
CERROR("Illegal parameter for client-side-only module.\n");
LBUG();
}
/* In case we're reprocessing the requested lock we can't destroy
* it until after calling ldlm_add_ast_work_item() above so that laawi()
* can bump the reference count on \a req. Otherwise \a req
- * could be freed before the completion AST can be sent. */
+ * could be freed before the completion AST can be sent.
+ */
if (added)
ldlm_flock_destroy(req, mode, *flags);
/* Import invalidation. We need to actually release the lock
* references being held, so that it can go away. No point in
* holding the lock even if app still believes it has it, since
- * server already dropped it anyway. Only for granted locks too. */
+ * server already dropped it anyway. Only for granted locks too.
+ */
if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
(LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
if (lock->l_req_mode == lock->l_granted_mode &&
} else if (flags & LDLM_FL_TEST_LOCK) {
/* fcntl(F_GETLK) request */
/* The old mode was saved in getlk->fl_type so that if the mode
- * in the lock changes we can decref the appropriate refcount.*/
+ * in the lock changes we can decref the appropriate refcount.
+ */
ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
switch (lock->l_granted_mode) {
case LCK_PR:
__u64 noreproc = LDLM_FL_WAIT_NOREPROC;
/* We need to reprocess the lock to do merges or splits
- * with existing locks owned by this process. */
+ * with existing locks owned by this process.
+ */
ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
}
unlock_res_and_lock(lock);
lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
/* Compat code, old clients had no idea about owner field and
* relied solely on pid for ownership. Introduced in LU-104, 2.1,
- * April 2011 */
+ * April 2011
+ */
lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
}
LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither
- * sending nor waiting for any rpcs) */
+ * sending nor waiting for any RPCs)
+ */
};
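/* Illustrative use (assumed caller, not from the patch): the LDLM_CANCEL_*
 * policy values are bit flags and combine as a bitmask with the LCF_*
 * cancel flags, so a shrinker-style caller could request a non-blocking LRU
 * scan like:
 *
 *      ldlm_cancel_lru(ns, 0, LCF_ASYNC,
 *                      LDLM_CANCEL_SHRINK | LDLM_CANCEL_NO_WAIT);
 */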
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
void client_destroy_import(struct obd_import *imp)
{
/* Drop security policy instance after all RPCs have finished/aborted
- * to let all busy contexts be released. */
+ * to let all busy contexts be released.
+ */
class_import_get(imp);
class_destroy_import(imp);
sptlrpc_import_sec_put(imp);
int rc;
/* In a more perfect world, we would hang a ptlrpc_client off of
- * obd_type and just use the values from there. */
+ * obd_type and just use the values from there.
+ */
if (!strcmp(name, LUSTRE_OSC_NAME)) {
rq_portal = OST_REQUEST_PORTAL;
rp_portal = OSC_REPLY_PORTAL;
/* This value may be reduced at connect time in
* ptlrpc_connect_interpret() . We initialize it to only
* 1MB until we know what the performance looks like.
- * In the future this should likely be increased. LU-1431 */
+ * In the future this should likely be increased. LU-1431
+ */
cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
LNET_MTU >> PAGE_CACHE_SHIFT);
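/* For illustration: with 4 KiB pages, LNET_MTU >> PAGE_CACHE_SHIFT is
 * (1 << 20) >> 12 = 256 pages, i.e. the 1MB initial cap mentioned above,
 * and min_t() keeps the value within PTLRPC_MAX_BRW_PAGES.
 */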
/* Mark import deactivated now, so we don't try to reconnect if any
* of the cleanup RPCs fails (e.g. LDLM cancel, etc). We don't
- * fully deactivate the import, or that would drop all requests. */
+ * fully deactivate the import, or that would drop all requests.
+ */
spin_lock(&imp->imp_lock);
imp->imp_deactive = 1;
spin_unlock(&imp->imp_lock);
/* Some non-replayable imports (MDS's OSCs) are pinged, so just
* delete it regardless. (It's safe to delete an import that was
- * never added.) */
+ * never added.)
+ */
(void)ptlrpc_pinger_del_import(imp);
if (obd->obd_namespace) {
}
/* There's no need to hold sem while disconnecting an import,
- * and it may actually cause deadlock in GSS. */
+ * and it may actually cause deadlock in GSS.
+ */
up_write(&cli->cl_sem);
rc = ptlrpc_disconnect_import(imp, 0);
down_write(&cli->cl_sem);
out_disconnect:
/* Use server style - class_disconnect should be always called for
- * o_disconnect. */
+ * o_disconnect.
+ */
err = class_disconnect(exp);
if (!rc && err)
rc = err;
struct obd_device *obd;
/* Check that we still have all structures alive as this may
- * be some late RPC at shutdown time. */
+ * be some late RPC at shutdown time.
+ */
if (unlikely(!req->rq_export || !req->rq_export->exp_obd ||
!exp_connect_lru_resize(req->rq_export))) {
lustre_msg_set_slv(req->rq_repmsg, 0);
* reply ref until ptlrpc_handle_rs() is done
* with the reply state (if the send was successful, there
* would have been +1 ref for the net, which
- * reply_out_callback leaves alone) */
+ * reply_out_callback leaves alone)
+ */
rs->rs_on_net = 0;
ptlrpc_rs_addref(rs);
}
if (lock->l_export && lock->l_export->exp_lock_hash) {
- /* NB: it's safe to call cfs_hash_del() even lock isn't
- * in exp_lock_hash. */
+ /* NB: it's safe to call cfs_hash_del() even if the lock isn't
+ * in exp_lock_hash.
+ */
/* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp() */
+ * ldlm_export_lock_keycmp()
+ */
/* coverity[overrun-buffer-val] */
cfs_hash_del(lock->l_export->exp_lock_hash,
&lock->l_remote_handle, &lock->l_exp_hash);
return NULL;
/* It's unlikely but possible that someone marked the lock as
- * destroyed after we did handle2object on it */
+ * destroyed after we did handle2object on it
+ */
if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
lu_ref_add(&lock->l_reference, "handle", current);
return lock;
LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
lock->l_flags |= LDLM_FL_AST_SENT;
/* If the enqueuing client said so, tell the AST recipient to
- * discard dirty data, rather than writing back. */
+ * discard dirty data, rather than writing back.
+ */
if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
lock->l_flags |= LDLM_FL_DISCARD_DATA;
LASSERT(list_empty(&lock->l_bl_ast));
if (lock->l_flags & LDLM_FL_LOCAL &&
!lock->l_readers && !lock->l_writers) {
/* If this is a local lock on a server namespace and this was
- * the last reference, cancel the lock. */
+ * the last reference, cancel the lock.
+ */
CDEBUG(D_INFO, "forcing cancel of local lock\n");
lock->l_flags |= LDLM_FL_CBPENDING;
}
if (!lock->l_readers && !lock->l_writers &&
(lock->l_flags & LDLM_FL_CBPENDING)) {
/* If we received a blocked AST and this was the last reference,
- * run the callback. */
+ * run the callback.
+ */
LDLM_DEBUG(lock, "final decref done on cbpending lock");
LDLM_DEBUG(lock, "add lock into lru list");
/* If this is a client-side namespace and this was the last
- * reference, put it on the LRU. */
+ * reference, put it on the LRU.
+ */
ldlm_lock_add_to_lru(lock);
unlock_res_and_lock(lock);
/* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
* are not supported by the server, otherwise, it is done on
- * enqueue. */
+ * enqueue.
+ */
if (!exp_connect_cancelset(lock->l_conn_export) &&
!ns_connect_lru_resize(ns))
ldlm_cancel_lru(ns, 0, LCF_ASYNC, 0);
if (lock->l_policy_data.l_inodebits.bits ==
req->l_policy_data.l_inodebits.bits) {
/* insert point is last lock of
- * the policy group */
+ * the policy group
+ */
prev->res_link =
&policy_end->l_res_link;
prev->mode_link =
} /* loop over policy groups within the mode group */
/* insert point is last lock of the mode group,
- * new policy group is started */
+ * new policy group is started
+ */
prev->res_link = &mode_end->l_res_link;
prev->mode_link = &mode_end->l_sl_mode;
prev->policy_link = &req->l_sl_policy;
}
/* insert point is last lock on the queue,
- * new mode group and new policy group are started */
+ * new mode group and new policy group are started
+ */
prev->res_link = queue->prev;
prev->mode_link = &req->l_sl_mode;
prev->policy_link = &req->l_sl_policy;
break;
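/* An assumed picture of the ordering the three cases above rely on
 * (illustrative only): the granted queue is grouped first by lock mode,
 * then by policy (e.g. inodebits) within each mode group,
 *
 *      [ PR: bits A | PR: bits B ] [ PW: bits C ] ...
 *
 * so the insert point is the tail of a matching policy group, the tail of
 * a matching mode group, or the tail of the whole queue, respectively.
 */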
/* Check if this lock can be matched.
- * Used by LU-2919(exclusive open) for open lease lock */
+ * Used by LU-2919 (exclusive open) for the open lease lock
+ */
if (ldlm_is_excl(lock))
continue;
* if it passes in CBPENDING and the lock still has users.
* this is generally only going to be used by children
* whose parents already hold a lock so forward progress
- * can still happen. */
+ * can still happen.
+ */
if (lock->l_flags & LDLM_FL_CBPENDING &&
!(flags & LDLM_FL_CBPENDING))
continue;
continue;
- /* We match if we have existing lock with same or wider set
- of bits. */
+ /* We match if we have an existing lock with the same or a wider set
+ * of bits.
+ */
if (lock->l_resource->lr_type == LDLM_IBITS &&
((lock->l_policy_data.l_inodebits.bits &
policy->l_inodebits.bits) !=
if (lock->l_req_mode == lock->l_granted_mode) {
/* The server returned a blocked lock, but it was granted
* before we got a chance to actually enqueue it. We don't
- * need to do anything else. */
+ * need to do anything else.
+ */
*flags &= ~(LDLM_FL_BLOCK_GRANTED |
LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
goto out;
LBUG();
/* Some flags from the enqueue want to make it into the AST, via the
- * lock's l_flags. */
+ * lock's l_flags.
+ */
lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
/*
* This can't happen with the blocking_ast, however, because we
* will never call the local blocking_ast until we drop our
* reader/writer reference, which we won't do until we get the
- * reply and finish enqueueing. */
+ * reply and finish enqueueing.
+ */
/* nobody should touch l_cp_ast */
lock_res_and_lock(lock);
list_del_init(&lock->l_cp_ast);
LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
/* save l_completion_ast since it can be changed by
- * mds_intent_policy(), see bug 14225 */
+ * mds_intent_policy(), see bug 14225
+ */
completion_callback = lock->l_completion_ast;
lock->l_flags &= ~LDLM_FL_CP_REQD;
unlock_res_and_lock(lock);
/* We create a ptlrpc request set with flow control extension.
* This request set will use the work_ast_lock function to produce new
* requests and will send a new request each time one completes in order
- * to keep the number of requests in flight to ns_max_parallel_ast */
+ * to keep the number of requests in flight to ns_max_parallel_ast
+ */
arg->set = ptlrpc_prep_fcset(ns->ns_max_parallel_ast ? : UINT_MAX,
work_ast_lock, arg);
if (!arg->set) {
ns = ldlm_res_to_ns(res);
/* Please do not, no matter how tempting, remove this LBUG without
- * talking to me first. -phik */
+ * talking to me first. -phik
+ */
if (lock->l_readers || lock->l_writers) {
LDLM_ERROR(lock, "lock still has references");
LBUG();
ldlm_pool_del(&ns->ns_pool, lock);
- /* Make sure we will not be called again for same lock what is possible
- * if not to zero out lock->l_granted_mode */
+ /* Make sure we will not be called again for the same lock, which is
+ * possible if we do not zero out lock->l_granted_mode.
+ */
lock->l_granted_mode = LCK_MINMODE;
unlock_res_and_lock(lock);
}
goto out;
}
} else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has
- * variable length */
+ * variable length
+ */
void *lvb_data;
lvb_data = kzalloc(lvb_len, GFP_NOFS);
}
/* If we receive the completion AST before the actual enqueue returned,
- * then we might need to switch lock modes, resources, or extents. */
+ * then we might need to switch lock modes, resources, or extents.
+ */
if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
LDLM_DEBUG(lock, "completion AST, new lock mode");
if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
/* BL_AST locks are not needed in LRU.
- * Let ldlm_cancel_lru() be fast. */
+ * Let ldlm_cancel_lru() be fast.
+ */
ldlm_lock_remove_from_lru(lock);
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
LDLM_DEBUG(lock, "completion AST includes blocking AST");
LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");
- /* Let Enqueue to call osc_lock_upcall() and initialize
- * l_ast_data */
+ /* Let Enqueue call osc_lock_upcall() and initialize l_ast_data */
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);
ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);
wake_up(&blp->blp_waitq);
/* can not check blwi->blwi_flags as blwi could be already freed in
- LCF_ASYNC mode */
+ * LCF_ASYNC mode
+ */
if (!(cancel_flags & LCF_ASYNC))
wait_for_completion(&blwi->blwi_comp);
/* Requests arrive in sender's byte order. The ptlrpc service
* handler has already checked and, if necessary, byte-swapped the
* incoming request message body, but I am responsible for the
- * message buffers. */
+ * message buffers.
+ */
/* do nothing for sec context finalize */
if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
}
/* Force a known safe race, send a cancel to the server for a lock
- * which the server has already started a blocking callback on. */
+ * which the server has already started a blocking callback on.
+ */
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
/* If somebody cancels lock and cache is already dropped,
* or lock is failed before cp_ast received on client,
* we can tell the server we have no lock. Otherwise, we
- * should send cancel after dropping the cache. */
+ * should send cancel after dropping the cache.
+ */
if (((lock->l_flags & LDLM_FL_CANCELING) &&
(lock->l_flags & LDLM_FL_BL_DONE)) ||
(lock->l_flags & LDLM_FL_FAILED)) {
return 0;
}
/* BL_AST locks are not needed in LRU.
- * Let ldlm_cancel_lru() be fast. */
+ * Let ldlm_cancel_lru() be fast.
+ */
ldlm_lock_remove_from_lru(lock);
lock->l_flags |= LDLM_FL_BL_AST;
}
* But we'd also like to be able to indicate in the reply that we're
* cancelling right now, because it's unused, or have an intent result
* in the reply, so we might have to push the responsibility for sending
- * the reply down into the AST handlers, alas. */
+ * the reply down into the AST handlers, alas.
+ */
switch (lustre_msg_get_opc(req->rq_reqmsg)) {
case LDLM_BL_CALLBACK:
/* The special case when we cancel locks in LRU
* asynchronously, we pass the list of locks here.
* Thus locks are marked LDLM_FL_CANCELING, but NOT
- * canceled locally yet. */
+ * canceled locally yet.
+ */
count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
blwi->blwi_count,
LCF_BL_AST);
kmem_cache_destroy(ldlm_resource_slab);
- /* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call
- * synchronize_rcu() to wait a grace period elapsed, so that
- * ldlm_lock_free() get a chance to be called. */
+ /* ldlm_lock_put() uses RCU to call ldlm_lock_free(), so we need to call
+ * synchronize_rcu() to wait for a grace period to elapse, so that
+ * ldlm_lock_free() gets a chance to be called.
+ */
synchronize_rcu();
kmem_cache_destroy(ldlm_lock_slab);
kmem_cache_destroy(ldlm_interval_slab);
}
/* We use the same basis for both server side and client side functions
- from a single node. */
+ * from a single node.
+ */
static int ldlm_get_enq_timeout(struct ldlm_lock *lock)
{
int timeout = at_get(ldlm_lock_to_ns_at(lock));
if (AT_OFF)
return obd_timeout / 2;
/* Since these are non-updating timeouts, we should be conservative.
- It would be nice to have some kind of "early reply" mechanism for
- lock callbacks too... */
+ * It would be nice to have some kind of "early reply" mechanism for
+ * lock callbacks too...
+ */
timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
return max(timeout, ldlm_enqueue_min);
}
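/* Worked example (numbers assumed for illustration): with an adaptive
 * timeout estimate of 20s and at_max = 600, this returns
 * min(600, 20 + 20/2) = 30s, i.e. 150% of the estimate (assuming
 * ldlm_enqueue_min <= 30); the enqueue path below then doubles this to
 * 2 * 30 = 60s for its wait (see the * 2 below).
 */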
imp = obd->u.cli.cl_import;
/* Wait a long time for enqueue - server may have to callback a
- lock from another client. Server will evict the other client if it
- doesn't respond reasonably, and then give us the lock. */
+ * lock from another client. Server will evict the other client if it
+ * doesn't respond reasonably, and then give us the lock.
+ */
timeout = ldlm_get_enq_timeout(lock) * 2;
lwd.lwd_lock = lock;
!(lock->l_flags & LDLM_FL_FAILED)) {
/* Make sure that this lock will not be found by raced
* bl_ast and -EINVAL reply is sent to server anyways.
- * bug 17645 */
+ * bug 17645
+ */
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
need_cancel = 1;
ldlm_lock_decref_internal(lock, mode);
/* XXX - HACK because we shouldn't call ldlm_lock_destroy()
- * from llite/file.c/ll_file_flock(). */
+ * from llite/file.c/ll_file_flock().
+ */
- /* This code makes for the fact that we do not have blocking handler on
- * a client for flock locks. As such this is the place where we must
- * completely kill failed locks. (interrupted and those that
- * were waiting to be granted when server evicted us. */
+ /* This code makes up for the fact that we do not have a blocking handler
+ * on the client for flock locks. As such, this is the place where we must
+ * completely kill failed locks (both interrupted ones and those that
+ * were waiting to be granted when the server evicted us).
+ */
if (lock->l_resource->lr_type == LDLM_FLOCK) {
lock_res_and_lock(lock);
ldlm_resource_unlink_lock(lock);
/* Key change rehash lock in per-export hash with new key */
if (exp->exp_lock_hash) {
/* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp() */
+ * ldlm_export_lock_keycmp()
+ */
/* coverity[overrun-buffer-val] */
cfs_hash_rehash_key(exp->exp_lock_hash,
&lock->l_remote_handle,
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
LDLM_INHERIT_FLAGS);
/* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
- * to wait with no timeout as well */
+ * to wait with no timeout as well
+ */
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
LDLM_FL_NO_TIMEOUT);
unlock_res_and_lock(lock);
/* If enqueue returned a blocked lock but the completion handler has
* already run, then it fixed up the resource and we don't need to do it
- * again. */
+ * again.
+ */
if ((*flags) & LDLM_FL_LOCK_CHANGED) {
int newmode = reply->lock_desc.l_req_mode;
if ((*flags) & LDLM_FL_AST_SENT ||
/* Cancel extent locks as soon as possible on a liblustre client,
* because it cannot handle asynchronous ASTs robustly (see
- * bug 7311). */
+ * bug 7311).
+ */
(LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
lock_res_and_lock(lock);
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
}
/* If the lock has already been granted by a completion AST, don't
- * clobber the LVB with an older one. */
+ * clobber the LVB with an older one.
+ */
if (lvb_len != 0) {
/* We must lock or a racing completion might update lvb without
* letting us know and we'll clobber the correct value.
- * Cannot unlock after the check either, a that still leaves
- * a tiny window for completion to get in */
+ * Cannot unlock after the check either, as that still leaves
+ * a tiny window for completion to get in
+ */
lock_res_and_lock(lock);
if (lock->l_req_mode != lock->l_granted_mode)
rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
if (lvb_len && lvb) {
/* Copy the LVB here, and not earlier, because the completion
- * AST (if any) can override what we got in the reply */
+ * AST (if any) can override what we got in the reply
+ */
memcpy(lvb, lock->l_lvb_data, lvb_len);
}
/* Cancel LRU locks here _only_ if the server supports
* EARLY_CANCEL. Otherwise we have to send extra CANCEL
- * RPC, which will make us slower. */
+ * RPC, which will make us slower.
+ */
if (avail > count)
count += ldlm_cancel_lru_local(ns, cancels, to_free,
avail - count, 0, flags);
/* Skip first lock handler in ldlm_request_pack(),
* this method will increment @lock_count according
* to the lock handle amount actually written to
- * the buffer. */
+ * the buffer.
+ */
dlm->lock_count = canceloff;
}
/* Pack into the request @pack lock handles. */
ns = exp->exp_obd->obd_namespace;
/* If we're replaying this lock, just check some invariants.
- * If we're creating a new lock, get everything all setup nice. */
+ * If we're creating a new lock, get everything set up nicely.
+ */
if (is_replay) {
lock = ldlm_handle2lock_long(lockh, 0);
LASSERT(lock);
lockh, rc);
/* If ldlm_cli_enqueue_fini did not find the lock, we need to free
- * one reference that we took */
+ * one reference that we took
+ */
if (err == -ENOLCK)
LDLM_LOCK_RELEASE(lock);
else
- /* XXX: it would be better to pack lock handles grouped by resource.
- * so that the server cancel would call filter_lvbo_update() less
- * frequently. */
+ /* XXX: it would be better to pack lock handles grouped by resource,
+ * so that the server-side cancel would call filter_lvbo_update() less
+ * frequently.
+ */
list_for_each_entry(lock, head, l_bl_ast) {
if (!count--)
break;
/**
* Prepare and send a batched cancel RPC. It will include \a count lock
- * handles of locks given in \a cancels list. */
+ * handles of locks given in \a cancels list.
+ */
static int ldlm_cli_cancel_req(struct obd_export *exp,
struct list_head *cancels,
int count, enum ldlm_cancel_flags flags)
* is the case when server does not support LRU resize feature.
* This is also possible in some recovery cases when server-side
* reqs have no reference to the OBD export and thus access to
- * server-side namespace is not possible. */
+ * server-side namespace is not possible.
+ */
if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
lustre_msg_get_limit(req->rq_repmsg) == 0) {
DEBUG_REQ(D_HA, req,
* to the pool thread. We do not access obd_namespace and pool
* directly here as there is no reliable way to make sure that
* they are still alive at cleanup time. Evil races are possible
- * which may cause Oops at that time. */
+ * which may cause Oops at that time.
+ */
write_lock(&obd->obd_pool_lock);
obd->obd_pool_slv = new_slv;
obd->obd_pool_limit = new_limit;
}
/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
* RPC which goes to canceld portal, so we can cancel other LRU locks
- * here and send them all as one LDLM_CANCEL RPC. */
+ * here and send them all as one LDLM_CANCEL RPC.
+ */
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, &cancels);
/* Until we have compound requests and can send LDLM_CANCEL
* requests batched with generic RPCs, we need to send cancels
* with the LDLM_FL_BL_AST flag in a separate RPC from
- * the one being generated now. */
+ * the one being generated now.
+ */
if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
LDLM_DEBUG(lock, "Cancel lock separately");
list_del_init(&lock->l_bl_ast);
lock_res_and_lock(lock);
/* don't check added & count since we want to process all locks
- * from unused list */
+ * from unused list
+ */
switch (lock->l_resource->lr_type) {
case LDLM_EXTENT:
case LDLM_IBITS:
unsigned long la;
/* Stop LRU processing when we reach past @count or have checked all
- * locks in LRU. */
+ * locks in LRU.
+ */
if (count && added >= count)
return LDLM_POLICY_KEEP_LOCK;
ldlm_pool_set_clv(pl, lv);
- /* Stop when SLV is not yet come from server or lv is smaller than
- * it is. */
+ /* Stop when the SLV has not yet come from the server, or lv is
+ * smaller than it.
+ */
return (slv == 0 || lv < slv) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
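/* Illustrative numbers (assumed): with a server SLV of 1000, a lock whose
 * computed volume lv is 1500 is cancelled (lv >= slv), one with lv = 500 is
 * kept, and slv == 0 (no SLV from the server yet) keeps everything.
 */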
int count)
{
/* Stop LRU processing when we reach past @count or have checked all
- * locks in LRU. */
+ * locks in LRU.
+ */
return (added >= count) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
int count)
{
/* Stop LRU processing when we reach past count or have checked all
- * locks in LRU. */
+ * locks in LRU.
+ */
return (added >= count) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
continue;
/* Somebody is already doing CANCEL. No need for this
- * lock in LRU, do not traverse it again. */
+ * lock in LRU, do not traverse it again.
+ */
if (!(lock->l_flags & LDLM_FL_CANCELING))
break;
/* Another thread is removing lock from LRU, or
* somebody is already doing CANCEL, or there
* is a blocking request which will send cancel
- * by itself, or the lock is no longer unused. */
+ * by itself, or the lock is no longer unused.
+ */
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference,
__func__, current);
* better send cancel notification to server, so that it
* frees appropriate state. This might lead to a race
* where while we are doing cancel here, server is also
- * silently cancelling this lock. */
+ * silently cancelling this lock.
+ */
lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
/* Setting the CBPENDING flag is a little misleading,
* but prevents an important race; namely, once
* CBPENDING is set, the lock can accumulate no more
* readers/writers. Since readers and writers are
* already zero here, ldlm_lock_decref() won't see
- * this flag and call l_blocking_ast */
+ * this flag and call l_blocking_ast
+ */
lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
/* We can't re-add to l_lru as it confuses the
* refcounting in ldlm_lock_remove_from_lru() if an AST
* arrives after we drop lr_lock below. We use l_bl_ast
* and can't use l_pending_chain as it is used both on
* server and client nevertheless bug 5666 says it is
- * used only on server */
+ * used only on server
+ */
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, cancels);
unlock_res_and_lock(lock);
int count, rc;
/* Just prepare the list of locks, do not actually cancel them yet.
- * Locks are cancelled later in a separate thread. */
+ * Locks are cancelled later in a separate thread.
+ */
count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
if (rc == 0)
continue;
/* If somebody is already doing CANCEL, or blocking AST came,
- * skip this lock. */
+ * skip this lock.
+ */
if (lock->l_flags & LDLM_FL_BL_AST ||
lock->l_flags & LDLM_FL_CANCELING)
continue;
continue;
/* If policy is given and this is IBITS lock, add to list only
- * those locks that match by policy. */
+ * those locks that match by policy.
+ */
if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
!(lock->l_policy_data.l_inodebits.bits &
policy->l_inodebits.bits))
* Usually it is enough to have just 1 RPC, but it is possible that
* there are too many locks to be cancelled in LRU or on a resource.
* It would also speed up the case when the server does not support
- * the feature. */
+ * the feature.
+ */
while (count > 0) {
LASSERT(!list_empty(cancels));
lock = list_entry(cancels->next, struct ldlm_lock,
* Cancel all locks on a resource that have 0 readers/writers.
*
* If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
- * to notify the server. */
+ * to notify the server.
+ */
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
ldlm_policy_data_t *policy,
exp = req->rq_export;
if (exp && exp->exp_lock_hash) {
/* In the function below, .hs_keycmp resolves to
- * ldlm_export_lock_keycmp() */
+ * ldlm_export_lock_keycmp()
+ */
/* coverity[overrun-buffer-val] */
cfs_hash_rehash_key(exp->exp_lock_hash,
&lock->l_remote_handle,
- /* If this is reply-less callback lock, we cannot replay it, since
- * server might have long dropped it, but notification of that event was
- * lost by network. (and server granted conflicting lock already) */
+ /* If this is a reply-less callback lock, we cannot replay it, since the
+ * server might have dropped it long ago, and the notification of that
+ * event was lost on the network (and the server may already have granted
+ * a conflicting lock).
+ */
if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
LDLM_DEBUG(lock, "Not replaying reply-less lock:");
ldlm_lock_cancel(lock);
/* notify the server we've replayed all requests.
* also, we mark the request to be put on a dedicated
- * queue to be processed after all request replayes.
- * bug 6063 */
+ * queue to be processed after all request replays.
+ * bug 6063
+ */
lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
LDLM_DEBUG(lock, "replaying lock:");
/* We don't need to care whether or not LRU resize is enabled
* because the LDLM_CANCEL_NO_WAIT policy doesn't use the
- * count parameter */
+ * count parameter
+ */
canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
struct mutex ldlm_cli_namespace_lock;
/* Client Namespaces that have active resources in them.
* Once all resources go away, ldlm_poold moves such namespaces to the
- * inactive list */
+ * inactive list
+ */
LIST_HEAD(ldlm_cli_active_namespace_list);
/* Client namespaces that don't have any locks in them */
static LIST_HEAD(ldlm_cli_inactive_namespace_list);
struct dentry *ldlm_svc_debugfs_dir;
- /* during debug dump certain amount of granted locks for one resource to avoid
- * DDOS. */
+ /* During a debug dump, print only a certain number of granted locks for one
+ * resource, to avoid a DDoS.
+ */
static unsigned int ldlm_dump_granted_max = 256;
static ssize_t
ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
/* Make sure that LRU resize was originally supported before
- * turning it on here. */
+ * turning it on here.
+ */
if (lru_resize &&
(ns->ns_orig_connect_flags & OBD_CONNECT_LRU_RESIZE)) {
CDEBUG(D_DLMTRACE,
struct lustre_handle lockh;
- /* First, we look for non-cleaned-yet lock
- * all cleaned locks are marked by CLEANED flag. */
+ /* First, we look for a not-yet-cleaned lock;
+ * all cleaned locks are marked by the CLEANED flag.
+ */
lock_res(res);
list_for_each(tmp, q) {
lock = list_entry(tmp, struct ldlm_lock,
}
/* Set CBPENDING so nothing in the cancellation path
- * can match this lock. */
+ * can match this lock.
+ */
lock->l_flags |= LDLM_FL_CBPENDING;
lock->l_flags |= LDLM_FL_FAILED;
lock->l_flags |= flags;
/* This is a little bit gross, but much better than the
* alternative: pretend that we got a blocking AST from
* the server, so that when the lock is decref'd, it
- * will go away ... */
+ * will go away ...
+ */
unlock_res(res);
LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
if (lock->l_completion_ast)
atomic_read(&ns->ns_bref) == 0, &lwi);
/* Forced cleanups should be able to reclaim all references,
- * so it's safe to wait forever... we can't leak locks... */
+ * so it's safe to wait forever... we can't leak locks...
+ */
if (force && rc == -ETIMEDOUT) {
LCONSOLE_ERROR("Forced cleanup waiting for %s namespace with %d resources in use, (rc=%d)\n",
ldlm_ns_name(ns),
LASSERT(!list_empty(&ns->ns_list_chain));
/* Some asserts and possibly other parts of the code are still
* using list_empty(&ns->ns_list_chain). This is why it is
- * important to use list_del_init() here. */
+ * important to use list_del_init() here.
+ */
list_del_init(&ns->ns_list_chain);
ldlm_namespace_nr_dec(client);
mutex_unlock(ldlm_namespace_lock(client));
ldlm_namespace_unregister(ns, ns->ns_client);
/* Fini pool _before_ parent proc dir is removed. This is important as
* ldlm_pool_fini() removes own proc dir which is child to @dir.
- * Removing it after @dir may cause oops. */
+ * Removing it after @dir may cause oops.
+ */
ldlm_pool_fini(&ns->ns_pool);
ldlm_namespace_debugfs_unregister(ns);
cfs_hash_putref(ns->ns_rs_hash);
/* Namespace \a ns should be not on list at this time, otherwise
* this will cause issues related to using freed \a ns in poold
- * thread. */
+ * thread.
+ */
LASSERT(list_empty(&ns->ns_list_chain));
kfree(ns);
ldlm_put_ref();
lu_ref_init(&res->lr_reference);
/* The creator of the resource must unlock the mutex after LVB
- * initialization. */
+ * initialization.
+ */
mutex_init(&res->lr_lvb_mutex);
mutex_lock(&res->lr_lvb_mutex);
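/* How this is meant to be used (assumed sketch): the creating thread keeps
 * lr_lvb_mutex held across LVB setup, so any concurrent finder of the same
 * resource blocks in mutex_lock(&res->lr_lvb_mutex) until initialization
 * finishes and the creator unlocks; the finder then unlocks immediately.
 */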
/* Let's see if we happened to be the very first resource in this
* namespace. If so, and this is a client namespace, we need to move
* the namespace into the active namespaces list to be patrolled by
- * the ldlm_poold. */
+ * the ldlm_poold.
+ */
if (ns_refcount == 1) {
mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);