cfs_time_current_sec() is a trivial one-line wrapper around
get_seconds(). Remove the wrapper and just call get_seconds() directly
at every call site.
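The conversion is mechanical: the wrapper body is literally "return
get_seconds();", handing back the wall-clock seconds as a time_t, so
switching a caller over does not change the value it sees. The pattern,
as a minimal sketch taken from one of the hunks below:

	-	time_t now = cfs_time_current_sec();
	+	time_t now = get_seconds();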
Cc: Andreas Dilger <andreas.dilger@intel.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: hpdd-discuss <hpdd-discuss@lists.01.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
return jiffies;
}
-static inline time_t cfs_time_current_sec(void)
-{
- return get_seconds();
-}
-
static inline void cfs_fs_time_current(struct timespec *t)
{
*t = CURRENT_TIME;
ksocknal_data.ksnd_connd_starting = 0;
ksocknal_data.ksnd_connd_failed_stamp = 0;
- ksocknal_data.ksnd_connd_starting_stamp = cfs_time_current_sec();
+ ksocknal_data.ksnd_connd_starting_stamp = get_seconds();
/* must have at least 2 connds to remain responsive to accepts while
* connecting */
if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
/* we tried ... */
LASSERT(ksocknal_data.ksnd_connd_starting > 0);
ksocknal_data.ksnd_connd_starting--;
- ksocknal_data.ksnd_connd_failed_stamp = cfs_time_current_sec();
+ ksocknal_data.ksnd_connd_failed_stamp = get_seconds();
return 1;
}
while (!ksocknal_data.ksnd_shuttingdown) {
ksock_route_t *route = NULL;
- long sec = cfs_time_current_sec();
+ long sec = get_seconds();
long timeout = MAX_SCHEDULE_TIMEOUT;
int dropped_lock = 0;
case IOC_LIBCFS_NOTIFY_ROUTER:
return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
cfs_time_current() -
- cfs_time_seconds(cfs_time_current_sec() -
+ cfs_time_seconds(get_seconds() -
(time_t)data->ioc_u64[0]));
case IOC_LIBCFS_PORTALS_COMPATIBILITY:
/* LND will fill in the address part of the NID */
ni->ni_nid = LNET_MKNID(net, 0);
- ni->ni_last_alive = cfs_time_current_sec();
+ ni->ni_last_alive = get_seconds();
list_add_tail(&ni->ni_list, nilist);
return ni;
failed:
}
if (the_lnet.ln_routing &&
- ni->ni_last_alive != cfs_time_current_sec()) {
+ ni->ni_last_alive != get_seconds()) {
lnet_ni_lock(ni);
/* NB: so far this is the only place to set NI status to "up" */
- ni->ni_last_alive = cfs_time_current_sec();
+ ni->ni_last_alive = get_seconds();
if (ni->ni_status != NULL &&
ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
ni->ni_status->ns_status = LNET_NI_STATUS_UP;
timeout = router_ping_timeout +
MAX(live_router_check_interval, dead_router_check_interval);
- now = cfs_time_current_sec();
+ now = get_seconds();
list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
if (ni->ni_lnd->lnd_type == LOLND)
continue;
static time_t last = 0;
static int running = 0;
- time_t now = cfs_time_current_sec();
+ time_t now = get_seconds();
int interval = now - last;
int rc;
__u64 version;
if (ni != NULL) {
struct lnet_tx_queue *tq;
char *stat;
- long now = cfs_time_current_sec();
+ long now = get_seconds();
int last_alive = -1;
int i;
int j;
mutex_lock(&console_session.ses_mutex);
- console_session.ses_laststamp = cfs_time_current_sec();
+ console_session.ses_laststamp = get_seconds();
if (console_session.ses_shutdown) {
rc = -ESHUTDOWN;
}
if (!console_session.ses_expired &&
- cfs_time_current_sec() - console_session.ses_laststamp >
+ get_seconds() - console_session.ses_laststamp >
(time_t)console_session.ses_timeout)
console_session.ses_expired = 1;
CDEBUG(D_NET, "Ping %d nodes in session\n", count);
- ptimer->stt_expires = (unsigned long)(cfs_time_current_sec() + LST_PING_INTERVAL);
+ ptimer->stt_expires = (unsigned long)(get_seconds() + LST_PING_INTERVAL);
stt_add_timer(ptimer);
mutex_unlock(&console_session.ses_mutex);
}
ptimer = &console_session.ses_ping_timer;
- ptimer->stt_expires = (unsigned long)(cfs_time_current_sec() + LST_PING_INTERVAL);
+ ptimer->stt_expires = (unsigned long)(get_seconds() + LST_PING_INTERVAL);
stt_add_timer(ptimer);
console_session.ses_expired = 0;
console_session.ses_feats_updated = 0;
console_session.ses_features = LST_FEATS_MASK;
- console_session.ses_laststamp = cfs_time_current_sec();
+ console_session.ses_laststamp = get_seconds();
mutex_init(&console_session.ses_mutex);
sn->sn_timer_active = 1;
timer->stt_expires = cfs_time_add(sn->sn_timeout,
- cfs_time_current_sec());
+ get_seconds());
stt_add_timer(timer);
return;
}
}
if (rc != 0) {
- scd->scd_buf_err_stamp = cfs_time_current_sec();
+ scd->scd_buf_err_stamp = get_seconds();
scd->scd_buf_err = rc;
LASSERT(scd->scd_buf_posting > 0);
timer->stt_data = rpc;
timer->stt_func = srpc_client_rpc_expired;
timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
- cfs_time_current_sec());
+ get_seconds());
stt_add_timer(timer);
return;
}
}
if (scd->scd_buf_err_stamp != 0 &&
- scd->scd_buf_err_stamp < cfs_time_current_sec()) {
+ scd->scd_buf_err_stamp < get_seconds()) {
/* re-enable adding buffer */
scd->scd_buf_err_stamp = 0;
scd->scd_buf_err = 0;
/* 1 second pause to avoid timestamp reuse */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1));
- srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
+ srpc_data.rpc_matchbits = ((__u64) get_seconds()) << 48;
srpc_data.rpc_state = SRPC_STATE_NONE;
LASSERT(!stt_data.stt_shuttingdown);
LASSERT(timer->stt_func != NULL);
LASSERT(list_empty(&timer->stt_list));
- LASSERT(cfs_time_after(timer->stt_expires, cfs_time_current_sec()));
+ LASSERT(cfs_time_after(timer->stt_expires, get_seconds()));
/* a simple insertion sort */
list_for_each_prev(pos, STTIMER_SLOT(timer->stt_expires)) {
unsigned long now;
unsigned long this_slot;
- now = cfs_time_current_sec();
+ now = get_seconds();
this_slot = now & STTIMER_SLOTTIMEMASK;
spin_lock(&stt_data.stt_lock);
int i;
stt_data.stt_shuttingdown = 0;
- stt_data.stt_prev_slot = cfs_time_current_sec() & STTIMER_SLOTTIMEMASK;
+ stt_data.stt_prev_slot = get_seconds() & STTIMER_SLOTTIMEMASK;
spin_lock_init(&stt_data.stt_lock);
for (i = 0; i < STTIMER_NSLOTS; i++)
static inline void set_capa_expiry(struct obd_capa *ocapa)
{
unsigned long expiry = cfs_time_sub((unsigned long)ocapa->c_capa.lc_expiry,
- cfs_time_current_sec());
+ get_seconds());
ocapa->c_expiry = cfs_time_add(cfs_time_current(),
cfs_time_seconds(expiry));
}
static inline int capa_is_expired_sec(struct lustre_capa *capa)
{
- return (capa->lc_expiry - cfs_time_current_sec() <= 0);
+ return (capa->lc_expiry - get_seconds() <= 0);
}
static inline int capa_is_expired(struct obd_capa *ocapa)
{
LASSERT(exp->exp_delayed);
return cfs_time_before(cfs_time_add(exp->exp_last_request_time, age),
- cfs_time_current_sec());
+ get_seconds());
}
static inline int exp_connect_cancelset(struct obd_export *exp)
spin_lock(&at->at_lock);
at->at_current = val;
at->at_worst_ever = val;
- at->at_worst_time = cfs_time_current_sec();
+ at->at_worst_time = get_seconds();
spin_unlock(&at->at_lock);
}
static inline void at_init(struct adaptive_timeout *at, int val, int flags) {
desc = req->rq_bulk;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- req->rq_bulk_deadline > cfs_time_current_sec())
+ req->rq_bulk_deadline > get_seconds())
return 1;
if (!desc)
ptlrpc_client_early(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > cfs_time_current_sec())
+ req->rq_reply_deadline > get_seconds())
return 0;
return req->rq_early;
}
ptlrpc_client_replied(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > cfs_time_current_sec())
+ req->rq_reply_deadline > get_seconds())
return 0;
return req->rq_replied;
}
ptlrpc_client_recv(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > cfs_time_current_sec())
+ req->rq_reply_deadline > get_seconds())
return 1;
return req->rq_receiving_reply;
}
spin_lock(&req->rq_lock);
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > cfs_time_current_sec()) {
+ req->rq_reply_deadline > get_seconds()) {
spin_unlock(&req->rq_lock);
return 1;
}
ldlm_error_t rc = ELDLM_OK;
struct ldlm_interval *node = NULL;
- lock->l_last_activity = cfs_time_current_sec();
+ lock->l_last_activity = get_seconds();
/* policies are not executed on the client or during replay */
if ((*flags & (LDLM_FL_HAS_INTENT|LDLM_FL_REPLAY)) == LDLM_FL_HAS_INTENT
&& !local && ns->ns_policy) {
{
time_t recalc_interval_sec;
- recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period)
return 0;
spin_lock(&pl->pl_lock);
- recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period) {
spin_unlock(&pl->pl_lock);
return 0;
*/
ldlm_pool_recalc_grant_plan(pl);
- pl->pl_recalc_time = cfs_time_current_sec();
+ pl->pl_recalc_time = get_seconds();
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
recalc_interval_sec);
spin_unlock(&pl->pl_lock);
{
time_t recalc_interval_sec;
- recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period)
return 0;
/*
* Check if we need to recalc lists now.
*/
- recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period) {
spin_unlock(&pl->pl_lock);
return 0;
*/
ldlm_cli_pool_pop_slv(pl);
- pl->pl_recalc_time = cfs_time_current_sec();
+ pl->pl_recalc_time = get_seconds();
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
recalc_interval_sec);
spin_unlock(&pl->pl_lock);
time_t recalc_interval_sec;
int count;
- recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
if (recalc_interval_sec <= 0)
goto recalc;
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
count);
}
- recalc_interval_sec = pl->pl_recalc_time - cfs_time_current_sec() +
+ recalc_interval_sec = pl->pl_recalc_time - get_seconds() +
pl->pl_recalc_period;
return recalc_interval_sec;
spin_lock_init(&pl->pl_lock);
atomic_set(&pl->pl_granted, 0);
- pl->pl_recalc_time = cfs_time_current_sec();
+ pl->pl_recalc_time = get_seconds();
atomic_set(&pl->pl_lock_volume_factor, 1);
atomic_set(&pl->pl_grant_rate, 0);
LCONSOLE_WARN("lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago)\n",
lock->l_last_activity,
- cfs_time_sub(cfs_time_current_sec(),
+ cfs_time_sub(get_seconds(),
lock->l_last_activity));
LDLM_DEBUG(lock, "lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago); not entering recovery in "
"server code, just going back to sleep",
lock->l_last_activity,
- cfs_time_sub(cfs_time_current_sec(),
+ cfs_time_sub(get_seconds(),
lock->l_last_activity));
if (cfs_time_after(cfs_time_current(), next_dump)) {
last_dump = next_dump;
LDLM_ERROR(lock, "lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago), entering recovery for %s@%s",
lock->l_last_activity,
- cfs_time_sub(cfs_time_current_sec(), lock->l_last_activity),
+ cfs_time_sub(get_seconds(), lock->l_last_activity),
obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
return 0;
LDLM_DEBUG(lock, "client-side enqueue: destroyed");
result = -EIO;
} else {
- delay = cfs_time_sub(cfs_time_current_sec(),
+ delay = cfs_time_sub(get_seconds(),
lock->l_last_activity);
LDLM_DEBUG(lock, "client-side enqueue: granted after "
CFS_DURATION_T"s", delay);
if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) {
snprintf(debug_file_name, sizeof(debug_file_name) - 1,
"%s.%ld." LPLD, libcfs_debug_file_path_arr,
- cfs_time_current_sec(), (long_ptr_t)arg);
+ get_seconds(), (long_ptr_t)arg);
printk(KERN_ALERT "LustreError: dumping log to %s\n",
debug_file_name);
cfs_tracefile_dump_all_pages(debug_file_name);
"cur %lu, ex %ld/%ld\n",
cache->uc_name, entry, entry->ue_key,
atomic_read(&entry->ue_refcount), entry->ue_flags,
- cfs_time_current_sec(), entry->ue_acquire_expire,
+ get_seconds(), entry->ue_acquire_expire,
entry->ue_expire);
UC_CACHE_SET_EXPIRED(entry);
if (!atomic_read(&entry->ue_refcount))
if (attr->ia_valid & (ATTR_MTIME | ATTR_CTIME))
CDEBUG(D_INODE, "setting mtime %lu, ctime %lu, now = %lu\n",
LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
- cfs_time_current_sec());
+ get_seconds());
/* If we are changing file size, file content is modified, flag it. */
if (attr->ia_valid & ATTR_SIZE) {
op_data->op_name = name;
op_data->op_namelen = namelen;
op_data->op_mode = mode;
- op_data->op_mod_time = cfs_time_current_sec();
+ op_data->op_mod_time = get_seconds();
op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
if (resends) {
req->rq_generation_set = 1;
req->rq_import_generation = generation;
- req->rq_sent = cfs_time_current_sec() + resends;
+ req->rq_sent = get_seconds() + resends;
}
/* It is important to obtain rpc_lock first (if applicable), so that
if (resends) {
req->rq_generation_set = 1;
req->rq_import_generation = generation;
- req->rq_sent = cfs_time_current_sec() + resends;
+ req->rq_sent = get_seconds() + resends;
}
level = LUSTRE_IMP_FULL;
resend:
rec->sx_suppgid2 = -1;
rec->sx_fid = *fid;
rec->sx_valid = valid | OBD_MD_FLCTIME;
- rec->sx_time = cfs_time_current_sec();
+ rec->sx_time = get_seconds();
rec->sx_size = output_size;
rec->sx_flags = flags;
INIT_LIST_HEAD(&export->exp_handle.h_link);
INIT_LIST_HEAD(&export->exp_hp_rpcs);
class_handle_hash(&export->exp_handle, &export_handle_ops);
- export->exp_last_request_time = cfs_time_current_sec();
+ export->exp_last_request_time = get_seconds();
spin_lock_init(&export->exp_lock);
spin_lock_init(&export->exp_rpc_lock);
INIT_HLIST_NODE(&export->exp_uuid_hash);
llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
llh->llh_hdr.lrh_len = llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
llh->llh_hdr.lrh_index = llh->llh_tail.lrt_index = 0;
- llh->llh_timestamp = cfs_time_current_sec();
+ llh->llh_timestamp = get_seconds();
if (uuid)
memcpy(&llh->llh_tgtuuid, uuid,
sizeof(llh->llh_tgtuuid));
LPROCFS_CLIMP_CHECK(obd);
imp = obd->u.cli.cl_import;
- now = cfs_time_current_sec();
+ now = get_seconds();
/* Some network health info for kicks */
s2dhms(&ts, now - imp->imp_last_reply_time);
if (resends) {
req->rq_generation_set = 1;
req->rq_import_generation = generation;
- req->rq_sent = cfs_time_current_sec() + resends;
+ req->rq_sent = get_seconds() + resends;
}
rc = ptlrpc_queue_wait(req);
/* cap resend delay to the current request timeout, this is similar to
* what ptlrpc does (see after_reply()) */
if (aa->aa_resends > new_req->rq_timeout)
- new_req->rq_sent = cfs_time_current_sec() + new_req->rq_timeout;
+ new_req->rq_sent = get_seconds() + new_req->rq_timeout;
else
- new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
+ new_req->rq_sent = get_seconds() + aa->aa_resends;
new_req->rq_generation_set = 1;
new_req->rq_import_generation = request->rq_import_generation;
{
unsigned int nl, oldnl;
struct imp_at *at;
- time_t now = cfs_time_current_sec();
+ time_t now = get_seconds();
LASSERT(req->rq_import);
at = &req->rq_import->imp_at;
olddl = req->rq_deadline;
/* server assumes it now has rq_timeout from when it sent the
* early reply, so client should give it at least that long. */
- req->rq_deadline = cfs_time_current_sec() + req->rq_timeout +
+ req->rq_deadline = get_seconds() + req->rq_timeout +
ptlrpc_at_get_net_latency(req);
DEBUG_REQ(D_ADAPTTO, req,
"Early reply #%d, new deadline in "CFS_DURATION_T"s "
"("CFS_DURATION_T"s)", req->rq_early_count,
- cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
+ cfs_time_sub(req->rq_deadline, get_seconds()),
cfs_time_sub(req->rq_deadline, olddl));
return rc;
/* retry indefinitely on EINPROGRESS */
if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
- time_t now = cfs_time_current_sec();
+ time_t now = get_seconds();
DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
req->rq_resend = 1;
int rc;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
- if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
+ if (req->rq_sent && (req->rq_sent > get_seconds()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
return 0;
/* delayed resend - skip */
if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
- req->rq_sent > cfs_time_current_sec())
+ req->rq_sent > get_seconds())
continue;
if (!(req->rq_phase == RQ_PHASE_RPC ||
{
struct ptlrpc_request_set *set = data;
struct list_head *tmp;
- time_t now = cfs_time_current_sec();
+ time_t now = get_seconds();
LASSERT(set != NULL);
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
struct list_head *tmp;
- time_t now = cfs_time_current_sec();
+ time_t now = get_seconds();
int timeout = 0;
struct ptlrpc_request *req;
int deadline;
*/
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
async && request->rq_reply_deadline == 0)
- request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;
+ request->rq_reply_deadline = get_seconds() + LONG_UNLINK;
/*
* Nothing left to do.
#define YEAR_2004 (1ULL << 30)
void ptlrpc_init_xid(void)
{
- time_t now = cfs_time_current_sec();
+ time_t now = get_seconds();
spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
{
/* re-initialize the req */
req->rq_timeout = obd_timeout;
- req->rq_sent = cfs_time_current_sec();
+ req->rq_sent = get_seconds();
req->rq_deadline = req->rq_sent + req->rq_timeout;
req->rq_reply_deadline = req->rq_deadline;
req->rq_phase = RQ_PHASE_INTERPRET;
sptlrpc_request_out_callback(req);
spin_lock(&req->rq_lock);
- req->rq_real_sent = cfs_time_current_sec();
+ req->rq_real_sent = get_seconds();
if (ev->unlinked)
req->rq_req_unlink = 0;
ev->mlength, ev->offset, req->rq_replen);
}
- req->rq_import->imp_last_reply_time = cfs_time_current_sec();
+ req->rq_import->imp_last_reply_time = get_seconds();
out_wake:
/* NB don't unlock till after wakeup; req can disappear under us
imp->imp_state = state;
imp->imp_state_hist[imp->imp_state_hist_idx].ish_state = state;
imp->imp_state_hist[imp->imp_state_hist_idx].ish_time =
- cfs_time_current_sec();
+ get_seconds();
imp->imp_state_hist_idx = (imp->imp_state_hist_idx + 1) %
IMP_STATE_HIST_LEN;
}
static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
{
- time_t now = cfs_time_current_sec();
+ time_t now = get_seconds();
struct list_head *tmp, *n;
struct ptlrpc_request *req;
unsigned int timeout = 0;
int at_measured(struct adaptive_timeout *at, unsigned int val)
{
unsigned int old = at->at_current;
- time_t now = cfs_time_current_sec();
+ time_t now = get_seconds();
time_t binlimit = max_t(time_t, at_history / AT_BINS, 1);
LASSERT(at);
cur = at_get(&svcpt->scp_at_estimate);
worst = svcpt->scp_at_estimate.at_worst_ever;
worstt = svcpt->scp_at_estimate.at_worst_time;
- s2dhms(&ts, cfs_time_current_sec() - worstt);
+ s2dhms(&ts, get_seconds() - worstt);
seq_printf(m, "%10s : cur %3u worst %3u (at %ld, "
DHMS_FMT" ago) ", "service",
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
async && req->rq_bulk_deadline == 0)
- req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
+ req->rq_bulk_deadline = get_seconds() + LONG_UNLINK;
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
return 1; /* never registered */
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
- int service_time = max_t(int, cfs_time_current_sec() -
+ int service_time = max_t(int, get_seconds() -
req->rq_arrival_time.tv_sec, 1);
if (!(flags & PTLRPC_REPLY_EARLY) &&
if (unlikely(rc))
goto out;
- req->rq_sent = cfs_time_current_sec();
+ req->rq_sent = get_seconds();
rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
(rs->rs_difficult && !rs->rs_no_ack) ?
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
do_gettimeofday(&request->rq_arrival_time);
- request->rq_sent = cfs_time_current_sec();
+ request->rq_sent = get_seconds();
/* We give the server rq_timeout secs to process the req, and
add the network latency for our local timeout. */
request->rq_deadline = request->rq_sent + request->rq_timeout +
obd_evict_list);
spin_unlock(&pet_lock);
- expire_time = cfs_time_current_sec() - PING_EVICT_TIMEOUT;
+ expire_time = get_seconds() - PING_EVICT_TIMEOUT;
CDEBUG(D_HA, "evicting all exports of obd %s older than %ld\n",
obd->obd_name, expire_time);
obd->obd_name,
obd_uuid2str(&exp->exp_client_uuid),
obd_export_nid2str(exp),
- (long)(cfs_time_current_sec() -
+ (long)(get_seconds() -
exp->exp_last_request_time),
- exp, (long)cfs_time_current_sec(),
+ exp, (long)get_seconds(),
(long)expire_time,
(long)exp->exp_last_request_time);
CDEBUG(D_HA, "Last request was at %ld\n",
spin_lock(&imp->imp_lock);
if (imp->imp_sec_expire &&
- imp->imp_sec_expire < cfs_time_current_sec()) {
+ imp->imp_sec_expire < get_seconds()) {
adapt = 1;
imp->imp_sec_expire = 0;
}
exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
exp->exp_flvr_old[0] = exp->exp_flvr;
- exp->exp_flvr_expire[0] = cfs_time_current_sec() +
+ exp->exp_flvr_expire[0] = get_seconds() +
EXP_FLVR_UPDATE_EXPIRE;
exp->exp_flvr = flavor;
}
if (exp->exp_flvr_expire[0]) {
- if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
+ if (exp->exp_flvr_expire[0] >= get_seconds()) {
if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
"middle one ("CFS_DURATION_T")\n", exp,
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[0] -
- cfs_time_current_sec());
+ get_seconds());
spin_unlock(&exp->exp_lock);
return 0;
}
/* now it doesn't match the current flavor, the only chance we can
* accept it is match the old flavors which is not expired. */
if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
- if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
+ if (exp->exp_flvr_expire[1] >= get_seconds()) {
if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
"oldest one ("CFS_DURATION_T")\n", exp,
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[1] -
- cfs_time_current_sec());
+ get_seconds());
spin_unlock(&exp->exp_lock);
return 0;
}
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_expire[0] ?
(unsigned long) (exp->exp_flvr_expire[0] -
- cfs_time_current_sec()) : 0,
+ get_seconds()) : 0,
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[1] ?
(unsigned long) (exp->exp_flvr_expire[1] -
- cfs_time_current_sec()) : 0);
+ get_seconds()) : 0);
return -EACCES;
}
EXPORT_SYMBOL(sptlrpc_target_export_check);
page_pools.epp_total_pages,
page_pools.epp_free_pages,
page_pools.epp_idle_idx,
- cfs_time_current_sec() - page_pools.epp_last_shrink,
- cfs_time_current_sec() - page_pools.epp_last_access,
+ get_seconds() - page_pools.epp_last_shrink,
+ get_seconds() - page_pools.epp_last_access,
page_pools.epp_st_max_pages,
page_pools.epp_st_grows,
page_pools.epp_st_grow_fails,
* if no pool access for a long time, we consider it's fully idle.
* a little race here is fine.
*/
- if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+ if (unlikely(get_seconds() - page_pools.epp_last_access >
CACHE_QUIESCENT_PERIOD)) {
spin_lock(&page_pools.epp_lock);
page_pools.epp_idle_idx = IDLE_IDX_MAX;
(long)sc->nr_to_scan, page_pools.epp_free_pages);
page_pools.epp_st_shrinks++;
- page_pools.epp_last_shrink = cfs_time_current_sec();
+ page_pools.epp_last_shrink = get_seconds();
}
spin_unlock(&page_pools.epp_lock);
* if no pool access for a long time, we consider it's fully idle.
* a little race here is fine.
*/
- if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+ if (unlikely(get_seconds() - page_pools.epp_last_access >
CACHE_QUIESCENT_PERIOD)) {
spin_lock(&page_pools.epp_lock);
page_pools.epp_idle_idx = IDLE_IDX_MAX;
if (tick == 0)
tick = cfs_time_current();
- now = cfs_time_current_sec();
+ now = get_seconds();
page_pools.epp_st_missings++;
page_pools.epp_pages_short += desc->bd_iov_count;
this_idle) /
(IDLE_IDX_WEIGHT + 1);
- page_pools.epp_last_access = cfs_time_current_sec();
+ page_pools.epp_last_access = get_seconds();
spin_unlock(&page_pools.epp_lock);
return 0;
page_pools.epp_growing = 0;
page_pools.epp_idle_idx = 0;
- page_pools.epp_last_shrink = cfs_time_current_sec();
- page_pools.epp_last_access = cfs_time_current_sec();
+ page_pools.epp_last_shrink = get_seconds();
+ page_pools.epp_last_access = get_seconds();
spin_lock_init(&page_pools.epp_lock);
page_pools.epp_total_pages = 0;
if (imp) {
spin_lock(&imp->imp_lock);
if (imp->imp_sec)
- imp->imp_sec_expire = cfs_time_current_sec() +
+ imp->imp_sec_expire = get_seconds() +
SEC_ADAPT_DELAY;
spin_unlock(&imp->imp_lock);
}
LASSERT(sec->ps_gc_interval > 0);
LASSERT(list_empty(&sec->ps_gc_list));
- sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
+ sec->ps_gc_next = get_seconds() + sec->ps_gc_interval;
spin_lock(&sec_gc_list_lock);
list_add_tail(&sec_gc_list, &sec->ps_gc_list);
CDEBUG(D_SEC, "check on sec %p(%s)\n", sec, sec->ps_policy->sp_name);
- if (cfs_time_after(sec->ps_gc_next, cfs_time_current_sec()))
+ if (cfs_time_after(sec->ps_gc_next, get_seconds()))
return;
sec->ps_policy->sp_cops->gc_ctx(sec);
- sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
+ sec->ps_gc_next = get_seconds() + sec->ps_gc_interval;
}
static int sec_gc_main(void *arg)
seq_printf(seq, "gc internal %ld\n", sec->ps_gc_interval);
seq_printf(seq, "gc next %ld\n",
sec->ps_gc_interval ?
- sec->ps_gc_next - cfs_time_current_sec() : 0);
+ sec->ps_gc_next - get_seconds() : 0);
sptlrpc_sec_put(sec);
out:
will make it to the top of the list. */
/* Do not pay attention on 1sec or smaller renewals. */
- new_time = cfs_time_current_sec() + extra_delay;
+ new_time = get_seconds() + extra_delay;
if (exp->exp_last_request_time + 1 /*second */ >= new_time)
return;
/* Note - racing to start/reset the obd_eviction timer is safe */
if (exp->exp_obd->obd_eviction_timer == 0) {
/* Check if the oldest entry is expired. */
- if (cfs_time_current_sec() > (oldest_time + PING_EVICT_TIMEOUT +
+ if (get_seconds() > (oldest_time + PING_EVICT_TIMEOUT +
extra_delay)) {
/* We need a second timer, in case the net was down and
* it just came back. Since the pinger may skip every
* other PING_INTERVAL (see note in ptlrpc_pinger_main),
* we better wait for 3. */
exp->exp_obd->obd_eviction_timer =
- cfs_time_current_sec() + 3 * PING_INTERVAL;
+ get_seconds() + 3 * PING_INTERVAL;
CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
exp->exp_obd->obd_name,
obd_export_nid2str(oldest_exp), oldest_time);
}
} else {
- if (cfs_time_current_sec() >
+ if (get_seconds() >
(exp->exp_obd->obd_eviction_timer + extra_delay)) {
/* The evictor won't evict anyone who we've heard from
* recently, so we don't have to check before we start
}
/* Set timer for closest deadline */
- next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
+ next = (__s32)(array->paa_deadline - get_seconds() -
at_early_margin);
if (next <= 0) {
ptlrpc_at_timer((unsigned long)svcpt);
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
struct ptlrpc_request *reqcopy;
struct lustre_msg *reqmsg;
- cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
+ cfs_duration_t olddl = req->rq_deadline - get_seconds();
time_t newdl;
int rc;
/* Fake our processing time into the future to ask the clients
* for some extra amount of time */
at_measured(&svcpt->scp_at_estimate, at_extra +
- cfs_time_current_sec() -
+ get_seconds() -
req->rq_arrival_time.tv_sec);
/* Check to see if we've actually increased the deadline -
"(%ld/%ld), not sending early reply\n",
olddl, req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate) -
- cfs_time_current_sec());
+ get_seconds());
return -ETIMEDOUT;
}
}
- newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
+ newdl = get_seconds() + at_get(&svcpt->scp_at_estimate);
reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
if (reqcopy == NULL)
struct list_head work_list;
__u32 index, count;
time_t deadline;
- time_t now = cfs_time_current_sec();
+ time_t now = get_seconds();
cfs_duration_t delay;
int first, counter = 0;
}
/* req_in handling should/must be fast */
- if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
+ if (get_seconds() - req->rq_arrival_time.tv_sec > 5)
DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
- cfs_time_sub(cfs_time_current_sec(),
+ cfs_time_sub(get_seconds(),
req->rq_arrival_time.tv_sec));
/* Set rpc server deadline and add it to the timed list */
/* Discard requests queued for longer than the deadline.
The deadline is increased if we send an early reply. */
- if (cfs_time_current_sec() > request->rq_deadline) {
+ if (get_seconds() > request->rq_deadline) {
DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
libcfs_id2str(request->rq_peer),
cfs_time_sub(request->rq_deadline,
request->rq_arrival_time.tv_sec),
- cfs_time_sub(cfs_time_current_sec(),
+ cfs_time_sub(get_seconds(),
request->rq_deadline));
goto put_conn;
}
lu_context_exit(&request->rq_session);
lu_context_fini(&request->rq_session);
- if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
+ if (unlikely(get_seconds() > request->rq_deadline)) {
DEBUG_REQ(D_WARNING, request,
"Request took longer than estimated ("
CFS_DURATION_T":"CFS_DURATION_T
"s); client may timeout.",
cfs_time_sub(request->rq_deadline,
request->rq_arrival_time.tv_sec),
- cfs_time_sub(cfs_time_current_sec(),
+ cfs_time_sub(get_seconds(),
request->rq_deadline));
}