cfs_time_shift() simply multiplies its seconds argument by HZ and
adds jiffies.  This is simple enough to be open-coded, and doing so
makes the code easier to read.  The same applies to
cfs_time_shift_64().
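
For reference, both helpers being removed are trivial wrappers around
jiffies arithmetic (quoted from the removed code below):

	static inline unsigned long cfs_time_shift(int seconds)
	{
		return jiffies + seconds * HZ;
	}

	static inline u64 cfs_time_shift_64(int seconds)
	{
		return get_jiffies_64() + (u64)seconds * HZ;
	}

so each caller is converted mechanically:

	cfs_time_shift(s)    ->  jiffies + s * HZ
	cfs_time_shift_64(s) ->  get_jiffies_64() + (u64)s * HZ

One case worth noting is the negative argument used for the statfs
cache age, cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS): because of
the (u64) cast, adding the negative value is equivalent to a
subtraction modulo 2^64, so it is open-coded as

	get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ

which does not change behaviour.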
Signed-off-by: NeilBrown <neilb@suse.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
return time_before_eq(t2, t1);
}
-static inline unsigned long cfs_time_shift(int seconds)
-{
- return jiffies + seconds * HZ;
-}
-
/*
* return valid time-out based on user supplied one. Currently we only check
* that time-out is not shorter than allowed.
return d / msecs_to_jiffies(MSEC_PER_SEC);
}
-static inline u64 cfs_time_shift_64(int seconds)
-{
- return get_jiffies_64() + (u64)seconds * HZ;
-}
-
static inline int cfs_time_before_64(u64 t1, u64 t2)
{
return (__s64)t2 - (__s64)t1 > 0;
if (rc)
goto out_fpo;
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ fpo->fpo_deadline = jiffies + IBLND_POOL_DEADLINE * HZ;
fpo->fpo_owner = fps;
*pp_fpo = fpo;
spin_lock(&fps->fps_lock);
version = fps->fps_version;
list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ fpo->fpo_deadline = jiffies + IBLND_POOL_DEADLINE * HZ;
fpo->fpo_map_count++;
if (fpo->fpo_is_fmr) {
fps->fps_version++;
list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
} else {
- fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ fps->fps_next_retry = jiffies + IBLND_POOL_RETRY * HZ;
}
spin_unlock(&fps->fps_lock);
memset(pool, 0, sizeof(*pool));
INIT_LIST_HEAD(&pool->po_free_list);
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ pool->po_deadline = jiffies + IBLND_POOL_DEADLINE * HZ;
pool->po_owner = ps;
pool->po_size = size;
}
continue;
pool->po_allocated++;
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ pool->po_deadline = jiffies + IBLND_POOL_DEADLINE * HZ;
node = pool->po_free_list.next;
list_del(node);
if (!rc) {
list_add_tail(&pool->po_list, &ps->ps_pool_list);
} else {
- ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ ps->ps_next_retry = jiffies + IBLND_POOL_RETRY * HZ;
CERROR("Can't allocate new %s pool because out of memory\n",
ps->ps_name);
}
LASSERT(dev->ibd_failover);
dev->ibd_failover = 0;
if (rc >= 0) { /* Device is OK or failover succeed */
- dev->ibd_next_failover = cfs_time_shift(3);
+ dev->ibd_next_failover = jiffies + 3 * HZ;
continue;
}
/* failed to failover, retry later */
dev->ibd_next_failover =
- cfs_time_shift(min(dev->ibd_failed_failover, 10));
+ jiffies + min(dev->ibd_failed_failover, 10) * HZ;
if (kiblnd_dev_can_failover(dev)) {
list_add_tail(&dev->ibd_fail_list,
&kiblnd_data.kib_failed_devs);
conn->ksnc_tx_last_post = jiffies;
/* Set the deadline for the outgoing HELLO to drain */
conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
- conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ conn->ksnc_tx_deadline = jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
mb(); /* order with adding to peer's conn list */
list_add(&conn->ksnc_list, &peer->ksnp_conns);
if (bufnob < conn->ksnc_tx_bufnob) {
/* something got ACKed */
conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
peer->ksnp_last_alive = now;
conn->ksnc_tx_bufnob = bufnob;
}
* something got ACKed
*/
conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_tx_bufnob = bufnob;
mb();
conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_rx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
mb(); /* order with setting rx_started */
conn->ksnc_rx_started = 1;
/* ZC_REQ is going to be pinned to the peer */
tx->tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
if (list_empty(&conn->ksnc_tx_queue) && !bufnob) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
conn->ksnc_peer->ksnp_last_alive = jiffies;
conn->ksnc_tx_bufnob = 0;
ksocknal_find_connecting_route_locked(peer)) {
/* the message is going to be pinned to the peer */
tx->tx_deadline =
- cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
+ jiffies + *ksocknal_tunables.ksnd_timeout * HZ;
/* Queue the message until a connection is established */
list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
* retry 10 secs later, so we wouldn't put pressure
* on this peer if we failed to send keepalive this time
*/
- peer->ksnp_send_keepalive = cfs_time_shift(10);
+ peer->ksnp_send_keepalive = jiffies + 10 * HZ;
conn = ksocknal_find_conn_locked(peer, NULL, 1);
if (conn) {
rule->dr_attr = *attr;
if (attr->u.drop.da_interval) {
- rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
- rule->dr_drop_time = cfs_time_shift(
- prandom_u32_max(attr->u.drop.da_interval));
+ rule->dr_time_base = jiffies + attr->u.drop.da_interval * HZ;
+ rule->dr_drop_time = jiffies +
+ prandom_u32_max(attr->u.drop.da_interval) * HZ;
} else {
rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
}
if (attr->u.drop.da_rate) {
rule->dr_drop_at = prandom_u32_max(attr->u.drop.da_rate);
} else {
- rule->dr_drop_time = cfs_time_shift(
- prandom_u32_max(attr->u.drop.da_interval));
- rule->dr_time_base = cfs_time_shift(attr->u.drop.da_interval);
+ rule->dr_drop_time = jiffies +
+ prandom_u32_max(attr->u.drop.da_interval) * HZ;
+ rule->dr_time_base = jiffies + attr->u.drop.da_interval * HZ;
}
spin_unlock(&rule->dr_lock);
}
list_add_tail(&msg->msg_list, &rule->dl_msg_list);
msg->msg_delay_send = round_timeout(
- cfs_time_shift(attr->u.delay.la_latency));
+ jiffies + attr->u.delay.la_latency * HZ);
if (rule->dl_msg_send == -1) {
rule->dl_msg_send = msg->msg_delay_send;
mod_timer(&rule->dl_timer, rule->dl_msg_send);
rule->dl_attr = *attr;
if (attr->u.delay.la_interval) {
- rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
- rule->dl_delay_time = cfs_time_shift(
- prandom_u32_max(attr->u.delay.la_interval));
+ rule->dl_time_base = jiffies + attr->u.delay.la_interval * HZ;
+ rule->dl_delay_time = jiffies +
+ prandom_u32_max(attr->u.delay.la_interval) * HZ;
} else {
rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
}
rule->dl_delay_at = prandom_u32_max(attr->u.delay.la_rate);
} else {
rule->dl_delay_time =
- cfs_time_shift(prandom_u32_max(
- attr->u.delay.la_interval));
- rule->dl_time_base = cfs_time_shift(attr->u.delay.la_interval);
+ jiffies + prandom_u32_max(
+ attr->u.delay.la_interval) * HZ;
+ rule->dl_time_base = jiffies + attr->u.delay.la_interval * HZ;
}
spin_unlock(&rule->dl_lock);
}
if (!rtr->lp_ping_deadline) {
rtr->lp_ping_deadline =
- cfs_time_shift(router_ping_timeout);
+ jiffies + router_ping_timeout * HZ;
}
lnet_net_unlock(rtr->lp_cpt);
lock->l_last_activity));
if (cfs_time_after(jiffies, next_dump)) {
last_dump = next_dump;
- next_dump = cfs_time_shift(300);
+ next_dump = jiffies + 300 * HZ;
ldlm_namespace_dump(D_DLMTRACE,
ldlm_lock_to_ns(lock));
if (last_dump == 0)
ldlm_res_hash_dump,
(void *)(unsigned long)level, 0);
spin_lock(&ns->ns_lock);
- ns->ns_next_dump = cfs_time_shift(10);
+ ns->ns_next_dump = jiffies + 10 * HZ;
spin_unlock(&ns->ns_lock);
}
* available
*/
err = obd_statfs(NULL, sbi->ll_md_exp, osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_FOR_MDT0);
if (err)
goto out_md_fid;
/* Some amount of caching on the client is allowed */
rc = ll_statfs_internal(sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
0);
if (rc)
return rc;
int rc;
rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc)
return sprintf(buf, "%u\n", osfs.os_bsize);
int rc;
rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
int rc;
rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
int rc;
rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
int rc;
rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc)
return sprintf(buf, "%llu\n", osfs.os_files);
int rc;
rc = ll_statfs_internal(sbi->ll_sb, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc)
return sprintf(buf, "%llu\n", osfs.os_ffree);
* affect the performance.
*/
if (lli->lli_glimpse_time != 0 &&
- time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
+ time_before(jiffies - 1 * HZ, lli->lli_glimpse_time)) {
up_write(&lli->lli_glimpse_sem);
lli->lli_agl_index = 0;
iput(inode);
return -EFAULT;
rc = obd_statfs(NULL, tgt->ltd_exp, &stat_buf,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
0);
if (rc)
return rc;
/* got statfs data */
rc = obd_statfs(NULL, lov->lov_tgts[index]->ltd_exp, &stat_buf,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
flags);
if (rc)
return rc;
}
rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
0);
if (rc != 0)
goto out;
obd_kobj);
struct obd_statfs osfs;
int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc)
return sprintf(buf, "%u\n", osfs.os_bsize);
obd_kobj);
struct obd_statfs osfs;
int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
obd_kobj);
struct obd_statfs osfs;
int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
obd_kobj);
struct obd_statfs osfs;
int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc) {
__u32 blk_size = osfs.os_bsize >> 10;
obd_kobj);
struct obd_statfs osfs;
int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc)
return sprintf(buf, "%llu\n", osfs.os_files);
obd_kobj);
struct obd_statfs osfs;
int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
+ get_jiffies_64() - OBD_STATFS_CACHE_SECONDS * HZ,
OBD_STATFS_NODELAY);
if (!rc)
return sprintf(buf, "%llu\n", osfs.os_ffree);
/* obd->obd_osfs_age must be set to a value in the distant
* past to guarantee a fresh statfs is fetched on mount.
*/
- obd->obd_osfs_age = cfs_time_shift_64(-1000);
+ obd->obd_osfs_age = get_jiffies_64() - 1000 * HZ;
/* XXX belongs in setup not attach */
init_rwsem(&obd->obd_observer_link_sem);
void osc_update_next_shrink(struct client_obd *cli)
{
cli->cl_next_shrink_grant =
- cfs_time_shift(cli->cl_grant_shrink_interval);
+ jiffies + cli->cl_grant_shrink_interval * HZ;
CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
cli->cl_next_shrink_grant);
}
at_get(&imp->imp_at.iat_net_latency));
time = min(time, dtime);
}
- imp->imp_next_ping = cfs_time_shift(time);
+ imp->imp_next_ping = jiffies + time * HZ;
}
static inline int imp_is_deactive(struct obd_import *imp)
static inline int ptlrpc_next_reconnect(struct obd_import *imp)
{
if (imp->imp_server_timeout)
- return cfs_time_shift(obd_timeout / 2);
+ return jiffies + obd_timeout / 2 * HZ;
else
- return cfs_time_shift(obd_timeout);
+ return jiffies + obd_timeout * HZ;
}
static long pinger_check_timeout(unsigned long time)
if (next <= 0) {
ptlrpc_at_timer(&svcpt->scp_at_timer);
} else {
- mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
+ mod_timer(&svcpt->scp_at_timer, jiffies + next * HZ);
CDEBUG(D_INFO, "armed %s at %+ds\n",
svcpt->scp_service->srv_name, next);
}