staging: lustre: remove the cfs_time_t typedef

cfs_time_t is only a typedef for unsigned long (jiffies), so the
indirection buys nothing. Just use unsigned long everywhere, like the
rest of the kernel does.
Cc: Andreas Dilger <andreas.dilger@intel.com>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: hpdd-discuss <hpdd-discuss@lists.01.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
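---
For reference, a minimal sketch of the idiom this patch converges on:
deadlines kept as plain unsigned long jiffies values and compared with the
wrap-safe time_before()/time_after() macros from <linux/jiffies.h>, which
is all the cfs_time_*() wrappers ever delegated to. The example_* names
below are hypothetical, for illustration only, and not part of this patch.

#include <linux/jiffies.h>	/* jiffies, time_after(), HZ */
#include <linux/types.h>	/* bool */

/* A deadline stored as plain jiffies; no typedef needed. */
static unsigned long example_deadline;

static void example_arm(int seconds)
{
	/* Equivalent of cfs_time_shift(seconds): now + seconds * HZ. */
	example_deadline = jiffies + seconds * HZ;
}

static bool example_expired(void)
{
	/*
	 * time_after() compares jiffies correctly even across the
	 * wraparound point; a naive "jiffies > example_deadline" would not.
	 */
	return time_after(jiffies, example_deadline);
}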
#define CDEBUG_DEFAULT_MIN_DELAY ((cfs_time_seconds(1) + 1) / 2) /* jiffies */
#define CDEBUG_DEFAULT_BACKOFF 2
struct cfs_debug_limit_state {
- cfs_time_t cdls_next;
+ unsigned long cdls_next;
unsigned int cdls_delay;
int cdls_count;
};
void cfs_init_timer(struct timer_list *t);
void cfs_timer_init(struct timer_list *t, cfs_timer_func_t *func, void *arg);
void cfs_timer_done(struct timer_list *t);
-void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline);
+void cfs_timer_arm(struct timer_list *t, unsigned long deadline);
void cfs_timer_disarm(struct timer_list *t);
int cfs_timer_is_armed(struct timer_list *t);
-cfs_time_t cfs_timer_deadline(struct timer_list *t);
+unsigned long cfs_timer_deadline(struct timer_list *t);
/*
* Memory
*/
/*
* generic time manipulation functions.
*/
-static inline cfs_time_t cfs_time_add(cfs_time_t t, cfs_duration_t d)
+static inline unsigned long cfs_time_add(unsigned long t, cfs_duration_t d)
{
- return (cfs_time_t)(t + d);
+ return (unsigned long)(t + d);
}
-static inline cfs_duration_t cfs_time_sub(cfs_time_t t1, cfs_time_t t2)
+static inline cfs_duration_t cfs_time_sub(unsigned long t1, unsigned long t2)
{
- return (cfs_time_t)(t1 - t2);
+ return (unsigned long)(t1 - t2);
}
-static inline int cfs_time_after(cfs_time_t t1, cfs_time_t t2)
+static inline int cfs_time_after(unsigned long t1, unsigned long t2)
{
return cfs_time_before(t2, t1);
}
-static inline int cfs_time_aftereq(cfs_time_t t1, cfs_time_t t2)
+static inline int cfs_time_aftereq(unsigned long t1, unsigned long t2)
{
return cfs_time_beforeq(t2, t1);
}
-static inline cfs_time_t cfs_time_shift(int seconds)
+static inline unsigned long cfs_time_shift(int seconds)
{
return cfs_time_add(cfs_time_current(), cfs_time_seconds(seconds));
}
return r;
}
-static inline void cfs_slow_warning(cfs_time_t now, int seconds, char *msg)
+static inline void cfs_slow_warning(unsigned long now, int seconds, char *msg)
{
if (cfs_time_after(cfs_time_current(),
cfs_time_add(now, cfs_time_seconds(15))))
/*
* Platform provides three opaque data-types:
*
- * cfs_time_t represents point in time. This is internal kernel
+ * unsigned long represents a point in time. This is internal kernel
* time rather than "wall clock". This time bears no
* relation to gettimeofday().
*
* struct timespec represents an instant in world-visible time. This is
* used in file-system time-stamps
*
- * cfs_time_t cfs_time_current(void);
- * cfs_time_t cfs_time_add (cfs_time_t, cfs_duration_t);
- * cfs_duration_t cfs_time_sub (cfs_time_t, cfs_time_t);
- * int cfs_impl_time_before (cfs_time_t, cfs_time_t);
- * int cfs_impl_time_before_eq(cfs_time_t, cfs_time_t);
+ * unsigned long cfs_time_current(void);
+ * unsigned long cfs_time_add (unsigned long, cfs_duration_t);
+ * cfs_duration_t cfs_time_sub (unsigned long, unsigned long);
+ * int cfs_impl_time_before (unsigned long, unsigned long);
+ * int cfs_impl_time_before_eq(unsigned long, unsigned long);
*
* cfs_duration_t cfs_duration_build(int64_t);
*/
/*
* Generic kernel stuff
*/
-typedef unsigned long cfs_time_t; /* jiffies */
typedef long cfs_duration_t;
-static inline int cfs_time_before(cfs_time_t t1, cfs_time_t t2)
+static inline int cfs_time_before(unsigned long t1, unsigned long t2)
{
return time_before(t1, t2);
}
-static inline int cfs_time_beforeq(cfs_time_t t1, cfs_time_t t2)
+static inline int cfs_time_beforeq(unsigned long t1, unsigned long t2)
{
return time_before_eq(t1, t2);
}
-static inline cfs_time_t cfs_time_current(void)
+static inline unsigned long cfs_time_current(void)
{
return jiffies;
}
atomic_t ue_refcount;
int ue_flags;
wait_queue_head_t ue_waitq;
- cfs_time_t ue_acquire_expire;
- cfs_time_t ue_expire;
+ unsigned long ue_acquire_expire;
+ unsigned long ue_expire;
union {
struct md_identity identity;
} u;
lnet_ni_t *lnet_net2ni_locked(__u32 net, int cpt);
lnet_ni_t *lnet_net2ni(__u32 net);
-int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, cfs_time_t when);
-void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when);
+int lnet_notify(lnet_ni_t *ni, lnet_nid_t peer, int alive, unsigned long when);
+void lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, unsigned long when);
int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway_nid,
unsigned int priority);
int lnet_check_routes(void);
void (*lnd_notify)(struct lnet_ni *ni, lnet_nid_t peer, int alive);
/* query of peer aliveness */
- void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, cfs_time_t *when);
+ void (*lnd_query)(struct lnet_ni *ni, lnet_nid_t peer, unsigned long *when);
/* accept a new connection */
int (*lnd_accept)(struct lnet_ni *ni, struct socket *sock);
unsigned int lp_ping_notsent; /* SEND event outstanding from ping */
int lp_alive_count; /* # times router went dead<->alive */
long lp_txqnob; /* bytes queued for sending */
- cfs_time_t lp_timestamp; /* time of last aliveness news */
- cfs_time_t lp_ping_timestamp; /* time of last ping attempt */
- cfs_time_t lp_ping_deadline; /* != 0 if ping reply expected */
- cfs_time_t lp_last_alive; /* when I was last alive */
- cfs_time_t lp_last_query; /* when lp_ni was queried last time */
+ unsigned long lp_timestamp; /* time of last aliveness news */
+ unsigned long lp_ping_timestamp; /* time of last ping attempt */
+ unsigned long lp_ping_deadline; /* != 0 if ping reply expected */
+ unsigned long lp_last_alive; /* when I was last alive */
+ unsigned long lp_last_query; /* when lp_ni was queried last time */
lnet_ni_t *lp_ni; /* interface peer is on */
lnet_nid_t lp_nid; /* peer's NID */
int lp_refcount; /* # refs */
}
void
-kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
+kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
{
- cfs_time_t last_alive = 0;
- cfs_time_t now = cfs_time_current();
+ unsigned long last_alive = 0;
+ unsigned long now = cfs_time_current();
rwlock_t *glock = &kiblnd_data.kib_global_lock;
kib_peer_t *peer;
unsigned long flags;
}
static int
-kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, cfs_time_t now)
+kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
{
if (fpo->fpo_map_count != 0) /* still in use */
return 0;
LIST_HEAD (zombies);
kib_fmr_pool_t *fpo = fmr->fmr_pool;
kib_fmr_poolset_t *fps = fpo->fpo_owner;
- cfs_time_t now = cfs_time_current();
+ unsigned long now = cfs_time_current();
kib_fmr_pool_t *tmp;
int rc;
}
static int
-kiblnd_pool_is_idle(kib_pool_t *pool, cfs_time_t now)
+kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
{
if (pool->po_allocated != 0) /* still in use */
return 0;
LIST_HEAD (zombies);
kib_poolset_t *ps = pool->po_owner;
kib_pool_t *tmp;
- cfs_time_t now = cfs_time_current();
+ unsigned long now = cfs_time_current();
spin_lock(&ps->ps_lock);
char ibd_ifname[KIB_IFNAME_SIZE];
int ibd_nnets; /* # nets extant */
- cfs_time_t ibd_next_failover;
+ unsigned long ibd_next_failover;
int ibd_failed_failover; /* # failover failures */
unsigned int ibd_failover; /* failover in progress */
unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
struct list_head ps_pool_list; /* list of pools */
struct list_head ps_failed_pool_list; /* failed pool list */
- cfs_time_t ps_next_retry; /* time stamp for retry if failed to allocate */
+ unsigned long ps_next_retry; /* time stamp for retry if failed to allocate */
int ps_increasing; /* is allocating new pool */
int ps_pool_size; /* new pool size */
int ps_cpt; /* CPT id */
struct list_head po_list; /* chain on pool list */
struct list_head po_free_list; /* pre-allocated node */
kib_poolset_t *po_owner; /* pool_set of this pool */
- cfs_time_t po_deadline; /* deadline of this pool */
+ unsigned long po_deadline; /* deadline of this pool */
int po_allocated; /* # of elements in use */
int po_failed; /* pool is created on failed HCA */
int po_size; /* # of pre-allocated elements */
/* is allocating new pool */
int fps_increasing;
/* time stamp for retry if failed to allocate */
- cfs_time_t fps_next_retry;
+ unsigned long fps_next_retry;
} kib_fmr_poolset_t;
typedef struct
struct kib_hca_dev *fpo_hdev; /* device for this pool */
kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
- cfs_time_t fpo_deadline; /* deadline of this pool */
+ unsigned long fpo_deadline; /* deadline of this pool */
int fpo_failed; /* fmr pool is failed */
int fpo_map_count; /* # of mapped FMR */
} kib_fmr_pool_t;
int ibp_connecting; /* current active connection attempts */
int ibp_accepting; /* current passive connection attempts */
int ibp_error; /* errno on closing this peer */
- cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */
+ unsigned long ibp_last_alive; /* when (in jiffies) I was last alive */
} kib_peer_t;
extern kib_data_t kiblnd_data;
int kiblnd_startup (lnet_ni_t *ni);
void kiblnd_shutdown (lnet_ni_t *ni);
int kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
-void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
+void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
int kiblnd_tunables_init(void);
void kiblnd_tunables_fini(void);
kiblnd_peer_notify (kib_peer_t *peer)
{
int error = 0;
- cfs_time_t last_alive = 0;
+ unsigned long last_alive = 0;
unsigned long flags;
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
ksocknal_peer_failed (ksock_peer_t *peer)
{
int notify = 0;
- cfs_time_t last_alive = 0;
+ unsigned long last_alive = 0;
/* There has been a connection failure or comms error; but I'll only
* tell LNET I think the peer is dead if it's to another kernel and
void
ksocknal_destroy_conn (ksock_conn_t *conn)
{
- cfs_time_t last_rcv;
+ unsigned long last_rcv;
/* Final coup-de-grace of the reaper */
CDEBUG (D_NET, "connection %p\n", conn);
}
void
-ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
+ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
{
int connect = 1;
- cfs_time_t last_alive = 0;
- cfs_time_t now = cfs_time_current();
+ unsigned long last_alive = 0;
+ unsigned long now = cfs_time_current();
ksock_peer_t *peer = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
struct list_head ksnd_zombie_conns; /* conns to free: reaper_lock */
struct list_head ksnd_enomem_conns; /* conns to retry: reaper_lock*/
wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */
- cfs_time_t ksnd_reaper_waketime;/* when reaper will wake */
+ unsigned long ksnd_reaper_waketime; /* when reaper will wake */
spinlock_t ksnd_reaper_lock; /* serialise */
int ksnd_enomem_tx; /* test ENOMEM sender */
lnet_kiov_t *tx_kiov; /* packet page frags */
struct ksock_conn *tx_conn; /* owning conn */
lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */
- cfs_time_t tx_deadline; /* when (in jiffies) tx times out */
+ unsigned long tx_deadline; /* when (in jiffies) tx times out */
ksock_msg_t tx_msg; /* socklnd message buffer */
int tx_desc_size; /* size of this descriptor */
union {
/* reader */
struct list_head ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */
- cfs_time_t ksnc_rx_deadline; /* when (in jiffies) receive times out */
+ unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times out */
__u8 ksnc_rx_started; /* started receiving a message */
__u8 ksnc_rx_ready; /* data ready to read */
__u8 ksnc_rx_scheduled;/* being progressed */
struct list_head ksnc_tx_list; /* where I enq waiting for output space */
struct list_head ksnc_tx_queue; /* packets waiting to be sent */
ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet message or ZC-ACK */
- cfs_time_t ksnc_tx_deadline; /* when (in jiffies) tx times out */
+ unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out */
int ksnc_tx_bufnob; /* send buffer marker */
atomic_t ksnc_tx_nob; /* # bytes queued */
int ksnc_tx_ready; /* write space */
int ksnc_tx_scheduled; /* being progressed */
- cfs_time_t ksnc_tx_last_post; /* time stamp of the last posted TX */
+ unsigned long ksnc_tx_last_post; /* time stamp of the last posted TX */
} ksock_conn_t;
typedef struct ksock_route
struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */
struct ksock_peer *ksnr_peer; /* owning peer */
atomic_t ksnr_refcount; /* # users */
- cfs_time_t ksnr_timeout; /* when (in jiffies) reconnection can happen next */
+ unsigned long ksnr_timeout; /* when (in jiffies) reconnection can happen next */
cfs_duration_t ksnr_retry_interval; /* how long between retries */
__u32 ksnr_myipaddr; /* my IP */
__u32 ksnr_ipaddr; /* IP address to connect to */
typedef struct ksock_peer
{
struct list_head ksnp_list; /* stash on global peer list */
- cfs_time_t ksnp_last_alive; /* when (in jiffies) I was last alive */
+ unsigned long ksnp_last_alive; /* when (in jiffies) I was last alive */
lnet_process_id_t ksnp_id; /* who's on the other end(s) */
atomic_t ksnp_refcount; /* # users */
int ksnp_sharecount; /* lconf usage counter */
struct list_head ksnp_tx_queue; /* waiting packets */
spinlock_t ksnp_lock; /* serialize, g_lock unsafe */
struct list_head ksnp_zc_req_list; /* zero copy requests wait for ACK */
- cfs_time_t ksnp_send_keepalive; /* time to send keepalive */
+ unsigned long ksnp_send_keepalive; /* time to send keepalive */
lnet_ni_t *ksnp_ni; /* which network */
int ksnp_n_passive_ips; /* # of... */
__u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */
extern void ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist,
int error);
extern void ksocknal_notify (lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
-extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);
+extern void ksocknal_query (struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
extern void ksocknal_thread_fini (void);
extern void ksocknal_launch_all_connections_locked (ksock_peer_t *peer);
ksock_route_t *
ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
{
- cfs_time_t now = cfs_time_current();
+ unsigned long now = cfs_time_current();
struct list_head *tmp;
ksock_route_t *route;
int type;
int wanted;
struct socket *sock;
- cfs_time_t deadline;
+ unsigned long deadline;
int retry_later = 0;
int rc = 0;
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
ksock_route_t *route;
- cfs_time_t now;
+ unsigned long now;
now = cfs_time_current();
read_lock(&ksocknal_data.ksnd_global_lock);
list_for_each_entry(peer, peers, ksnp_list) {
- cfs_time_t deadline = 0;
+ unsigned long deadline = 0;
int resid = 0;
int n = 0;
cfs_duration_t timeout;
int i;
int peer_index = 0;
- cfs_time_t deadline = cfs_time_current();
+ unsigned long deadline = cfs_time_current();
cfs_block_allsigs ();
int tms = *timeout_ms;
int wait;
wait_queue_t wl;
- cfs_time_t now;
+ unsigned long now;
if (tms == 0)
return -1; /* don't want to wait and no new event */
void
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
- cfs_time_t last_alive = 0;
+ unsigned long last_alive = 0;
LASSERT(lnet_peer_aliveness_enabled(lp));
LASSERT(ni->ni_lnd->lnd_query != NULL);
/* NB: always called with lnet_net_lock held */
static inline int
-lnet_peer_is_alive(lnet_peer_t *lp, cfs_time_t now)
+lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
{
int alive;
- cfs_time_t deadline;
+ unsigned long deadline;
LASSERT(lnet_peer_aliveness_enabled(lp));
int
lnet_peer_alive_locked(lnet_peer_t *lp)
{
- cfs_time_t now = cfs_time_current();
+ unsigned long now = cfs_time_current();
if (!lnet_peer_aliveness_enabled(lp))
return -ENODEV;
if (lp->lp_last_query != 0) {
static const int lnet_queryinterval = 1;
- cfs_time_t next_query =
+ unsigned long next_query =
cfs_time_add(lp->lp_last_query,
cfs_time_seconds(lnet_queryinterval));
}
void
-lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when)
+lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, unsigned long when)
{
if (cfs_time_before(when, lp->lp_timestamp)) { /* out of date information */
CDEBUG(D_NET, "Out of date\n");
lnet_ping_router_locked (lnet_peer_t *rtr)
{
lnet_rc_data_t *rcd = NULL;
- cfs_time_t now = cfs_time_current();
+ unsigned long now = cfs_time_current();
int secs;
lnet_peer_addref_locked(rtr);
}
int
-lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
+lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
{
struct lnet_peer *lp = NULL;
- cfs_time_t now = cfs_time_current();
+ unsigned long now = cfs_time_current();
int cpt = lnet_cpt_of_nid(nid);
LASSERT (!in_interrupt ());
#else
int
-lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
+lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when)
{
return -EOPNOTSUPP;
}
if (peer != NULL) {
lnet_nid_t nid = peer->lp_nid;
- cfs_time_t now = cfs_time_current();
- cfs_time_t deadline = peer->lp_ping_deadline;
+ unsigned long now = cfs_time_current();
+ unsigned long deadline = peer->lp_ping_deadline;
int nrefs = peer->lp_refcount;
int nrtrrefs = peer->lp_rtr_refcount;
int alive_cnt = peer->lp_alive_count;
aliveness = peer->lp_alive ? "up" : "down";
if (lnet_peer_aliveness_enabled(peer)) {
- cfs_time_t now = cfs_time_current();
+ unsigned long now = cfs_time_current();
cfs_duration_t delta;
delta = cfs_time_sub(now, peer->lp_last_alive);
nd = crpc->crp_node;
dur = (cfs_duration_t)cfs_time_sub(crpc->crp_stamp,
- (cfs_time_t)console_session.ses_id.ses_stamp);
+ (unsigned long)console_session.ses_id.ses_stamp);
cfs_duration_usec(dur, &tv);
if (copy_to_user(&ent->rpe_peer,
CDEBUG(D_NET, "Ping %d nodes in session\n", count);
- ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
+ ptimer->stt_expires = (unsigned long)(cfs_time_current_sec() + LST_PING_INTERVAL);
stt_add_timer(ptimer);
mutex_unlock(&console_session.ses_mutex);
}
ptimer = &console_session.ses_ping_timer;
- ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
+ ptimer->stt_expires = (unsigned long)(cfs_time_current_sec() + LST_PING_INTERVAL);
stt_add_timer(ptimer);
/** RPC is embedded in another structure and can't be freed separately */
unsigned int crp_embedded:1;
int crp_status; /* console rpc errors */
- cfs_time_t crp_stamp; /* replied time stamp */
+ unsigned long crp_stamp; /* replied time stamp */
} lstcon_rpc_t;
typedef struct lstcon_rpc_trans {
int nd_ref; /* reference count */
int nd_state; /* state of the node */
int nd_timeout; /* session timeout */
- cfs_time_t nd_stamp; /* timestamp of last replied RPC */
+ unsigned long nd_stamp; /* timestamp of last replied RPC */
struct lstcon_rpc nd_ping; /* ping rpc */
} lstcon_node_t; /*** node descriptor */
atomic_t sn_refcount;
atomic_t sn_brw_errors;
atomic_t sn_ping_errors;
- cfs_time_t sn_started;
+ unsigned long sn_started;
} sfw_session_t;
#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
struct st_timer_data {
spinlock_t stt_lock;
/* start time of the slot processed previously */
- cfs_time_t stt_prev_slot;
+ unsigned long stt_prev_slot;
struct list_head stt_hash[STTIMER_NSLOTS];
int stt_shuttingdown;
wait_queue_head_t stt_waitq;
/* called with stt_data.stt_lock held */
int
-stt_expire_list(struct list_head *slot, cfs_time_t now)
+stt_expire_list(struct list_head *slot, unsigned long now)
{
int expired = 0;
stt_timer_t *timer;
}
int
-stt_check_timers(cfs_time_t *last)
+stt_check_timers(unsigned long *last)
{
int expired = 0;
- cfs_time_t now;
- cfs_time_t this_slot;
+ unsigned long now;
+ unsigned long this_slot;
now = cfs_time_current_sec();
this_slot = now & STTIMER_SLOTTIMEMASK;
typedef struct {
struct list_head stt_list;
- cfs_time_t stt_expires;
+ unsigned long stt_expires;
void (*stt_func) (void *);
void *stt_data;
} stt_timer_t;
struct lustre_capa c_capa; /* capa */
atomic_t c_refc; /* ref count */
- cfs_time_t c_expiry; /* jiffies */
+ unsigned long c_expiry; /* jiffies */
spinlock_t c_lock; /* protect capa content */
int c_site;
static inline void set_capa_expiry(struct obd_capa *ocapa)
{
- cfs_time_t expiry = cfs_time_sub((cfs_time_t)ocapa->c_capa.lc_expiry,
+ unsigned long expiry = cfs_time_sub((unsigned long)ocapa->c_capa.lc_expiry,
cfs_time_current_sec());
ocapa->c_expiry = cfs_time_add(cfs_time_current(),
cfs_time_seconds(expiry));
* \see ldlm_namespace_dump. Increased by 10 seconds every time
* it is called.
*/
- cfs_time_t ns_next_dump;
+ unsigned long ns_next_dump;
/** "policy" function that does actual lock conflict determination */
ldlm_res_policy ns_policy;
* Seconds. It will be updated if there is any activity related to
* the lock, e.g. enqueue the lock or send blocking AST.
*/
- cfs_time_t l_last_activity;
+ unsigned long l_last_activity;
/**
* Time last used by e.g. being matched by lock match.
* Jiffies. Should be converted to time if needed.
*/
- cfs_time_t l_last_used;
+ unsigned long l_last_used;
/** Originally requested extent for the extent lock. */
struct ldlm_extent l_req_extent;
* under this lock.
* \see ost_rw_prolong_locks
*/
- cfs_time_t l_callback_timeout;
+ unsigned long l_callback_timeout;
/** Local PID of process which created this lock. */
__u32 l_pid;
void *lr_lvb_data;
/** When the resource was considered as contended. */
- cfs_time_t lr_contention_time;
+ unsigned long lr_contention_time;
/** List of references to this resource. For debugging. */
struct lu_ref lr_reference;
/** Last committed transno for this export */
__u64 exp_last_committed;
/** When was last request received */
- cfs_time_t exp_last_request_time;
+ unsigned long exp_last_request_time;
/** On replay all requests waiting for replay are linked here */
struct list_head exp_req_replay_queue;
/**
enum lustre_sec_part exp_sp_peer;
struct sptlrpc_flavor exp_flvr; /* current */
struct sptlrpc_flavor exp_flvr_old[2]; /* about-to-expire */
- cfs_time_t exp_flvr_expire[2]; /* seconds */
+ unsigned long exp_flvr_expire[2]; /* seconds */
/** protects exp_hp_rpcs */
spinlock_t exp_rpc_lock;
*/
struct ptlrpc_sec *imp_sec;
struct mutex imp_sec_mutex;
- cfs_time_t imp_sec_expire;
+ unsigned long imp_sec_expire;
/** @} */
/** Wait queue for those who need to wait for recovery completion */
*/
struct lustre_handle imp_remote_handle;
/** When to perform next ping. time in jiffies. */
- cfs_time_t imp_next_ping;
+ unsigned long imp_next_ping;
/** When we last successfully connected. time in 64bit jiffies */
__u64 imp_last_success_conn;
/** optional time limit for send attempts */
cfs_duration_t rq_delay_limit;
/** time request was first queued */
- cfs_time_t rq_queued_time;
+ unsigned long rq_queued_time;
/* server-side... */
/** request arrival time */
/** early reply timer */
struct timer_list scp_at_timer;
/** debug */
- cfs_time_t scp_at_checktime;
+ unsigned long scp_at_checktime;
/** check early replies */
unsigned scp_at_check;
/** @} */
atomic_t cc_refcount;
struct ptlrpc_sec *cc_sec;
struct ptlrpc_ctx_ops *cc_ops;
- cfs_time_t cc_expire; /* in seconds */
+ unsigned long cc_expire; /* in seconds */
unsigned int cc_early_expire:1;
unsigned long cc_flags;
struct vfs_cred cc_vcred;
* garbage collection
*/
struct list_head ps_gc_list;
- cfs_time_t ps_gc_interval; /* in seconds */
- cfs_time_t ps_gc_next; /* in seconds */
+ unsigned long ps_gc_interval; /* in seconds */
+ unsigned long ps_gc_next; /* in seconds */
};
static inline int sec_is_reverse(struct ptlrpc_sec *sec)
struct timeout_item {
enum timeout_event ti_event;
- cfs_time_t ti_timeout;
+ unsigned long ti_timeout;
timeout_cb_t ti_cb;
void *ti_cb_data;
struct list_head ti_obd_list;
* See osc_{reserve|unreserve}_grant for details. */
long cl_reserved_grant;
struct list_head cl_cache_waiters; /* waiting for cache/grant */
- cfs_time_t cl_next_shrink_grant; /* jiffies */
+ unsigned long cl_next_shrink_grant; /* jiffies */
struct list_head cl_grant_shrink_list; /* Timeout event list */
int cl_grant_shrink_interval; /* seconds */
static struct ldlm_state *ldlm_state;
-inline cfs_time_t round_timeout(cfs_time_t timeout)
+inline unsigned long round_timeout(unsigned long timeout)
{
return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
}
struct obd_device *obd;
if (lock->l_conn_export == NULL) {
- static cfs_time_t next_dump = 0, last_dump = 0;
+ static unsigned long next_dump = 0, last_dump = 0;
LCONSOLE_WARN("lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago)\n",
int unused, int added,
int count)
{
- cfs_time_t cur = cfs_time_current();
+ unsigned long cur = cfs_time_current();
struct ldlm_pool *pl = &ns->ns_pool;
__u64 slv, lvf, lv;
- cfs_time_t la;
+ unsigned long la;
/* Stop LRU processing when we reach past @count or have checked all
* locks in LRU. */
}
EXPORT_SYMBOL(cfs_timer_done);
-void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline)
+void cfs_timer_arm(struct timer_list *t, unsigned long deadline)
{
mod_timer(t, deadline);
}
}
EXPORT_SYMBOL(cfs_timer_is_armed);
-cfs_time_t cfs_timer_deadline(struct timer_list *t)
+unsigned long cfs_timer_deadline(struct timer_list *t)
{
return t->expires;
}
static int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa);
-static inline void update_capa_timer(struct obd_capa *ocapa, cfs_time_t expiry)
+static inline void update_capa_timer(struct obd_capa *ocapa, unsigned long expiry)
{
if (cfs_time_before(expiry, ll_capa_timer.expires) ||
!timer_pending(&ll_capa_timer)) {
}
}
-static inline cfs_time_t capa_renewal_time(struct obd_capa *ocapa)
+static inline unsigned long capa_renewal_time(struct obd_capa *ocapa)
{
return cfs_time_sub(ocapa->c_expiry,
cfs_time_seconds(ocapa->c_capa.lc_timeout) / 2);
return ocapa;
}
-static inline void delay_capa_renew(struct obd_capa *oc, cfs_time_t delay)
+static inline void delay_capa_renew(struct obd_capa *oc, unsigned long delay)
{
/* NB: set a fake expiry for this capa to prevent it renewing too soon */
oc->c_expiry = cfs_time_add(oc->c_expiry, cfs_time_seconds(delay));
* capability needs renewal */
atomic_t lli_open_count;
struct obd_capa *lli_mds_capa;
- cfs_time_t lli_rmtperm_time;
+ unsigned long lli_rmtperm_time;
/* handle is to be sent to MDS later on done_writing and setattr.
* Open handle data are needed for the recovery to reconstruct
struct mutex f_write_mutex;
struct rw_semaphore f_glimpse_sem;
- cfs_time_t f_glimpse_time;
+ unsigned long f_glimpse_time;
struct list_head f_agl_list;
__u64 f_agl_index;
struct ptlrpc_request *req = NULL;
struct mdt_remote_perm *perm;
struct obd_capa *oc;
- cfs_time_t save;
+ unsigned long save;
int i = 0, rc;
do {
* True if locking against this stripe got -EUSERS.
*/
int oo_contended;
- cfs_time_t oo_contention_time;
+ unsigned long oo_contention_time;
/**
* List of pages in transfer.
*/
/**
* Submit time - the time when the page is starting RPC. For debugging.
*/
- cfs_time_t ops_submit_time;
+ unsigned long ops_submit_time;
/**
* A lock of which we hold a reference covers this page. Only used by
{
struct osc_device *dev = lu2osc_dev(obj->oo_cl.co_lu.lo_dev);
int osc_contention_time = dev->od_contention_time;
- cfs_time_t cur_time = cfs_time_current();
- cfs_time_t retry_time;
+ unsigned long cur_time = cfs_time_current();
+ unsigned long retry_time;
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_OBJECT_CONTENTION))
return 1;
return list_empty(head) ? "-" : "+";
}
-static inline cfs_time_t osc_submit_duration(struct osc_page *opg)
+static inline unsigned long osc_submit_duration(struct osc_page *opg)
{
if (opg->ops_submit_time == 0)
return 0;
static int osc_should_shrink_grant(struct client_obd *client)
{
- cfs_time_t time = cfs_time_current();
- cfs_time_t next_shrink = client->cl_next_shrink_grant;
+ unsigned long time = cfs_time_current();
+ unsigned long next_shrink = client->cl_next_shrink_grant;
if ((client->cl_import->imp_connect_data.ocd_connect_flags &
OBD_CONNECT_GRANT_SHRINK) == 0)
return cfs_time_shift(obd_timeout);
}
-cfs_duration_t pinger_check_timeout(cfs_time_t time)
+cfs_duration_t pinger_check_timeout(unsigned long time)
{
struct timeout_item *item;
- cfs_time_t timeout = PING_INTERVAL;
+ unsigned long timeout = PING_INTERVAL;
/* The timeout list is sorted in increasing order */
mutex_lock(&pinger_mutex);
/* And now, loop forever, pinging as needed. */
while (1) {
- cfs_time_t this_ping = cfs_time_current();
+ unsigned long this_ping = cfs_time_current();
struct l_wait_info lwi;
cfs_duration_t time_to_next_wake;
struct timeout_item *item;
unsigned long epp_st_missings; /* # of cache missing */
unsigned long epp_st_lowfree; /* lowest free pages reached */
unsigned int epp_st_max_wqlen; /* highest waitqueue length */
- cfs_time_t epp_st_max_wait; /* in jiffies */
+ unsigned long epp_st_max_wait; /* in jiffies */
/*
* pointers to pools
*/
{
wait_queue_t waitlink;
unsigned long this_idle = -1;
- cfs_time_t tick = 0;
+ unsigned long tick = 0;
long now;
int p_idx, g_idx;
int i;