return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}
-/*
- * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
- * for last close replay.
- *
- * All of the above fields are protected by the client_mutex.
- */
-static struct list_head close_lru;
-
/*
* We store the NONE, READ, WRITE, and BOTH bits separately in the
* st_{access,deny}_bmap field of the stateid, in order to track not
}
static void
-move_to_close_lru(struct nfs4_openowner *oo)
+move_to_close_lru(struct nfs4_openowner *oo, struct net *net)
{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
- list_move_tail(&oo->oo_close_lru, &close_lru);
+ list_move_tail(&oo->oo_close_lru, &nn->close_lru);
oo->oo_time = get_seconds();
}
unhash_delegation(dp);
}
test_val = nfsd4_lease;
- list_for_each_safe(pos, next, &close_lru) {
+ list_for_each_safe(pos, next, &nn->close_lru) {
oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
u = oo->oo_time - cutoff;
* little while to handle CLOSE replay.
*/
if (list_empty(&oo->oo_owner.so_stateids))
- move_to_close_lru(oo);
+ move_to_close_lru(oo, SVC_NET(rqstp));
}
}
out:
for (i = 0; i < FILE_HASH_SIZE; i++) {
INIT_LIST_HEAD(&file_hashtbl[i]);
}
- INIT_LIST_HEAD(&close_lru);
INIT_LIST_HEAD(&del_recall_lru);
}
nn->conf_name_tree = RB_ROOT;
nn->unconf_name_tree = RB_ROOT;
INIT_LIST_HEAD(&nn->client_lru);
+ INIT_LIST_HEAD(&nn->close_lru);
return 0;
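
For this hunk to compile, struct nfsd_net (fs/nfsd/netns.h) must carry a matching list head alongside the existing client_lru initialized above. A minimal sketch of that assumed addition, with all unrelated members elided:

/* Sketch only: other nfsd_net members are omitted here. */
struct nfsd_net {
	/* ... */
	struct list_head client_lru;
	struct list_head close_lru;	/* openowners kept around for CLOSE replay, per net */
	/* ... */
};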