nfs: make recovery state manager operations privileged
/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side XDR for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);

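/*
 * Establish a client ID with the server: SETCLIENTID followed by
 * SETCLIENTID_CONFIRM, advertising the callback port that matches the
 * client's address family.  On success, lease renewal is scheduled.
 */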
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
        unsigned short port;
        int status;

        port = nfs_callback_tcpport;
        if (clp->cl_addr.ss_family == AF_INET6)
                port = nfs_callback_tcpport6;

        status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred);
        if (status == 0)
                status = nfs4_proc_setclientid_confirm(clp, cred);
        if (status == 0)
                nfs4_schedule_state_renewal(clp);
        return status;
}

struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
        struct rpc_cred *cred = NULL;

        if (clp->cl_machine_cred != NULL)
                cred = get_rpccred(clp->cl_machine_cred);
        return cred;
}

static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = clp->cl_machine_cred;
        clp->cl_machine_cred = NULL;
        spin_unlock(&clp->cl_lock);
        if (cred != NULL)
                put_rpccred(cred);
}

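/*
 * Pick a credential for lease renewal: the credential of the first
 * state owner that still holds open state.  The caller must hold
 * clp->cl_lock.
 */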
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct rpc_cred *cred = NULL;

        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                if (list_empty(&sp->so_states))
                        continue;
                cred = get_rpccred(sp->so_cred);
                break;
        }
        return cred;
}

#if defined(CONFIG_NFS_V4_1)

static int nfs41_setup_state_renewal(struct nfs_client *clp)
{
        int status;
        struct nfs_fsinfo fsinfo;

        status = nfs4_proc_get_lease_time(clp, &fsinfo);
        if (status == 0) {
                /* Update lease time and schedule renewal */
                spin_lock(&clp->cl_lock);
                clp->cl_lease_time = fsinfo.lease_time * HZ;
                clp->cl_last_renewal = jiffies;
                spin_unlock(&clp->cl_lock);

                nfs4_schedule_state_renewal(clp);
        }

        return status;
}

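/*
 * End a session drain: wake the tasks queued on the forechannel slot
 * table, one per slot, at privileged priority so that recovery
 * operations get scheduled ahead of ordinary I/O.
 */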
static void nfs41_end_drain_session(struct nfs_client *clp,
                struct nfs4_session *ses)
{
        int max_slots;

        if (test_and_clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) {
                spin_lock(&ses->fc_slot_table.slot_tbl_lock);
                max_slots = ses->fc_slot_table.max_slots;
                while (max_slots--) {
                        struct rpc_task *task;

                        task = rpc_wake_up_next(&ses->fc_slot_table.
                                                slot_tbl_waitq);
                        if (!task)
                                break;
                        rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
                }
                spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
        }
}

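/*
 * Mark the session as draining and, if any slots are still in use,
 * wait for the last outstanding request to complete.  New requests
 * queue on the slot table waitqueue until the drain ends.
 */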
static int nfs41_begin_drain_session(struct nfs_client *clp,
                struct nfs4_session *ses)
{
        struct nfs4_slot_table *tbl = &ses->fc_slot_table;

        spin_lock(&tbl->slot_tbl_lock);
        set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
        if (tbl->highest_used_slotid != -1) {
                INIT_COMPLETION(ses->complete);
                spin_unlock(&tbl->slot_tbl_lock);
                return wait_for_completion_interruptible(&ses->complete);
        }
        spin_unlock(&tbl->slot_tbl_lock);
        return 0;
}

int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
        int status;

        status = nfs41_begin_drain_session(clp, clp->cl_session);
        if (status != 0)
                goto out;
        status = nfs4_proc_exchange_id(clp, cred);
        if (status != 0)
                goto out;
        status = nfs4_proc_create_session(clp);
        if (status != 0)
                goto out;
        nfs41_end_drain_session(clp, clp->cl_session);
        nfs41_setup_state_renewal(clp);
        nfs_mark_client_ready(clp, NFS_CS_READY);
out:
        return status;
}

struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = nfs4_get_machine_cred_locked(clp);
        spin_unlock(&clp->cl_lock);
        return cred;
}

#endif /* CONFIG_NFS_V4_1 */

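/*
 * Choose a credential for SETCLIENTID: prefer the machine credential,
 * falling back to the credential of the first state owner in the
 * client's red-black tree.
 */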
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct rpc_cred *cred;

        spin_lock(&clp->cl_lock);
        cred = nfs4_get_machine_cred_locked(clp);
        if (cred != NULL)
                goto out;
        pos = rb_first(&clp->cl_state_owners);
        if (pos != NULL) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                cred = get_rpccred(sp->so_cred);
        }
out:
        spin_unlock(&clp->cl_lock);
        return cred;
}

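/*
 * Insert a randomly chosen id of at most maxbits bits (and at least
 * minval) into the tree of ids in use.  On a collision, walk forward
 * from the colliding node until a free id is found, wrapping back to
 * minval at the end of the id space.
 */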
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
                __u64 minval, int maxbits)
{
        struct rb_node **p, *parent;
        struct nfs_unique_id *pos;
        __u64 mask = ~0ULL;

        if (maxbits < 64)
                mask = (1ULL << maxbits) - 1ULL;

        /* Ensure distribution is more or less flat */
        get_random_bytes(&new->id, sizeof(new->id));
        new->id &= mask;
        if (new->id < minval)
                new->id += minval;
retry:
        p = &root->rb_node;
        parent = NULL;

        while (*p != NULL) {
                parent = *p;
                pos = rb_entry(parent, struct nfs_unique_id, rb_node);

                if (new->id < pos->id)
                        p = &(*p)->rb_left;
                else if (new->id > pos->id)
                        p = &(*p)->rb_right;
                else
                        goto id_exists;
        }
        rb_link_node(&new->rb_node, parent, p);
        rb_insert_color(&new->rb_node, root);
        return;
id_exists:
        for (;;) {
                new->id++;
                if (new->id < minval || (new->id & mask) != new->id) {
                        new->id = minval;
                        break;
                }
                parent = rb_next(parent);
                if (parent == NULL)
                        break;
                pos = rb_entry(parent, struct nfs_unique_id, rb_node);
                if (new->id < pos->id)
                        break;
        }
        goto retry;
}

static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
{
        rb_erase(&id->rb_node, root);
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs_client *clp = server->nfs_client;
        struct rb_node **p = &clp->cl_state_owners.rb_node,
                       *parent = NULL;
        struct nfs4_state_owner *sp, *res = NULL;

        while (*p != NULL) {
                parent = *p;
                sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

                if (server < sp->so_server) {
                        p = &parent->rb_left;
                        continue;
                }
                if (server > sp->so_server) {
                        p = &parent->rb_right;
                        continue;
                }
                if (cred < sp->so_cred)
                        p = &parent->rb_left;
                else if (cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
                        atomic_inc(&sp->so_count);
                        res = sp;
                        break;
                }
        }
        return res;
}

static struct nfs4_state_owner *
nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
{
        struct rb_node **p = &clp->cl_state_owners.rb_node,
                       *parent = NULL;
        struct nfs4_state_owner *sp;

        while (*p != NULL) {
                parent = *p;
                sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

                if (new->so_server < sp->so_server) {
                        p = &parent->rb_left;
                        continue;
                }
                if (new->so_server > sp->so_server) {
                        p = &parent->rb_right;
                        continue;
                }
                if (new->so_cred < sp->so_cred)
                        p = &parent->rb_left;
                else if (new->so_cred > sp->so_cred)
                        p = &parent->rb_right;
                else {
                        atomic_inc(&sp->so_count);
                        return sp;
                }
        }
        nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
        rb_link_node(&new->so_client_node, parent, p);
        rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
        return new;
}

static void
nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
{
        if (!RB_EMPTY_NODE(&sp->so_client_node))
                rb_erase(&sp->so_client_node, &clp->cl_state_owners);
        nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
        struct nfs4_state_owner *sp;

        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
        if (!sp)
                return NULL;
        spin_lock_init(&sp->so_lock);
        INIT_LIST_HEAD(&sp->so_states);
        INIT_LIST_HEAD(&sp->so_delegations);
        rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
        sp->so_seqid.sequence = &sp->so_sequence;
        spin_lock_init(&sp->so_sequence.lock);
        INIT_LIST_HEAD(&sp->so_sequence.list);
        atomic_set(&sp->so_count, 1);
        return sp;
}

static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
        if (!RB_EMPTY_NODE(&sp->so_client_node)) {
                struct nfs_client *clp = sp->so_client;

                spin_lock(&clp->cl_lock);
                rb_erase(&sp->so_client_node, &clp->cl_state_owners);
                RB_CLEAR_NODE(&sp->so_client_node);
                spin_unlock(&clp->cl_lock);
        }
}

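/*
 * Look up the state owner for the (server, cred) pair, allocating and
 * inserting a new one if none exists.  If the insert loses a race with
 * a concurrent allocation, the existing owner is returned and the new
 * allocation is freed.
 */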
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs_client *clp = server->nfs_client;
        struct nfs4_state_owner *sp, *new;

        spin_lock(&clp->cl_lock);
        sp = nfs4_find_state_owner(server, cred);
        spin_unlock(&clp->cl_lock);
        if (sp != NULL)
                return sp;
        new = nfs4_alloc_state_owner();
        if (new == NULL)
                return NULL;
        new->so_client = clp;
        new->so_server = server;
        new->so_cred = cred;
        spin_lock(&clp->cl_lock);
        sp = nfs4_insert_state_owner(clp, new);
        spin_unlock(&clp->cl_lock);
        if (sp == new)
                get_rpccred(cred);
        else {
                rpc_destroy_wait_queue(&new->so_sequence.wait);
                kfree(new);
        }
        return sp;
}

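/*
 * Drop a reference to a state owner; on the final put, remove it from
 * the client's tree and free it.
 */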
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs_client *clp = sp->so_client;
        struct rpc_cred *cred = sp->so_cred;

        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                return;
        nfs4_remove_state_owner(clp, sp);
        spin_unlock(&clp->cl_lock);
        rpc_destroy_wait_queue(&sp->so_sequence.wait);
        put_rpccred(cred);
        kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
        struct nfs4_state *state;

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;
        atomic_set(&state->count, 1);
        INIT_LIST_HEAD(&state->lock_states);
        spin_lock_init(&state->state_lock);
        seqlock_init(&state->seqlock);
        return state;
}

void
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
{
        if (state->state == fmode)
                return;
        /* NB! List reordering - see the reclaim code for why.  */
        if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
                if (fmode & FMODE_WRITE)
                        list_move(&state->open_states, &state->owner->so_states);
                else
                        list_move_tail(&state->open_states, &state->owner->so_states);
        }
        state->state = fmode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                if (state->owner != owner)
                        continue;
                if (atomic_inc_not_zero(&state->count))
                        return state;
        }
        return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
        kfree(state);
}

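/*
 * Find the open state for (inode, owner), or allocate a new one.  The
 * lookup is repeated under both the owner and inode locks so that a
 * state inserted by a concurrent caller is found rather than
 * duplicated.
 */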
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs4_state *state, *new;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        spin_unlock(&inode->i_lock);
        if (state)
                goto out;
        new = nfs4_alloc_open_state();
        spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        if (state == NULL && new != NULL) {
                state = new;
                state->owner = owner;
                atomic_inc(&owner->so_count);
                list_add(&state->inode_states, &nfsi->open_states);
                state->inode = igrab(inode);
                spin_unlock(&inode->i_lock);
                /* Note: The reclaim code dictates that we add stateless
                 * and read-only stateids to the end of the list */
                list_add_tail(&state->open_states, &owner->so_states);
                spin_unlock(&owner->so_lock);
        } else {
                spin_unlock(&inode->i_lock);
                spin_unlock(&owner->so_lock);
                if (new)
                        nfs4_free_open_state(new);
        }
out:
        return state;
}

void nfs4_put_open_state(struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;

        if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
                return;
        spin_lock(&inode->i_lock);
        list_del(&state->inode_states);
        list_del(&state->open_states);
        spin_unlock(&inode->i_lock);
        spin_unlock(&owner->so_lock);
        iput(inode);
        nfs4_free_open_state(state);
        nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait)
{
        struct nfs4_state_owner *owner = state->owner;
        int call_close = 0;
        fmode_t newstate;

        atomic_inc(&owner->so_count);
        /* Protect against nfs4_find_state() */
        spin_lock(&owner->so_lock);
        switch (fmode & (FMODE_READ | FMODE_WRITE)) {
                case FMODE_READ:
                        state->n_rdonly--;
                        break;
                case FMODE_WRITE:
                        state->n_wronly--;
                        break;
                case FMODE_READ|FMODE_WRITE:
                        state->n_rdwr--;
        }
        newstate = FMODE_READ|FMODE_WRITE;
        if (state->n_rdwr == 0) {
                if (state->n_rdonly == 0) {
                        newstate &= ~FMODE_READ;
                        call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
                        call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
                }
                if (state->n_wronly == 0) {
                        newstate &= ~FMODE_WRITE;
                        call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
                        call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
                }
                if (newstate == 0)
                        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        }
        nfs4_state_set_mode_locked(state, newstate);
        spin_unlock(&owner->so_lock);

        if (!call_close) {
                nfs4_put_open_state(state);
                nfs4_put_state_owner(owner);
        } else
                nfs4_do_close(path, state, wait);
}

void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
        __nfs4_close(path, state, fmode, 0);
}

void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
        __nfs4_close(path, state, fmode, 1);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *pos;
        list_for_each_entry(pos, &state->lock_states, ls_locks) {
                if (pos->ls_owner != fl_owner)
                        continue;
                atomic_inc(&pos->ls_count);
                return pos;
        }
        return NULL;
}

/*
 * Allocate and initialise a new lock_state for the given open state
 * and lock owner, assigning it a unique lock-owner id.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;
        struct nfs_client *clp = state->owner->so_client;

        lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
        if (lsp == NULL)
                return NULL;
        rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
        spin_lock_init(&lsp->ls_sequence.lock);
        INIT_LIST_HEAD(&lsp->ls_sequence.list);
        lsp->ls_seqid.sequence = &lsp->ls_sequence;
        atomic_set(&lsp->ls_count, 1);
        lsp->ls_state = state;
        lsp->ls_owner = fl_owner;
        spin_lock(&clp->cl_lock);
        nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
        spin_unlock(&clp->cl_lock);
        INIT_LIST_HEAD(&lsp->ls_locks);
        return lsp;
}

static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs_client *clp = lsp->ls_state->owner->so_client;

        spin_lock(&clp->cl_lock);
        nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
        spin_unlock(&clp->cl_lock);
        rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
        kfree(lsp);
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
        struct nfs4_lock_state *lsp, *new = NULL;

        for (;;) {
                spin_lock(&state->state_lock);
                lsp = __nfs4_find_lock_state(state, owner);
                if (lsp != NULL)
                        break;
                if (new != NULL) {
                        list_add(&new->ls_locks, &state->lock_states);
                        set_bit(LK_STATE_IN_USE, &state->flags);
                        lsp = new;
                        new = NULL;
                        break;
                }
                spin_unlock(&state->state_lock);
                new = nfs4_alloc_lock_state(state, owner);
                if (new == NULL)
                        return NULL;
        }
        spin_unlock(&state->state_lock);
        if (new != NULL)
                nfs4_free_lock_state(new);
        return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs4_state *state;

        if (lsp == NULL)
                return;
        state = lsp->ls_state;
        if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
                return;
        list_del(&lsp->ls_locks);
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
        nfs4_free_lock_state(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

        dst->fl_u.nfs4_fl.owner = lsp;
        atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
        nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static const struct file_lock_operations nfs4_fl_lock_ops = {
        .fl_copy_lock = nfs4_fl_copy_lock,
        .fl_release_private = nfs4_fl_release_lock,
};

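/*
 * Attach a lock_state to the file_lock so that the lock stateid can be
 * found from the VFS lock.  The file_lock takes a reference that is
 * dropped via fl_release_private.
 */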
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
        struct nfs4_lock_state *lsp;

        if (fl->fl_ops != NULL)
                return 0;
        lsp = nfs4_get_lock_state(state, fl->fl_owner);
        if (lsp == NULL)
                return -ENOMEM;
        fl->fl_u.nfs4_fl.owner = lsp;
        fl->fl_ops = &nfs4_fl_lock_ops;
        return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;
        int seq;

        do {
                seq = read_seqbegin(&state->seqlock);
                memcpy(dst, &state->stateid, sizeof(*dst));
        } while (read_seqretry(&state->seqlock, seq));
        if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
                return;

        spin_lock(&state->state_lock);
        lsp = __nfs4_find_lock_state(state, fl_owner);
        if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
                memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
        spin_unlock(&state->state_lock);
        nfs4_put_lock_state(lsp);
}

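/*
 * Allocate a seqid tracking structure tied to the given counter.  The
 * seqid is queued on the counter's sequence list by
 * nfs_wait_on_sequence() and removed again in nfs_free_seqid().
 */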
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
        struct nfs_seqid *new;

        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (new != NULL) {
                new->sequence = counter;
                INIT_LIST_HEAD(&new->list);
        }
        return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
        if (!list_empty(&seqid->list)) {
                struct rpc_sequence *sequence = seqid->sequence->sequence;

                spin_lock(&sequence->lock);
                list_del(&seqid->list);
                spin_unlock(&sequence->lock);
                rpc_wake_up(&sequence->wait);
        }
        kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
        BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
        switch (status) {
                case 0:
                        break;
                case -NFS4ERR_BAD_SEQID:
                        if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
                                return;
                        printk(KERN_WARNING "NFS: v4 server returned a bad"
                                        " sequence-id error on an"
                                        " unconfirmed sequence %p!\n",
                                        seqid->sequence);
                        /* Fall through: BAD_SEQID on an unconfirmed
                         * sequence does not mutate the seqid either. */
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_BAD_STATEID:
                case -NFS4ERR_BADXDR:
                case -NFS4ERR_RESOURCE:
                case -NFS4ERR_NOFILEHANDLE:
                        /* Non-seqid mutating errors */
                        return;
        }
        /*
         * Note: no locking needed as we are guaranteed to be first
         * on the sequence list
         */
        seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
        struct nfs4_state_owner *sp = container_of(seqid->sequence,
                        struct nfs4_state_owner, so_seqid);
        struct nfs_server *server = sp->so_server;

        if (status == -NFS4ERR_BAD_SEQID)
                nfs4_drop_state_owner(sp);
        if (!nfs4_has_session(server->nfs_client))
                nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
        nfs_increment_seqid(status, seqid);
}

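/*
 * Serialise seqid-mutating operations: queue the seqid on its sequence
 * list and, unless it is at the head, put the task to sleep on the
 * sequence waitqueue and return -EAGAIN.
 */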
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;
        int status = 0;

        spin_lock(&sequence->lock);
        if (list_empty(&seqid->list))
                list_add_tail(&seqid->list, &sequence->list);
        if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
                goto unlock;
        rpc_sleep_on(&sequence->wait, task, NULL);
        status = -EAGAIN;
unlock:
        spin_unlock(&sequence->lock);
        return status;
}

static int nfs4_run_state_manager(void *);

static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
{
        smp_mb__before_clear_bit();
        clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
        smp_mb__after_clear_bit();
        wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
        rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule the nfs_client asynchronous state management routine
 */
void nfs4_schedule_state_manager(struct nfs_client *clp)
{
        struct task_struct *task;

        if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
                return;
        __module_get(THIS_MODULE);
        atomic_inc(&clp->cl_count);
        task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
                                rpc_peeraddr2str(clp->cl_rpcclient,
                                                        RPC_DISPLAY_ADDR));
        if (!IS_ERR(task))
                return;
        nfs4_clear_state_manager_bit(clp);
        nfs_put_client(clp);
        module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
        if (!clp)
                return;
        if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
        nfs4_schedule_state_manager(clp);
}

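/*
 * Flag open state for reclaim after a server reboot.  Returns 0 if the
 * state had already expired before the reboot (nograce reclaim takes
 * precedence), 1 if it was marked for reboot recovery.
 */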
static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
        set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
        /* Don't recover state that expired before the reboot */
        if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
                clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
                return 0;
        }
        set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
        set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
        return 1;
}

int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
        set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
        clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
        set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
        set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
        return 1;
}

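/*
 * Re-establish all byte-range locks held on the file backing 'state'
 * using the supplied recovery ops.  The BKL protects the walk of
 * inode->i_flock and is dropped around each recover_lock() call.
 */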
static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
{
        struct inode *inode = state->inode;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct file_lock *fl;
        int status = 0;

        if (inode->i_flock == NULL)
                return 0;

        /* Guard against delegation returns and new lock/unlock calls */
        down_write(&nfsi->rwsem);
        /* Protect inode->i_flock using the BKL */
        lock_kernel();
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if (nfs_file_open_context(fl->fl_file)->state != state)
                        continue;
                unlock_kernel();
                status = ops->recover_lock(state, fl);
                switch (status) {
                        case 0:
                                break;
                        case -ESTALE:
                        case -NFS4ERR_ADMIN_REVOKED:
                        case -NFS4ERR_STALE_STATEID:
                        case -NFS4ERR_BAD_STATEID:
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                        case -NFS4ERR_STALE_CLIENTID:
                        case -NFS4ERR_BADSESSION:
                        case -NFS4ERR_BADSLOT:
                        case -NFS4ERR_BAD_HIGH_SLOT:
                        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                                goto out;
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __func__, status);
                                /* Fall through */
                        case -ENOMEM:
                        case -NFS4ERR_DENIED:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                /* kill_proc(fl->fl_pid, SIGLOST, 1); */
                                status = 0;
                }
                lock_kernel();
        }
        unlock_kernel();
out:
        up_write(&nfsi->rwsem);
        return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;
        int status = 0;

        /* Note: we rely on the sp->so_states list being ordered
         * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
         * states first.
         * This is needed to ensure that the server won't give us any
         * read delegations that we have to return if, say, we are
         * recovering after a network partition or a reboot from a
         * server that doesn't support a grace period.
         */
restart:
        spin_lock(&sp->so_lock);
        list_for_each_entry(state, &sp->so_states, open_states) {
                if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
                        continue;
                if (state->state == 0)
                        continue;
                atomic_inc(&state->count);
                spin_unlock(&sp->so_lock);
                status = ops->recover_open(sp, state);
                if (status >= 0) {
                        status = nfs4_reclaim_locks(state, ops);
                        if (status >= 0) {
                                list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                        if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                                printk(KERN_ERR "%s: Lock reclaim failed!\n",
                                                                __func__);
                                }
                                nfs4_put_open_state(state);
                                goto restart;
                        }
                }
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __func__, status);
                                /* Fall through */
                        case -ENOENT:
                        case -ENOMEM:
                        case -ESTALE:
                                /*
                                 * Open state on this file cannot be recovered
                                 * All we can do is revert to using the zero stateid.
                                 */
                                memset(state->stateid.data, 0,
                                        sizeof(state->stateid.data));
                                /* Mark the file as being 'closed' */
                                state->state = 0;
                                break;
                        case -NFS4ERR_ADMIN_REVOKED:
                        case -NFS4ERR_STALE_STATEID:
                        case -NFS4ERR_BAD_STATEID:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                nfs4_state_mark_reclaim_nograce(sp->so_client, state);
                                break;
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                                nfs4_state_mark_reclaim_nograce(sp->so_client, state);
                                /* Fall through */
                        case -NFS4ERR_STALE_CLIENTID:
                        case -NFS4ERR_BADSESSION:
                        case -NFS4ERR_BADSLOT:
                        case -NFS4ERR_BAD_HIGH_SLOT:
                        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                                goto out_err;
                }
                nfs4_put_open_state(state);
                goto restart;
        }
        spin_unlock(&sp->so_lock);
        return 0;
out_err:
        nfs4_put_open_state(state);
        return status;
}

static void nfs4_clear_open_state(struct nfs4_state *state)
{
        struct nfs4_lock_state *lock;

        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
        clear_bit(NFS_O_RDWR_STATE, &state->flags);
        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                lock->ls_seqid.flags = 0;
                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
        }
}

static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct nfs4_state *state;

        /* Reset all sequence ids to zero */
        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                sp->so_seqid.flags = 0;
                spin_lock(&sp->so_lock);
                list_for_each_entry(state, &sp->so_states, open_states) {
                        if (mark_reclaim(clp, state))
                                nfs4_clear_open_state(state);
                }
                spin_unlock(&sp->so_lock);
        }
}

static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
        /* Mark all delegations for reclaim */
        nfs_delegation_mark_reclaim(clp);
        nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}

static void nfs4_reclaim_complete(struct nfs_client *clp,
                                 const struct nfs4_state_recovery_ops *ops)
{
        /* Notify the server we're done reclaiming our state */
        if (ops->reclaim_complete)
                (void)ops->reclaim_complete(clp);
}

static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
        struct nfs4_state_owner *sp;
        struct rb_node *pos;
        struct nfs4_state *state;

        if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
                return;

        nfs4_reclaim_complete(clp,
                nfs4_reboot_recovery_ops[clp->cl_minorversion]);

        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                spin_lock(&sp->so_lock);
                list_for_each_entry(state, &sp->so_states, open_states) {
                        if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
                                continue;
                        nfs4_state_mark_reclaim_nograce(clp, state);
                }
                spin_unlock(&sp->so_lock);
        }

        nfs_delegation_reap_unclaimed(clp);
}

static void nfs_delegation_clear_all(struct nfs_client *clp)
{
        nfs_delegation_mark_reclaim(clp);
        nfs_delegation_reap_unclaimed(clp);
}

static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
{
        nfs_delegation_clear_all(clp);
        nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}

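/*
 * Map an error seen during recovery onto the next recovery action:
 * lease re-establishment, nograce reclaim, or a session reset.
 * Returns 0 for errors that are fully handled here, otherwise the
 * original error.
 */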
static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
        switch (error) {
                case -NFS4ERR_CB_PATH_DOWN:
                        nfs_handle_cb_pathdown(clp);
                        return 0;
                case -NFS4ERR_NO_GRACE:
                        nfs4_state_end_reclaim_reboot(clp);
                        return 0;
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_LEASE_MOVED:
                        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                        nfs4_state_end_reclaim_reboot(clp);
                        nfs4_state_start_reclaim_reboot(clp);
                        break;
                case -NFS4ERR_EXPIRED:
                        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                        nfs4_state_start_reclaim_nograce(clp);
                        break;
                case -NFS4ERR_BADSESSION:
                case -NFS4ERR_BADSLOT:
                case -NFS4ERR_BAD_HIGH_SLOT:
                case -NFS4ERR_DEADSESSION:
                case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                case -NFS4ERR_SEQ_FALSE_RETRY:
                case -NFS4ERR_SEQ_MISORDERED:
                        set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
                        /* Zero session reset errors */
                        return 0;
        }
        return error;
}

static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
{
        struct rb_node *pos;
        int status = 0;

restart:
        spin_lock(&clp->cl_lock);
        for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
                struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
                if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
                        continue;
                atomic_inc(&sp->so_count);
                spin_unlock(&clp->cl_lock);
                status = nfs4_reclaim_open_state(sp, ops);
                if (status < 0) {
                        set_bit(ops->owner_flag_bit, &sp->so_flags);
                        nfs4_put_state_owner(sp);
                        return nfs4_recovery_handle_error(clp, status);
                }
                nfs4_put_state_owner(sp);
                goto restart;
        }
        spin_unlock(&clp->cl_lock);
        return status;
}

static int nfs4_check_lease(struct nfs_client *clp)
{
        struct rpc_cred *cred;
        struct nfs4_state_maintenance_ops *ops =
                nfs4_state_renewal_ops[clp->cl_minorversion];
        int status = -NFS4ERR_EXPIRED;

        /* Is the client already known to have an expired lease? */
        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                return 0;
        spin_lock(&clp->cl_lock);
        cred = ops->get_state_renewal_cred_locked(clp);
        spin_unlock(&clp->cl_lock);
        if (cred == NULL) {
                cred = nfs4_get_setclientid_cred(clp);
                if (cred == NULL)
                        goto out;
        }
        status = ops->renew_lease(clp, cred);
        put_rpccred(cred);
out:
        return nfs4_recovery_handle_error(clp, status);
}

static int nfs4_reclaim_lease(struct nfs_client *clp)
{
        struct rpc_cred *cred;
        struct nfs4_state_recovery_ops *ops =
                nfs4_reboot_recovery_ops[clp->cl_minorversion];
        int status = -ENOENT;

        cred = ops->get_clid_cred(clp);
        if (cred != NULL) {
                status = ops->establish_clid(clp, cred);
                put_rpccred(cred);
                /* Handle case where the user hasn't set up machine creds */
                if (status == -EACCES && cred == clp->cl_machine_cred) {
                        nfs4_clear_machine_cred(clp);
                        status = -EAGAIN;
                }
                if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
                        status = -EPROTONOSUPPORT;
        }
        return status;
}

#ifdef CONFIG_NFS_V4_1
void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
        if (!flags)
                return;
        else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) {
                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                nfs4_state_start_reclaim_reboot(clp);
                nfs4_schedule_state_recovery(clp);
        } else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
                            SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
                            SEQ4_STATUS_ADMIN_STATE_REVOKED |
                            SEQ4_STATUS_RECALLABLE_STATE_REVOKED |
                            SEQ4_STATUS_LEASE_MOVED)) {
                set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
                nfs4_state_start_reclaim_nograce(clp);
                nfs4_schedule_state_recovery(clp);
        } else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
                            SEQ4_STATUS_BACKCHANNEL_FAULT |
                            SEQ4_STATUS_CB_PATH_DOWN_SESSION))
                nfs_expire_all_delegations(clp);
}

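/*
 * Destroy and re-create the session after draining it.  Tasks are only
 * woken once the new session exists, so recovery proceeds before
 * ordinary I/O resumes; if the lease has meanwhile expired, the drain
 * is left in place for the lease recovery pass.
 */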
static int nfs4_reset_session(struct nfs_client *clp)
{
        struct nfs4_session *ses = clp->cl_session;
        int status;

        status = nfs41_begin_drain_session(clp, ses);
        if (status != 0)
                return status;

        status = nfs4_proc_destroy_session(clp->cl_session);
        if (status && status != -NFS4ERR_BADSESSION &&
            status != -NFS4ERR_DEADSESSION) {
                status = nfs4_recovery_handle_error(clp, status);
                goto out;
        }

        memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
        status = nfs4_proc_create_session(clp);
        if (status)
                status = nfs4_recovery_handle_error(clp, status);

out:
        /*
         * Let the state manager reestablish state
         * without waking other tasks yet.
         */
        if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
                /* Wake up the next rpc task */
                nfs41_end_drain_session(clp, ses);
                if (status == 0)
                        nfs41_setup_state_renewal(clp);
        }
        return status;
}

#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
#endif /* CONFIG_NFS_V4_1 */

/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
 * on EXCHANGE_ID for v4.1
 */
static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
{
        if (nfs4_has_session(clp)) {
                switch (status) {
                        case -NFS4ERR_DELAY:
                        case -NFS4ERR_CLID_INUSE:
                        case -EAGAIN:
                                break;

                        case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
                                                 * in nfs4_exchange_id */
                        default:
                                return;
                }
        }
        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
}

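/*
 * Main loop of the state manager thread.  Each pass handles, in order:
 * lease re-establishment, lease checking, session reset, reboot
 * (grace-period) reclaim, nograce reclaim, and delegation returns.
 * Any step that expires the lease or resets the session restarts the
 * loop so that earlier steps run again first.
 */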
static void nfs4_state_manager(struct nfs_client *clp)
{
        int status = 0;

        /* Ensure exclusive access to NFSv4 state */
        for (;;) {
                if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
                        /* We're going to have to re-establish a clientid */
                        status = nfs4_reclaim_lease(clp);
                        if (status) {
                                nfs4_set_lease_expired(clp, status);
                                if (test_bit(NFS4CLNT_LEASE_EXPIRED,
                                                        &clp->cl_state))
                                        continue;
                                if (clp->cl_cons_state ==
                                                        NFS_CS_SESSION_INITING)
                                        nfs_mark_client_ready(clp, status);
                                goto out_error;
                        }
                        clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
                        set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
                }

                if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
                        status = nfs4_check_lease(clp);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                                continue;
                        if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
                                goto out_error;
                }

                /* Initialize or reset the session */
                if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
                   && nfs4_has_session(clp)) {
                        status = nfs4_reset_session(clp);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                                continue;
                        if (status < 0)
                                goto out_error;
                }

                /* First recover reboot state... */
                if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
                        status = nfs4_do_reclaim(clp,
                                nfs4_reboot_recovery_ops[clp->cl_minorversion]);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
                            test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
                                continue;
                        nfs4_state_end_reclaim_reboot(clp);
                        if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
                                continue;
                        if (status < 0)
                                goto out_error;
                }

                /* Now recover expired state... */
                if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
                        status = nfs4_do_reclaim(clp,
                                nfs4_nograce_recovery_ops[clp->cl_minorversion]);
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
                            test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
                            test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
                                continue;
                        if (status < 0)
                                goto out_error;
                }

                if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
                        nfs_client_return_marked_delegations(clp);
                        continue;
                }

                nfs4_clear_state_manager_bit(clp);
                /* Did we race with an attempt to give us more work? */
                if (clp->cl_state == 0)
                        break;
                if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
                        break;
        }
        return;
out_error:
        printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
                        " with error %d\n", clp->cl_hostname, -status);
        nfs4_clear_state_manager_bit(clp);
}

static int nfs4_run_state_manager(void *ptr)
{
        struct nfs_client *clp = ptr;

        allow_signal(SIGKILL);
        nfs4_state_manager(clp);
        nfs_put_client(clp);
        module_put_and_exit(0);
        return 0;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */