NFSv4: Return any delegations before sillyrenaming the file
/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side XDR for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  Implementation of the NFSv4 state model.  For the time being,
 *  this is minimal, but will be made much more complex in a
 *  subsequent patch.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);

void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
	if (server->mnt_path) {
		kfree(server->mnt_path);
		server->mnt_path = NULL;
	}
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memset(clp, 0, sizeof(*clp));
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);
	INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	init_waitqueue_head(&clp->cl_waitq);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_rpcclient = ERR_PTR(-EINVAL);
	clp->cl_boot_time = CURRENT_TIME;
	clp->cl_state = 1 << NFS4CLNT_OK;
	return clp;
}
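
/*
 * Note on the callback channel: every successful nfs4_alloc_client() is
 * paired with one nfs_callback_up(), and nfs4_free_client() (or the
 * allocation failure path above) issues the matching nfs_callback_down(),
 * so the in-kernel callback service is presumably kept running for as
 * long as at least one nfs4_client exists.
 */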

static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	BUG_ON(!list_empty(&clp->cl_state_owners));
	if (clp->cl_cred)
		put_rpccred(clp->cl_cred);
	nfs_idmap_delete(clp);
	if (!IS_ERR(clp->cl_rpcclient))
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}

static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;
	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;
	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}

struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new)
		nfs4_free_client(new);
	return clp;
}
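
/*
 * Illustrative sketch only (the real callers live on the mount path,
 * not in this file):
 *
 *	clp = nfs4_get_client(&addr);
 *	if (clp == NULL)
 *		return -ENOMEM;
 *	...
 *	nfs4_put_client(clp);
 *
 * where 'addr' holds the server's IPv4 address.  nfs4_get_client()
 * either takes a reference on an existing nfs4_client for that address
 * or inserts the one it allocated while the spinlock was dropped.
 */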

void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}

static int __nfs4_init_client(struct nfs4_client *clp)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}

int nfs4_init_client(struct nfs4_client *clp)
{
	return nfs4_map_errors(__nfs4_init_client(clp));
}
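
/*
 * Background note: in the NFSv4 protocol a client identity is
 * established with SETCLIENTID (which also registers the callback
 * program and port) and only becomes usable once acknowledged with
 * SETCLIENTID_CONFIRM; after that the lease has to be kept alive,
 * which is why __nfs4_init_client() kicks off state renewal on success.
 */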

u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id++;
}

static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}

void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 * with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	if (new)
		kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}
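
/*
 * Illustrative sketch only: the OPEN path is expected to pair these as
 *
 *	sp = nfs4_get_state_owner(server, cred);
 *	if (sp == NULL)
 *		return -ENOMEM;
 *	... perform the OPEN using sp ...
 *	nfs4_put_state_owner(sp);
 *
 * with clp->cl_sem held across the pair, as the comments above and
 * below require.
 */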

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	cred = NULL;
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->state = 0;
	state->nreaders = 0;
	state->nwriters = 0;
	state->flags = 0;
	memset(state->stateid.data, 0, sizeof(state->stateid.data));
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	return state;
}

void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
	if (state->state == mode)
		return;
	/* NB! List reordering - see the reclaim code for why. */
	if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (mode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	if (mode == 0)
		list_del_init(&state->inode_states);
	state->state = mode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->state == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}
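
/*
 * Note: nfs4_get_client(), nfs4_get_state_owner(), nfs4_get_open_state()
 * and nfs4_get_lock_state() all follow broadly the same optimistic
 * pattern: search under the lock, allocate a candidate with the lock
 * dropped (allocation may sleep), then search again under the lock and
 * either insert the candidate or free it because another caller raced
 * in first.
 */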

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	int oldstate, newstate = 0;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	if (mode & FMODE_READ)
		state->nreaders--;
	if (mode & FMODE_WRITE)
		state->nwriters--;
	oldstate = newstate = state->state;
	if (state->nreaders == 0)
		newstate &= ~FMODE_READ;
	if (state->nwriters == 0)
		newstate &= ~FMODE_WRITE;
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		nfs4_state_set_mode_locked(state, newstate);
		oldstate = newstate;
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);

	if (oldstate != newstate && nfs4_do_close(inode, state) == 0)
		return;
	nfs4_put_open_state(state);
	nfs4_put_state_owner(owner);
}
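
/*
 * Worked example for the mode accounting above (illustrative numbers,
 * assuming no delegation is held): a file with nreaders == 1 and
 * nwriters == 1 has state->state == (FMODE_READ | FMODE_WRITE).
 * Closing the writer drops nwriters to 0, so newstate becomes
 * FMODE_READ while oldstate still carries FMODE_WRITE; since
 * oldstate != newstate, nfs4_do_close() is then asked to bring the
 * server's view of the open into line.  With a delegation the new mode
 * is recorded locally and no RPC is sent from here.
 */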

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Allocate a new lock_state for the given lock owner.  The returned
 * structure is not yet linked into state->lock_states.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs4_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_seqid.sequence = &state->owner->so_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};

int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}
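
/*
 * Note on nfs4_fl_lock_ops: once nfs4_set_lock_state() has attached a
 * lock_state to a file_lock, the VFS lock code may copy and free
 * file_lock structures behind our back; the fl_copy_lock and
 * fl_release_private hooks above keep ls_count in step with those
 * copies, so the nfs4_lock_state lives exactly as long as some
 * file_lock still points at it.
 */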

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;

	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
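
/*
 * In other words, a READ or WRITE is sent with the lock stateid of the
 * locking owner when one has been established on the file, and falls
 * back to the open stateid (copied first, above) otherwise.
 */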

struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		INIT_LIST_HEAD(&new->list);
	}
	return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;

	if (!list_empty(&seqid->list)) {
		spin_lock(&sequence->lock);
		list_del(&seqid->list);
		spin_unlock(&sequence->lock);
	}
	rpc_wake_up_next(&sequence->wait);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	};
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	return nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	return nfs_increment_seqid(status, seqid);
}

int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	if (sequence->list.next == &seqid->list)
		goto out;
	spin_lock(&sequence->lock);
	if (!list_empty(&sequence->list)) {
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;
	} else
		list_add(&seqid->list, &sequence->list);
	spin_unlock(&sequence->lock);
out:
	return status;
}
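
/*
 * The rpc_sequence queue above serialises seqid-mutating operations for
 * one owner: only the nfs_seqid on sequence->list may issue its RPC,
 * later tasks sleep on sequence->wait and get -EAGAIN, and
 * nfs_free_seqid() removes the finished entry and wakes the next waiter.
 */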

static int reclaimer(void *);
struct reclaimer_args {
	struct nfs4_client *clp;
	struct completion complete;
};

/*
 * State recovery routine
 */
void
nfs4_recover_state(void *data)
{
	struct nfs4_client *clp = (struct nfs4_client *)data;
	struct reclaimer_args args = {
		.clp = clp,
	};
	might_sleep();

	init_completion(&args.complete);

	if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
		goto out_failed_clear;
	wait_for_completion(&args.complete);
	return;
out_failed_clear:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
	if (!clp)
		return;
	if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
		schedule_work(&clp->cl_recoverd);
}
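
/*
 * Recovery is gated on the NFS4CLNT_OK bit: the first caller to clear
 * it queues cl_recoverd, and the bit is only set again (and waiters
 * woken) once the reclaimer thread has finished, so concurrent callers
 * cannot queue recovery twice.
 */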

static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_owner, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -ENOENT:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
		spin_unlock(&sp->so_lock);
	}
}

static int reclaimer(void *ptr)
{
	struct reclaimer_args *args = (struct reclaimer_args *)ptr;
	struct nfs4_client *clp = args->clp;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	int status = 0;

	daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
	allow_signal(SIGKILL);

	atomic_inc(&clp->cl_count);
	complete(&args->complete);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	status = nfs4_proc_renew(clp);
	switch (status) {
		case 0:
		case -NFS4ERR_CB_PATH_DOWN:
			goto out;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			ops = &nfs4_reboot_recovery_ops;
			break;
		default:
			ops = &nfs4_network_partition_recovery_ops;
	};
	nfs4_state_mark_reclaim(clp);
	status = __nfs4_init_client(clp);
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on cl->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	up_write(&clp->cl_sem);
	unlock_kernel();
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_put_client(clp);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
			NIPQUAD(clp->cl_addr.s_addr), -status);
	goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */