nfsd4: look up stateid's per clientid
/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/clnt.h>
#include "xdr4.h"
#include "vfs.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

/* Globals */
time_t nfsd4_lease = 90;	/* default lease time */
time_t nfsd4_grace = 90;
static time_t boot_time;
static stateid_t zerostateid;	/* bits all 0 */
static stateid_t onestateid;	/* bits all 1 */
static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zerostateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &onestateid, sizeof(stateid_t)))

/* forward declarations */
static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);

/* Locking: */

/* Currently used for almost all code touching nfsv4 state: */
static DEFINE_MUTEX(client_mutex);

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(recall_lock);

static struct kmem_cache *openowner_slab = NULL;
static struct kmem_cache *lockowner_slab = NULL;
static struct kmem_cache *file_slab = NULL;
static struct kmem_cache *stateid_slab = NULL;
static struct kmem_cache *deleg_slab = NULL;

void
nfs4_lock_state(void)
{
	mutex_lock(&client_mutex);
}

void
nfs4_unlock_state(void)
{
	mutex_unlock(&client_mutex);
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}
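
/*
 * A standalone sketch of the hash above (illustrative only; demo code,
 * not nfsd): opaque_hashval() is a classic multiplicative byte hash,
 * x = x * 37 + next_byte, and callers mask the result down to a
 * power-of-two table size.
 */
#if 0
#include <stdio.h>

static unsigned int demo_hash(const void *ptr, int nbytes)
{
	const unsigned char *cptr = ptr;
	unsigned int x = 0;

	while (nbytes--)
		x = x * 37 + *cptr++;
	return x;
}

int main(void)
{
	/* With a 256-bucket table (mask 0xff), as for open owners: */
	printf("bucket = %u\n", demo_hash("client-1", 8) & 0xff);
	return 0;
}
#endif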

static struct list_head del_recall_lru;

static inline void
put_nfs4_file(struct nfs4_file *fi)
{
	if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
		list_del(&fi->fi_hash);
		spin_unlock(&recall_lock);
		iput(fi->fi_inode);
		kmem_cache_free(file_slab, fi);
	}
}

static inline void
get_nfs4_file(struct nfs4_file *fi)
{
	atomic_inc(&fi->fi_ref);
}

static int num_delegations;
unsigned int max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for open owners */
#define OPEN_OWNER_HASH_BITS	8
#define OPEN_OWNER_HASH_SIZE	(1 << OPEN_OWNER_HASH_BITS)
#define OPEN_OWNER_HASH_MASK	(OPEN_OWNER_HASH_SIZE - 1)

static unsigned int open_ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	ret += clientid;
	return ret & OPEN_OWNER_HASH_MASK;
}

static struct list_head open_ownerstr_hashtbl[OPEN_OWNER_HASH_SIZE];

/* hash table for nfs4_file */
#define FILE_HASH_BITS	8
#define FILE_HASH_SIZE	(1 << FILE_HASH_BITS)

static unsigned int file_hashval(struct inode *ino)
{
	/* XXX: why are we hashing on inode pointer, anyway? */
	return hash_ptr(ino, FILE_HASH_BITS);
}

static struct list_head file_hashtbl[FILE_HASH_SIZE];

static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
	BUG_ON(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
	atomic_inc(&fp->fi_access[oflag]);
}

static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
	if (oflag == O_RDWR) {
		__nfs4_file_get_access(fp, O_RDONLY);
		__nfs4_file_get_access(fp, O_WRONLY);
	} else
		__nfs4_file_get_access(fp, oflag);
}

static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
{
	if (fp->fi_fds[oflag]) {
		fput(fp->fi_fds[oflag]);
		fp->fi_fds[oflag] = NULL;
	}
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	if (atomic_dec_and_test(&fp->fi_access[oflag])) {
		nfs4_file_put_fd(fp, oflag);
		/*
		 * It's also safe to get rid of the RDWR open *if*
		 * we no longer have need of the other kind of access
		 * or if we already have the other kind of open:
		 */
		if (fp->fi_fds[1 - oflag]
			|| atomic_read(&fp->fi_access[1 - oflag]) == 0)
			nfs4_file_put_fd(fp, O_RDWR);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	if (oflag == O_RDWR) {
		__nfs4_file_put_access(fp, O_RDONLY);
		__nfs4_file_put_access(fp, O_WRONLY);
	} else
		__nfs4_file_put_access(fp, oflag);
}
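
/*
 * A minimal model of the invariant the helpers above maintain
 * (illustrative only; plain ints instead of atomic_t and struct file):
 * an O_RDWR opener takes one reference on *each* of the O_RDONLY and
 * O_WRONLY counts, and the struct file for an access mode is only
 * dropped once the corresponding count reaches zero.
 */
#if 0
#include <assert.h>

int main(void)
{
	int model[2] = { 0, 0 };	/* ~ fi_access[O_RDONLY/O_WRONLY] */

	model[0]++;			/* OPEN for read */
	model[0]++; model[1]++;		/* OPEN for read/write: both refs */

	model[0]--; model[1]--;		/* CLOSE of the read/write open */

	/* The read-only opener still holds a reference: */
	assert(model[0] == 1 && model[1] == 0);
	return 0;
}
#endif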

static inline int get_new_stid(struct nfs4_stid *stid)
{
	static int min_stateid = 0;
	struct idr *stateids = &stid->sc_client->cl_stateids;
	int new_stid;
	int error;

	if (!idr_pre_get(stateids, GFP_KERNEL))
		return -ENOMEM;

	error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
	/*
	 * All this code is currently serialized; the preallocation
	 * above should still be ours:
	 */
	BUG_ON(error);
	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */

	min_stateid = new_stid + 1;
	if (min_stateid == INT_MAX)
		min_stateid = 0;
	return new_stid;
}
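
/*
 * The "always increase (mod INT_MAX)" floor, modeled without the idr
 * (a sketch; demo_advance_floor() is hypothetical, not nfsd code):
 */
#if 0
#include <limits.h>

static int demo_min_stateid;

/* After handing out an id, raise the floor so later allocations keep
 * growing; only when the floor hits INT_MAX do we wrap back to 0. */
static void demo_advance_floor(int allocated_id)
{
	demo_min_stateid = allocated_id + 1;
	if (demo_min_stateid == INT_MAX)
		demo_min_stateid = 0;
}
#endif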

static inline __be32 init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
{
	stateid_t *s = &stid->sc_stateid;
	int new_id;

	stid->sc_type = type;
	stid->sc_client = cl;
	s->si_opaque.so_clid = cl->cl_clientid;
	new_id = get_new_stid(stid);
	if (new_id < 0)
		return nfserr_jukebox;
	s->si_opaque.so_id = (u32)new_id;
	/* Will be incremented before return to client: */
	s->si_generation = 0;
	return 0;
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
{
	struct nfs4_delegation *dp;
	struct nfs4_file *fp = stp->st_file;
	__be32 status;

	dprintk("NFSD alloc_init_deleg\n");
	/*
	 * Major work on the lease subsystem (for example, to support
	 * callbacks on stat) will be required before we can support
	 * write delegations properly.
	 */
	if (type != NFS4_OPEN_DELEGATE_READ)
		return NULL;
	if (fp->fi_had_conflict)
		return NULL;
	if (num_delegations > max_delegations)
		return NULL;
	dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
	if (dp == NULL)
		return dp;
	status = init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
	if (status) {
		kmem_cache_free(deleg_slab, dp);
		return NULL;
	}
	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	num_delegations++;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	get_nfs4_file(fp);
	dp->dl_file = fp;
	dp->dl_type = type;
	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
	dp->dl_time = 0;
	atomic_set(&dp->dl_count, 1);
	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
	return dp;
}

void
nfs4_put_delegation(struct nfs4_delegation *dp)
{
	if (atomic_dec_and_test(&dp->dl_count)) {
		dprintk("NFSD: freeing dp %p\n", dp);
		put_nfs4_file(dp->dl_file);
		kmem_cache_free(deleg_slab, dp);
		num_delegations--;
	}
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	if (atomic_dec_and_test(&fp->fi_delegees)) {
		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
		fp->fi_lease = NULL;
		fput(fp->fi_deleg_file);
		fp->fi_deleg_file = NULL;
	}
}

static void unhash_stid(struct nfs4_stid *s)
{
	struct idr *stateids = &s->sc_client->cl_stateids;

	idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
}

/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
	unhash_stid(&dp->dl_stid);
	list_del_init(&dp->dl_perclnt);
	spin_lock(&recall_lock);
	list_del_init(&dp->dl_perfile);
	list_del_init(&dp->dl_recall_lru);
	spin_unlock(&recall_lock);
	nfs4_put_deleg_lease(dp->dl_file);
	nfs4_put_delegation(dp);
}

/*
 * SETCLIENTID state
 */

/* client_lock protects the client lru list and session hash table */
static DEFINE_SPINLOCK(client_lock);

/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS	4
#define CLIENT_HASH_SIZE	(1 << CLIENT_HASH_BITS)
#define CLIENT_HASH_MASK	(CLIENT_HASH_SIZE - 1)

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * reclaim_str_hashtbl[] holds known client info from previous reset/reboot,
 * used in reboot/reset lease grace period processing.
 *
 * conf_id_hashtbl[] and conf_str_hashtbl[] hold confirmed
 * setclientid_confirmed info.
 *
 * unconf_str_hashtbl[] and unconf_id_hashtbl[] hold unconfirmed
 * setclientid info.
 *
 * client_lru holds client queue ordered by nfs4_client.cl_time
 * for lease renewal.
 *
 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
 * for last close replay.
 */
static struct list_head reclaim_str_hashtbl[CLIENT_HASH_SIZE];
static int reclaim_str_hashtbl_size = 0;
static struct list_head conf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head conf_str_hashtbl[CLIENT_HASH_SIZE];
static struct list_head unconf_str_hashtbl[CLIENT_HASH_SIZE];
static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head client_lru;
static struct list_head close_lru;

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static void
set_access(unsigned int *access, unsigned long bmap) {
	int i;

	*access = 0;
	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			*access |= i;
	}
}

static void
set_deny(unsigned int *deny, unsigned long bmap) {
	int i;

	*deny = 0;
	for (i = 0; i < 4; i++) {
		if (test_bit(i, &bmap))
			*deny |= i;
	}
}

static int
test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
	unsigned int access, deny;

	set_access(&access, stp->st_access_bmap);
	set_deny(&deny, stp->st_deny_bmap);
	if ((access & open->op_share_deny) || (deny & open->op_share_access))
		return 0;
	return 1;
}
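
/*
 * A worked example of the bitmap scheme (illustrative): the share values
 * are NFS4_SHARE_ACCESS_READ = 1, _WRITE = 2, _BOTH = 3, and
 * st_access_bmap records each value any OPEN has used as a separate bit.
 */
#if 0
	unsigned long bmap = 0;
	unsigned int access;

	__set_bit(1, &bmap);	/* one OPEN asked for READ */
	__set_bit(3, &bmap);	/* a later OPEN asked for BOTH */

	set_access(&access, bmap);	/* ORs the set bit indices: 1 | 3 = 3 */
	/* test_share() now refuses any new open denying READ or WRITE. */
#endif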

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	BUG();
}

static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
{
	list_del(&stp->st_perfile);
	list_del(&stp->st_perstateowner);
}

static void close_generic_stateid(struct nfs4_ol_stateid *stp)
{
	int i;

	if (stp->st_access_bmap) {
		for (i = 1; i < 4; i++) {
			if (test_bit(i, &stp->st_access_bmap))
				nfs4_file_put_access(stp->st_file,
						nfs4_access_to_omode(i));
			__clear_bit(i, &stp->st_access_bmap);
		}
	}
	put_nfs4_file(stp->st_file);
	stp->st_file = NULL;
}

static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
	kmem_cache_free(stateid_slab, stp);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct file *file;

	unhash_generic_stateid(stp);
	unhash_stid(&stp->st_stid);
	file = find_any_file(stp->st_file);
	if (file)
		locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
	close_generic_stateid(stp);
	free_generic_stateid(stp);
}

static void unhash_lockowner(struct nfs4_lockowner *lo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&lo->lo_owner.so_strhash);
	list_del(&lo->lo_perstateid);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_lock_stateid(stp);
	}
}

static void release_lockowner(struct nfs4_lockowner *lo)
{
	unhash_lockowner(lo);
	nfs4_free_lockowner(lo);
}

static void
release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_lockowner *lo;

	while (!list_empty(&open_stp->st_lockowners)) {
		lo = list_entry(open_stp->st_lockowners.next,
				struct nfs4_lockowner, lo_perstateid);
		release_lockowner(lo);
	}
}

static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_generic_stateid(stp);
	release_stateid_lockowners(stp);
	close_generic_stateid(stp);
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_open_stateid(stp);
	unhash_stid(&stp->st_stid);
	free_generic_stateid(stp);
}

static void unhash_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&oo->oo_owner.so_strhash);
	list_del(&oo->oo_perclient);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_open_stateid(stp);
	}
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;

	if (s) {
		unhash_stid(&s->st_stid);
		free_generic_stateid(s);
		oo->oo_last_closed_stid = NULL;
	}
}

static void release_openowner(struct nfs4_openowner *oo)
{
	unhash_openowner(oo);
	list_del(&oo->oo_close_lru);
	release_last_closed_stateid(oo);
	nfs4_free_openowner(oo);
}

#define SESSION_HASH_SIZE	512
static struct list_head sessionid_hashtbl[SESSION_HASH_SIZE];

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}
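
/*
 * For reference, the 16-byte sessionid generated above is interpreted by
 * casting se_sessionid.data to struct nfsd4_sessionid (the three fields
 * assigned by gen_sessionid(); layout as declared in nfsd's state.h):
 */
#if 0
struct nfsd4_sessionid {
	clientid_t	clientid;	/* boot time + per-boot counter */
	u32		sequence;	/* hash_sessionid()'s key */
	u32		reserved;
};
#endif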

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ	(24 + 12 + 44)
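
/*
 * The 24 + 12 + 44 arithmetic, spelled out (a sketch; the word counts
 * follow from the XDR encodings named in the comment above):
 */
#if 0
enum {
	DEMO_RPC_HDR	= 6 * 4,	/* 24: xid, msg type, reply stat,
					 *     AUTH_NULL verifier (flavor +
					 *     length), accept stat */
	DEMO_CMPD_HDR	= 3 * 4,	/* 12: status, zero-length tag,
					 *     operation count */
	DEMO_SEQ_RESP	= 11 * 4,	/* 44: opcode, status, sessionid
					 *     (4 words), seqid, slotid,
					 *     highest slotid, target highest
					 *     slotid, status flags */
	DEMO_MIN_HDR_SEQ_SZ = DEMO_RPC_HDR + DEMO_CMPD_HDR + DEMO_SEQ_RESP,
};
#endif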

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
{
	return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
}

static int nfsd4_sanitize_slot_size(u32 size)
{
	size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
	size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);

	return size;
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections.  For now we just fail the create session.
 */
static int nfsd4_get_drc_mem(int slotsize, u32 num)
{
	int avail;

	num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);

	spin_lock(&nfsd_drc_lock);
	avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
			nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}
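
/*
 * A standalone model of the slot-count negotiation above (illustrative;
 * the 160-slot cap is an assumption standing in for
 * NFSD_MAX_SLOTS_PER_SESSION, and the NFSD_MAX_MEM_PER_SESSION clamp is
 * folded into max_mem for brevity):
 */
#if 0
static int demo_get_drc_mem(int slotsize, int num, int max_mem, int *mem_used)
{
	int avail = max_mem - *mem_used;

	if (num > 160)			/* per-session slot cap (assumed) */
		num = 160;
	if (num > avail / slotsize)	/* cut back to what memory allows */
		num = avail / slotsize;
	*mem_used += num * slotsize;
	return num;
}
/* E.g. slotsize 1024 and 16KB available cuts a request for 64 slots
 * back to 16. */
#endif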

static void nfsd4_put_drc_mem(int slotsize, int num)
{
	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * num;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(int slotsize, int numslots)
{
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		mem = sizeof(struct nfsd4_slot) + slotsize;
		new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}
	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
{
	u32 maxrpc = nfsd_serv->sv_max_mesg;

	new->maxreqs = numslots;
	new->maxresp_cached = min_t(u32, req->maxresp_cached,
					slotsize + NFSD_MIN_HDR_SEQ_SZ);
	new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
	new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
	new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	spin_unlock(&clp->cl_lock);
	nfsd4_probe_callback(clp);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
{
	struct nfsd4_conn *conn;
	int ret;

	conn = alloc_conn(rqstp, dir);
	if (!conn)
		return nfserr_jukebox;
	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	return nfs_ok;
}

static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (ses->se_flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;

	return nfsd4_new_conn(rqstp, ses, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

void free_session(struct kref *kref)
{
	struct nfsd4_session *ses;
	int mem;

	ses = container_of(kref, struct nfsd4_session, se_ref);
	nfsd4_del_conns(ses);
	spin_lock(&nfsd_drc_lock);
	mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
	nfsd_drc_mem_used -= mem;
	spin_unlock(&nfsd_drc_lock);
	free_session_slots(ses);
	kfree(ses);
}

static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	struct nfsd4_session *new;
	struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
	int numslots, slotsize;
	int status;
	int idx;

	/*
	 * Note decreasing slot size below client's request may
	 * make it difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.
	 */
	slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
	numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
	if (numslots < 1)
		return NULL;

	new = alloc_session(slotsize, numslots);
	if (!new) {
		nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
		return NULL;
	}
	init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	kref_init(&new->se_ref);
	idx = hash_sessionid(&new->se_sessionid);
	spin_lock(&client_lock);
	list_add(&new->se_hash, &sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);
	spin_unlock(&client_lock);

	status = nfsd4_new_conn_from_crses(rqstp, new);
	/* whoops: benny points out, status is ignored! (err, or bogus) */
	if (status) {
		free_session(&new->se_ref);
		return NULL;
	}
	if (cses->flags & SESSION4_BACK_CHAN) {
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
	nfsd4_probe_callback(clp);
	return new;
}

/* caller must hold client_lock */
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_session *elem;
	int idx;

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	if (is_client_expired(clp)) {
		dprintk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	/*
	 * Move client to the end of the LRU list.
	 */
	dprintk("renewing client (clientid %08x/%08x)\n",
		clp->cl_clientid.cl_boot,
		clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &client_lru);
	clp->cl_time = get_seconds();
}

static inline void
renew_client(struct nfs4_client *clp)
{
	spin_lock(&client_lock);
	renew_client_locked(clp);
	spin_unlock(&client_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid)
{
	if (clid->cl_boot == boot_time)
		return 0;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, boot_time);
	return 1;
}

/*
 * XXX Should we use a slab cache?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmalloc(name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL) {
		kfree(clp);
		return NULL;
	}
	memcpy(clp->cl_name.data, name.data, name.len);
	clp->cl_name.len = name.len;
	return clp;
}

static inline void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		nfsd4_put_session(ses);
	}
	if (clp->cl_cred.cr_group_info)
		put_group_info(clp->cl_cred.cr_group_info);
	kfree(clp->cl_principal);
	kfree(clp->cl_name.data);
	kfree(clp);
}

void
release_session_client(struct nfsd4_session *session)
{
	struct nfs4_client *clp = session->se_client;

	if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock))
		return;
	if (is_client_expired(clp)) {
		free_client(clp);
		session->se_client = NULL;
	} else
		renew_client_locked(clp);
	spin_unlock(&client_lock);
}

/* must be called under the client_lock */
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd4_session *ses;

	mark_client_expired(clp);
	list_del(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
expire_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		list_del_init(&dp->dl_perclnt);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		unhash_delegation(dp);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		release_openowner(oo);
	}
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	list_del(&clp->cl_idhash);
	list_del(&clp->cl_strhash);
	spin_lock(&client_lock);
	unhash_client_locked(clp);
	if (atomic_read(&clp->cl_refcount) == 0)
		free_client(clp);
	spin_unlock(&client_lock);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static void copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
}

static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

/* XXX what about NGROUP */
static int
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	return cr1->cr_uid == cr2->cr_uid;
}

static void gen_clid(struct nfs4_client *clp)
{
	static u32 current_clientid = 1;

	clp->cl_clientid.cl_boot = boot_time;
	clp->cl_clientid.cl_id = current_clientid++;
}

static void gen_confirm(struct nfs4_client *clp)
{
	static u32 i;
	u32 *p;

	p = (u32 *)clp->cl_confirm.data;
	*p++ = get_seconds();
	*p++ = i++;
}

static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
{
	return idr_find(&cl->cl_stateids, t->si_opaque.so_id);
}

static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	s = find_stateid(cl, t);
	if (!s)
		return NULL;
	if (typemask & s->sc_type)
		return s;
	return NULL;
}
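
/*
 * These two helpers are the point of the "look up stateid's per clientid"
 * change: a stateid no longer indexes one global table; the clientid
 * embedded in it first selects the owning client, and so_id is then looked
 * up in that client's private idr.  A sketch of the combined lookup
 * (demo_lookup_stateid() is illustrative, not nfsd code):
 */
#if 0
static struct nfs4_stid *demo_lookup_stateid(stateid_t *s, char typemask)
{
	struct nfs4_client *cl = find_confirmed_client(&s->si_opaque.so_clid);

	if (!cl)
		return NULL;
	return find_stateid_by_type(cl, s, typemask);
}
#endif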

static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	char *princ;

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	INIT_LIST_HEAD(&clp->cl_sessions);

	princ = svc_gss_principal(rqstp);
	if (princ) {
		clp->cl_principal = kstrdup(princ, GFP_KERNEL);
		if (clp->cl_principal == NULL) {
			free_client(clp);
			return NULL;
		}
	}

	idr_init(&clp->cl_stateids);
	memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_strhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	spin_lock_init(&clp->cl_lock);
	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_flavor = rqstp->rq_flavor;
	copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	gen_confirm(clp);
	clp->cl_cb_session = NULL;
	return clp;
}

static int check_name(struct xdr_netobj name)
{
	if (name.len == 0)
		return 0;
	if (name.len > NFS4_OPAQUE_LIMIT) {
		dprintk("NFSD: check_name: name too long(%d)!\n", name.len);
		return 0;
	}
	return 1;
}

static void
add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
{
	unsigned int idhashval;

	list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]);
	renew_client(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	unsigned int strhashval;

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
	strhashval = clientstr_hashval(clp->cl_recdir);
	list_move(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
	renew_client(clp);
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid))
			return clp;
	}
	return NULL;
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid))
			return clp;
	}
	return NULL;
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_str(const char *dname, unsigned int hashval)
{
	struct nfs4_client *clp;

	list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
		if (same_name(clp->cl_recdir, dname))
			return clp;
	}
	return NULL;
}

static struct nfs4_client *
find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
{
	struct nfs4_client *clp;

	list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
		if (same_name(clp->cl_recdir, dname))
			return clp;
	}
	return NULL;
}

static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val,
					      se->se_callback_addr_len,
					      (struct sockaddr *)&conn->cb_addr,
					      sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}
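
/*
 * For reference, the callback address arrives as a string universal
 * address; for "tcp" that is "h1.h2.h3.h4.p1.p2", where the last two
 * groups encode the port.  An illustrative example: "192.0.2.1.8.1"
 * means 192.0.2.1, port 8 * 256 + 1 = 2049, which rpc_uaddr2sockaddr()
 * above converts into a struct sockaddr.
 */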

/*
 * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
 */
void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;

	if (nfsd4_not_cached(resp)) {
		slot->sl_datalen = 0;
		return;
	}
	slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
	base = (char *)resp->cstate.datap -
		(char *)resp->xbuf->head[0].iov_base;
	if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
				    slot->sl_datalen))
		WARN("%s: sessions DRC could not cache compound\n", __func__);
	return;
}
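
/*
 * Reply buffer layout relied on above and by the replay path below
 * (a sketch):
 *
 *	xbuf->head[0].iov_base             cstate.datap            resp->p
 *	|<------------- base ------------>|<------ sl_datalen ------>|
 *	[ rpc hdr | compound hdr | SEQUENCE ][ cached op replies ... ]
 *
 * Only the bytes from cstate.datap to resp->p are cached; the rpc
 * header and the SEQUENCE reply itself are regenerated on each replay.
 */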

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 *
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__,
		resp->opcnt, resp->cstate.slot->sl_cachethis);

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && slot->sl_cachethis == 0) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
__be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Either returns 0 or nfserr_retry_uncached */
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status == nfserr_retry_uncached_rep)
		return status;

	/* The sequence operation has been encoded, cstate->datap set. */
	memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);

	resp->opcnt = slot->sl_opcnt;
	resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
	status = slot->sl_status;

	return status;
}

/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
	/* pNFS is not supported */
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *unconf, *conf, *new;
	int status;
	unsigned int strhashval;
	char dname[HEXDIR_LEN];
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (!check_name(exid->clname) || (exid->flags & ~EXCHGID4_FLAG_MASK_A))
		return nfserr_inval;

	/* Currently only support SP4_NONE */
	switch (exid->spa_how) {
	case SP4_NONE:
		break;
	case SP4_SSV:
		return nfserr_serverfault;
	default:
		BUG();				/* checked by xdr code */
	case SP4_MACH_CRED:
		return nfserr_serverfault;	/* no excuse :-/ */
	}

	status = nfs4_make_rec_clidname(dname, &exid->clname);

	if (status)
		goto error;

	strhashval = clientstr_hashval(dname);

	nfs4_lock_state();
	status = nfs_ok;

	conf = find_confirmed_client_by_str(dname, strhashval);
	if (conf) {
		if (!clp_used_exchangeid(conf)) {
			status = nfserr_clid_inuse; /* XXX: ? */
			goto out;
		}
		if (!same_verf(&verf, &conf->cl_verifier)) {
			/* 18.35.4 case 8 */
			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
				status = nfserr_not_same;
				goto out;
			}
			/* Client reboot: destroy old state */
			expire_client(conf);
			goto out_new;
		}
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			/* 18.35.4 case 9 */
			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
				status = nfserr_perm;
				goto out;
			}
			expire_client(conf);
			goto out_new;
		}
		/*
		 * Set bit when the owner id and verifier map to an already
		 * confirmed client id (18.35.3).
		 */
		exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;

		/*
		 * Falling into 18.35.4 case 2, possible router replay.
		 * Leave confirmed record intact and return same result.
		 */
		copy_verf(conf, &verf);
		new = conf;
		goto out_copy;
	}

	/* 18.35.4 case 7 */
	if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_str(dname, strhashval);
	if (unconf) {
		/*
		 * Possible retry or client restart.  Per 18.35.4 case 4,
		 * a new unconfirmed record should be generated regardless
		 * of whether any properties have changed.
		 */
		expire_client(unconf);
	}

out_new:
	/* Normal case */
	new = create_client(exid->clname, dname, rqstp, &verf);
	if (new == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	gen_clid(new);
	add_to_unconfirmed(new, strhashval);
out_copy:
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
	status = nfs_ok;

out:
	nfs4_unlock_state();
error:
	dprintk("nfsd4_exchange_id returns %d\n", ntohl(status));
	return status;
}

static int
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Normal */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	/* Replay */
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	/* Wraparound */
	if (seqid == 1 && (slot_seqid + 1) == 0)
		return nfs_ok;
	/* Misordered replay or misordered new request */
	return nfserr_seq_misordered;
}
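
/*
 * The cases above, tabulated (seqid arithmetic is modulo 2^32, hence the
 * explicit wraparound row):
 *
 *	request seqid		slot state	result
 *	-------------		----------	------
 *	slot_seqid		in use		nfserr_jukebox (retry later)
 *	other			in use		nfserr_seq_misordered
 *	slot_seqid + 1		idle		nfs_ok (new request)
 *	slot_seqid		idle		nfserr_replay_cache
 *	1 (slot_seqid == ~0U)	idle		nfs_ok (wraparound)
 *	anything else		idle		nfserr_seq_misordered
 */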

/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, int nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */ \
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))

static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
{
	return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
		|| fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
}

__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_clid_slot *cs_slot = NULL;
	bool confirm_me = false;
	int status = 0;

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&cr_ses->clientid);
	conf = find_confirmed_client(&cr_ses->clientid);

	if (conf) {
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			dprintk("Got a create_session replay! seqid= %d\n",
				cs_slot->sl_seqid);
			/* Return the cached reply status */
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			dprintk("Sequence misordered!\n");
			dprintk("Expected seqid= %d but got seqid= %d\n",
				cs_slot->sl_seqid, cr_ses->seqid);
			goto out;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out;
		}

		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out;
		}

		confirm_me = true;
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out;
	}

	/*
	 * XXX: we should probably set this at creation time, and check
	 * for consistent minorversion use throughout:
	 */
	conf->cl_minorversion = 1;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	status = nfserr_toosmall;
	if (check_forechannel_attrs(cr_ses->fore_channel))
		goto out;

	status = nfserr_jukebox;
	new = alloc_init_session(rqstp, conf, cr_ses);
	if (!new)
		goto out;
	status = nfs_ok;
	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
		sizeof(struct nfsd4_channel_attrs));
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	if (confirm_me)
		move_to_confirmed(conf);
out:
	nfs4_unlock_state();
	dprintk("%s returns %d\n", __func__, ntohl(status));
	return status;
}

static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	return argp->opcnt == resp->opcnt;
}

static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&client_lock);
	cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
	/* Sorta weird: we only need the refcnt'ing because new_conn acquires
	 * client_lock itself: */
	if (cstate->session) {
		nfsd4_get_session(cstate->session);
		atomic_inc(&cstate->session->se_client->cl_refcount);
	}
	spin_unlock(&client_lock);
	if (!cstate->session)
		return nfserr_badsession;

	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (!status)
		nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
	return status;
}

static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
{
	if (!session)
		return 0;
	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
}

__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	u32 status = nfserr_badsession;

	/* Notes:
	 * - The confirmed nfs4_client->cl_sessionid holds the destroyed sessionid
1783 * - Should we return nfserr_back_chan_busy if waiting for
1784 * callbacks on to-be-destroyed session?
1785 * - Do we need to clear any callback info from previous session?
1786 */
1787
1788 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
1789 if (!nfsd4_last_compound_op(r))
1790 return nfserr_not_only_op;
1791 }
1792 dump_sessionid(__func__, &sessionid->sessionid);
1793 spin_lock(&client_lock);
1794 ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
1795 if (!ses) {
1796 spin_unlock(&client_lock);
1797 goto out;
1798 }
1799
1800 unhash_session(ses);
1801 spin_unlock(&client_lock);
1802
1803 nfs4_lock_state();
1804 nfsd4_probe_callback_sync(ses->se_client);
1805 nfs4_unlock_state();
1806
1807 nfsd4_del_conns(ses);
1808
1809 nfsd4_put_session(ses);
1810 status = nfs_ok;
1811 out:
1812 dprintk("%s returns %d\n", __func__, ntohl(status));
1813 return status;
1814 }
1815
1816 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
1817 {
1818 struct nfsd4_conn *c;
1819
1820 list_for_each_entry(c, &s->se_conns, cn_persession) {
1821 if (c->cn_xprt == xpt) {
1822 return c;
1823 }
1824 }
1825 return NULL;
1826 }
1827
1828 static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
1829 {
1830 struct nfs4_client *clp = ses->se_client;
1831 struct nfsd4_conn *c;
1832 int ret;
1833
1834 spin_lock(&clp->cl_lock);
1835 c = __nfsd4_find_conn(new->cn_xprt, ses);
1836 if (c) {
1837 spin_unlock(&clp->cl_lock);
1838 free_conn(new);
1839 return;
1840 }
1841 __nfsd4_hash_conn(new, ses);
1842 spin_unlock(&clp->cl_lock);
1843 ret = nfsd4_register_conn(new);
1844 if (ret)
1845 /* oops; xprt is already down: */
1846 nfsd4_conn_lost(&new->cn_xpt_user);
1847 return;
1848 }
1849
1850 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
1851 {
1852 struct nfsd4_compoundargs *args = rqstp->rq_argp;
1853
1854 return args->opcnt > session->se_fchannel.maxops;
1855 }
1856
1857 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
1858 struct nfsd4_session *session)
1859 {
1860 struct xdr_buf *xb = &rqstp->rq_arg;
1861
1862 return xb->len > session->se_fchannel.maxreq_sz;
1863 }
1864
1865 __be32
1866 nfsd4_sequence(struct svc_rqst *rqstp,
1867 struct nfsd4_compound_state *cstate,
1868 struct nfsd4_sequence *seq)
1869 {
1870 struct nfsd4_compoundres *resp = rqstp->rq_resp;
1871 struct nfsd4_session *session;
1872 struct nfsd4_slot *slot;
1873 struct nfsd4_conn *conn;
1874 int status;
1875
1876 if (resp->opcnt != 1)
1877 return nfserr_sequence_pos;
1878
1879 /*
1880 * Will be either used or freed by nfsd4_sequence_check_conn
1881 * below.
1882 */
1883 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
1884 if (!conn)
1885 return nfserr_jukebox;
1886
1887 spin_lock(&client_lock);
1888 status = nfserr_badsession;
1889 session = find_in_sessionid_hashtbl(&seq->sessionid);
1890 if (!session)
1891 goto out;
1892
1893 status = nfserr_too_many_ops;
1894 if (nfsd4_session_too_many_ops(rqstp, session))
1895 goto out;
1896
1897 status = nfserr_req_too_big;
1898 if (nfsd4_request_too_big(rqstp, session))
1899 goto out;
1900
1901 status = nfserr_badslot;
1902 if (seq->slotid >= session->se_fchannel.maxreqs)
1903 goto out;
1904
1905 slot = session->se_slots[seq->slotid];
1906 dprintk("%s: slotid %d\n", __func__, seq->slotid);
1907
1908 /* We do not negotiate the number of slots yet, so set the
1909 * maxslots to the session maxreqs which is used to encode
1910 * sr_highest_slotid and the sr_target_slot id to maxslots */
1911 seq->maxslots = session->se_fchannel.maxreqs;
1912
1913 status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_inuse);
1914 if (status == nfserr_replay_cache) {
1915 cstate->slot = slot;
1916 cstate->session = session;
1917 /* Return the cached reply status and set cstate->status
1918 * for nfsd4_proc_compound processing */
1919 status = nfsd4_replay_cache_entry(resp, seq);
1920 cstate->status = nfserr_replay_cache;
1921 goto out;
1922 }
1923 if (status)
1924 goto out;
1925
1926 nfsd4_sequence_check_conn(conn, session);
1927 conn = NULL;
1928
1929 /* Success! bump slot seqid */
1930 slot->sl_inuse = true;
1931 slot->sl_seqid = seq->seqid;
1932 slot->sl_cachethis = seq->cachethis;
1933
1934 cstate->slot = slot;
1935 cstate->session = session;
1936
1937 out:
1938 /* Hold a session reference until done processing the compound. */
1939 if (cstate->session) {
1940 struct nfs4_client *clp = session->se_client;
1941
1942 nfsd4_get_session(cstate->session);
1943 atomic_inc(&clp->cl_refcount);
1944 if (clp->cl_cb_state == NFSD4_CB_DOWN)
1945 seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN;
1946 }
1947 kfree(conn);
1948 spin_unlock(&client_lock);
1949 dprintk("%s: return %d\n", __func__, ntohl(status));
1950 return status;
1951 }
1952
1953 __be32
1954 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
1955 {
1956 __be32 status = 0;
1957
1958 if (rc->rca_one_fs) {
1959 if (!cstate->current_fh.fh_dentry)
1960 return nfserr_nofilehandle;
1961 /*
1962 * We don't take advantage of the rca_one_fs case.
1963 * That's OK, it's optional, we can safely ignore it.
1964 */
1965 return nfs_ok;
1966 }
1967
1968 nfs4_lock_state();
1969 status = nfserr_complete_already;
1970 if (cstate->session->se_client->cl_firststate)
1971 goto out;
1972
1973 status = nfserr_stale_clientid;
1974 if (is_client_expired(cstate->session->se_client))
1975 /*
1976 * The following error isn't really legal.
1977 * But we only get here if the client has just explicitly
1978 * destroyed its own clientid. Surely it no longer cares what
1979 * error it gets back on an operation for the dead
1980 * client.
1981 */
1982 goto out;
1983
1984 status = nfs_ok;
1985 nfsd4_create_clid_dir(cstate->session->se_client);
1986 out:
1987 nfs4_unlock_state();
1988 return status;
1989 }
1990
1991 __be32
1992 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1993 struct nfsd4_setclientid *setclid)
1994 {
1995 struct xdr_netobj clname = {
1996 .len = setclid->se_namelen,
1997 .data = setclid->se_name,
1998 };
1999 nfs4_verifier clverifier = setclid->se_verf;
2000 unsigned int strhashval;
2001 struct nfs4_client *conf, *unconf, *new;
2002 __be32 status;
2003 char dname[HEXDIR_LEN];
2004
2005 if (!check_name(clname))
2006 return nfserr_inval;
2007
2008 status = nfs4_make_rec_clidname(dname, &clname);
2009 if (status)
2010 return status;
2011
2012 /*
2013 * XXX The Duplicate Request Cache (DRC) has been checked (??)
2014 * We get here on a DRC miss.
2015 */
2016
2017 strhashval = clientstr_hashval(dname);
2018
2019 nfs4_lock_state();
2020 conf = find_confirmed_client_by_str(dname, strhashval);
2021 if (conf) {
2022 /* RFC 3530 14.2.33 CASE 0: */
2023 status = nfserr_clid_inuse;
2024 if (clp_used_exchangeid(conf))
2025 goto out;
2026 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2027 char addr_str[INET6_ADDRSTRLEN];
2028 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
2029 sizeof(addr_str));
2030 dprintk("NFSD: setclientid: string in use by client "
2031 "at %s\n", addr_str);
2032 goto out;
2033 }
2034 }
2035 /*
2036 * section 14.2.33 of RFC 3530 (under the heading "IMPLEMENTATION")
2037 * has a description of SETCLIENTID request processing consisting
2038 * of 5 bullet points, labeled as CASE0 - CASE4 below.
2039 */
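/*
 * Editorial summary (informative only) of how the cases below key off
 * the confirmed record, the verifier, and the unconfirmed record:
 *
 *	conf  verifier match  unconf  case
 *	 no         -          any    CASE 4: new client (normal path)
 *	 yes       yes         any    CASE 1: probable callback update
 *	 yes       no          no     CASE 2: probable client reboot
 *	 yes       no          yes    CASE 3: probable client reboot
 *
 * (CASE 0, a different principal reusing a confirmed name, is already
 * rejected above with nfserr_clid_inuse.)
 */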
2040 unconf = find_unconfirmed_client_by_str(dname, strhashval);
2041 status = nfserr_jukebox;
2042 if (!conf) {
2043 /*
2044 * RFC 3530 14.2.33 CASE 4:
2045 * placed first, because it is the normal case
2046 */
2047 if (unconf)
2048 expire_client(unconf);
2049 new = create_client(clname, dname, rqstp, &clverifier);
2050 if (new == NULL)
2051 goto out;
2052 gen_clid(new);
2053 } else if (same_verf(&conf->cl_verifier, &clverifier)) {
2054 /*
2055 * RFC 3530 14.2.33 CASE 1:
2056 * probable callback update
2057 */
2058 if (unconf) {
2059 /* Note this is removing unconfirmed {*x***},
2060 * which is stronger than RFC recommended {vxc**}.
2061 * This has the advantage that there is at most
2062 * one {*x***} in either list at any time.
2063 */
2064 expire_client(unconf);
2065 }
2066 new = create_client(clname, dname, rqstp, &clverifier);
2067 if (new == NULL)
2068 goto out;
2069 copy_clid(new, conf);
2070 } else if (!unconf) {
2071 /*
2072 * RFC 3530 14.2.33 CASE 2:
2073 * probable client reboot; state will be removed if
2074 * confirmed.
2075 */
2076 new = create_client(clname, dname, rqstp, &clverifier);
2077 if (new == NULL)
2078 goto out;
2079 gen_clid(new);
2080 } else {
2081 /*
2082 * RFC 3530 14.2.33 CASE 3:
2083 * probable client reboot; state will be removed if
2084 * confirmed.
2085 */
2086 expire_client(unconf);
2087 new = create_client(clname, dname, rqstp, &clverifier);
2088 if (new == NULL)
2089 goto out;
2090 gen_clid(new);
2091 }
2092 /*
2093 * XXX: we should probably set this at creation time, and check
2094 * for consistent minorversion use throughout:
2095 */
2096 new->cl_minorversion = 0;
2097 gen_callback(new, setclid, rqstp);
2098 add_to_unconfirmed(new, strhashval);
2099 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2100 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2101 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
2102 status = nfs_ok;
2103 out:
2104 nfs4_unlock_state();
2105 return status;
2106 }
2107
2108
2109 /*
2110 * Section 14.2.34 of RFC 3530 (under the heading "IMPLEMENTATION") has
2111 * a description of SETCLIENTID_CONFIRM request processing consisting of 4
2112 * bullets, labeled as CASE1 - CASE4 below.
2113 */
2114 __be32
2115 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2116 struct nfsd4_compound_state *cstate,
2117 struct nfsd4_setclientid_confirm *setclientid_confirm)
2118 {
2119 struct sockaddr *sa = svc_addr(rqstp);
2120 struct nfs4_client *conf, *unconf;
2121 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2122 clientid_t * clid = &setclientid_confirm->sc_clientid;
2123 __be32 status;
2124
2125 if (STALE_CLIENTID(clid))
2126 return nfserr_stale_clientid;
2127 /*
2128 * XXX The Duplicate Request Cache (DRC) has been checked (??)
2129 * We get here on a DRC miss.
2130 */
2131
2132 nfs4_lock_state();
2133
2134 conf = find_confirmed_client(clid);
2135 unconf = find_unconfirmed_client(clid);
2136
2137 status = nfserr_clid_inuse;
2138 if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa))
2139 goto out;
2140 if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa))
2141 goto out;
2142
2143 /*
2144 * section 14.2.34 of RFC 3530 has a description of
2145 * SETCLIENTID_CONFIRM request processing consisting
2146 * of 4 bullet points, labeled as CASE1 - CASE4 below.
2147 */
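/*
 * Editorial summary (informative only) of the four cases as coded below:
 *
 *	conf  unconf  confirm matches unconf  case
 *	 yes    yes          yes              CASE 1: callback update
 *	 yes    no            -               CASE 2: probable retransmission
 *	 no     yes          yes              CASE 3: normal confirm
 *	 -      -            no (for both)    CASE 4: server rebooted
 *
 * Anything that matches none of these rows falls through to the
 * nfserr_clid_inuse catch-all.
 */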
2148 if (conf && unconf && same_verf(&confirm, &unconf->cl_confirm)) {
2149 /*
2150 * RFC 3530 14.2.34 CASE 1:
2151 * callback update
2152 */
2153 if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
2154 status = nfserr_clid_inuse;
2155 else {
2156 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2157 nfsd4_probe_callback(conf);
2158 expire_client(unconf);
2159 status = nfs_ok;
2161 }
2162 } else if (conf && !unconf) {
2163 /*
2164 * RFC 3530 14.2.34 CASE 2:
2165 * probable retransmitted request; play it safe and
2166 * do nothing.
2167 */
2168 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred))
2169 status = nfserr_clid_inuse;
2170 else
2171 status = nfs_ok;
2172 } else if (!conf && unconf
2173 && same_verf(&unconf->cl_confirm, &confirm)) {
2174 /*
2175 * RFC 3530 14.2.34 CASE 3:
2176 * Normal case; new or rebooted client:
2177 */
2178 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
2179 status = nfserr_clid_inuse;
2180 } else {
2181 unsigned int hash =
2182 clientstr_hashval(unconf->cl_recdir);
2183 conf = find_confirmed_client_by_str(unconf->cl_recdir,
2184 hash);
2185 if (conf) {
2186 nfsd4_remove_clid_dir(conf);
2187 expire_client(conf);
2188 }
2189 move_to_confirmed(unconf);
2190 conf = unconf;
2191 nfsd4_probe_callback(conf);
2192 status = nfs_ok;
2193 }
2194 } else if ((!conf || !same_verf(&conf->cl_confirm, &confirm))
2195 && (!unconf || !same_verf(&unconf->cl_confirm,
2196 &confirm))) {
2197 /*
2198 * RFC 3530 14.2.34 CASE 4:
2199 * Client probably hasn't noticed that we rebooted yet.
2200 */
2201 status = nfserr_stale_clientid;
2202 } else {
2203 /* check that we have hit one of the cases...*/
2204 status = nfserr_clid_inuse;
2205 }
2206 out:
2207 nfs4_unlock_state();
2208 return status;
2209 }
2210
2211 /* OPEN Share state helper functions */
2212 static inline struct nfs4_file *
2213 alloc_init_file(struct inode *ino)
2214 {
2215 struct nfs4_file *fp;
2216 unsigned int hashval = file_hashval(ino);
2217
2218 fp = kmem_cache_alloc(file_slab, GFP_KERNEL);
2219 if (fp) {
2220 atomic_set(&fp->fi_ref, 1);
2221 INIT_LIST_HEAD(&fp->fi_hash);
2222 INIT_LIST_HEAD(&fp->fi_stateids);
2223 INIT_LIST_HEAD(&fp->fi_delegations);
2224 fp->fi_inode = igrab(ino);
2225 fp->fi_had_conflict = false;
2226 fp->fi_lease = NULL;
2227 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2228 memset(fp->fi_access, 0, sizeof(fp->fi_access));
2229 spin_lock(&recall_lock);
2230 list_add(&fp->fi_hash, &file_hashtbl[hashval]);
2231 spin_unlock(&recall_lock);
2232 return fp;
2233 }
2234 return NULL;
2235 }
2236
2237 static void
2238 nfsd4_free_slab(struct kmem_cache **slab)
2239 {
2240 if (*slab == NULL)
2241 return;
2242 kmem_cache_destroy(*slab);
2243 *slab = NULL;
2244 }
2245
2246 void
2247 nfsd4_free_slabs(void)
2248 {
2249 nfsd4_free_slab(&openowner_slab);
2250 nfsd4_free_slab(&lockowner_slab);
2251 nfsd4_free_slab(&file_slab);
2252 nfsd4_free_slab(&stateid_slab);
2253 nfsd4_free_slab(&deleg_slab);
2254 }
2255
2256 static int
2257 nfsd4_init_slabs(void)
2258 {
2259 openowner_slab = kmem_cache_create("nfsd4_openowners",
2260 sizeof(struct nfs4_openowner), 0, 0, NULL);
2261 if (openowner_slab == NULL)
2262 goto out_nomem;
2263 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
2264 sizeof(struct nfs4_lockowner), 0, 0, NULL);
2265 if (lockowner_slab == NULL)
2266 goto out_nomem;
2267 file_slab = kmem_cache_create("nfsd4_files",
2268 sizeof(struct nfs4_file), 0, 0, NULL);
2269 if (file_slab == NULL)
2270 goto out_nomem;
2271 stateid_slab = kmem_cache_create("nfsd4_stateids",
2272 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
2273 if (stateid_slab == NULL)
2274 goto out_nomem;
2275 deleg_slab = kmem_cache_create("nfsd4_delegations",
2276 sizeof(struct nfs4_delegation), 0, 0, NULL);
2277 if (deleg_slab == NULL)
2278 goto out_nomem;
2279 return 0;
2280 out_nomem:
2281 nfsd4_free_slabs();
2282 dprintk("nfsd4: out of memory while initializing nfsv4\n");
2283 return -ENOMEM;
2284 }
2285
2286 void nfs4_free_openowner(struct nfs4_openowner *oo)
2287 {
2288 kfree(oo->oo_owner.so_owner.data);
2289 kmem_cache_free(openowner_slab, oo);
2290 }
2291
2292 void nfs4_free_lockowner(struct nfs4_lockowner *lo)
2293 {
2294 kfree(lo->lo_owner.so_owner.data);
2295 kmem_cache_free(lockowner_slab, lo);
2296 }
2297
2298 static void init_nfs4_replay(struct nfs4_replay *rp)
2299 {
2300 rp->rp_status = nfserr_serverfault;
2301 rp->rp_buflen = 0;
2302 rp->rp_buf = rp->rp_ibuf;
2303 }
2304
2305 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2306 {
2307 struct nfs4_stateowner *sop;
2308
2309 sop = kmem_cache_alloc(slab, GFP_KERNEL);
2310 if (!sop)
2311 return NULL;
2312
2313 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2314 if (!sop->so_owner.data) {
2315 kmem_cache_free(slab, sop);
2316 return NULL;
2317 }
2318 sop->so_owner.len = owner->len;
2319
2320 INIT_LIST_HEAD(&sop->so_stateids);
2321 sop->so_client = clp;
2322 init_nfs4_replay(&sop->so_replay);
2323 return sop;
2324 }
2325
2326 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
2327 {
2328 list_add(&oo->oo_owner.so_strhash, &open_ownerstr_hashtbl[strhashval]);
2329 list_add(&oo->oo_perclient, &clp->cl_openowners);
2330 }
2331
2332 static struct nfs4_openowner *
2333 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
2334 struct nfs4_openowner *oo;
2335
2336 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2337 if (!oo)
2338 return NULL;
2339 oo->oo_owner.so_is_open_owner = 1;
2340 oo->oo_owner.so_seqid = open->op_seqid;
2341 oo->oo_flags = 0;
2342 oo->oo_time = 0;
2343 oo->oo_last_closed_stid = NULL;
2344 INIT_LIST_HEAD(&oo->oo_close_lru);
2345 hash_openowner(oo, clp, strhashval);
2346 return oo;
2347 }
2348
2349 static inline __be32 init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
2350 struct nfs4_openowner *oo = open->op_openowner;
2351 struct nfs4_client *clp = oo->oo_owner.so_client;
2352 __be32 status;
2353
2354 status = init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
2355 if (status)
2356 return status;
2357 INIT_LIST_HEAD(&stp->st_lockowners);
2358 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
2359 list_add(&stp->st_perfile, &fp->fi_stateids);
2360 stp->st_stateowner = &oo->oo_owner;
2361 get_nfs4_file(fp);
2362 stp->st_file = fp;
2363 stp->st_access_bmap = 0;
2364 stp->st_deny_bmap = 0;
2365 __set_bit(open->op_share_access & ~NFS4_SHARE_WANT_MASK,
2366 &stp->st_access_bmap);
2367 __set_bit(open->op_share_deny, &stp->st_deny_bmap);
2368 stp->st_openstp = NULL;
2369 return nfs_ok;
2370 }
2371
2372 static void
2373 move_to_close_lru(struct nfs4_openowner *oo)
2374 {
2375 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
2376
2377 list_move_tail(&oo->oo_close_lru, &close_lru);
2378 oo->oo_time = get_seconds();
2379 }
2380
2381 static int
2382 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2383 clientid_t *clid)
2384 {
2385 return (sop->so_owner.len == owner->len) &&
2386 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2387 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2388 }
2389
2390 static struct nfs4_openowner *
2391 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open)
2392 {
2393 struct nfs4_stateowner *so = NULL;
2394
2395 list_for_each_entry(so, &open_ownerstr_hashtbl[hashval], so_strhash) {
2396 if (same_owner_str(so, &open->op_owner, &open->op_clientid))
2397 return container_of(so, struct nfs4_openowner, oo_owner);
2398 }
2399 return NULL;
2400 }
2401
2402 /* search file_hashtbl[] for file */
2403 static struct nfs4_file *
2404 find_file(struct inode *ino)
2405 {
2406 unsigned int hashval = file_hashval(ino);
2407 struct nfs4_file *fp;
2408
2409 spin_lock(&recall_lock);
2410 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2411 if (fp->fi_inode == ino) {
2412 get_nfs4_file(fp);
2413 spin_unlock(&recall_lock);
2414 return fp;
2415 }
2416 }
2417 spin_unlock(&recall_lock);
2418 return NULL;
2419 }
2420
2421 static inline int access_valid(u32 x, u32 minorversion)
2422 {
2423 if ((x & NFS4_SHARE_ACCESS_MASK) < NFS4_SHARE_ACCESS_READ)
2424 return 0;
2425 if ((x & NFS4_SHARE_ACCESS_MASK) > NFS4_SHARE_ACCESS_BOTH)
2426 return 0;
2427 x &= ~NFS4_SHARE_ACCESS_MASK;
2428 if (minorversion && x) {
2429 if ((x & NFS4_SHARE_WANT_MASK) > NFS4_SHARE_WANT_CANCEL)
2430 return 0;
2431 if ((x & NFS4_SHARE_WHEN_MASK) > NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED)
2432 return 0;
2433 x &= ~(NFS4_SHARE_WANT_MASK | NFS4_SHARE_WHEN_MASK);
2434 }
2435 if (x)
2436 return 0;
2437 return 1;
2438 }
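/*
 * Editorial examples (hypothetical values) for access_valid() above:
 *
 *	NFS4_SHARE_ACCESS_READ                    -> valid, any minorversion
 *	0 (no access bits set)                    -> invalid (below READ)
 *	NFS4_SHARE_ACCESS_BOTH | a v4.1 WANT bit  -> valid only when
 *	                                             minorversion != 0
 *
 * That is, v4.0 clients may send only the basic access bits, while v4.1
 * clients may additionally set WANT/WHEN bits within the defined ranges.
 */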
2439
2440 static inline int deny_valid(u32 x)
2441 {
2442 /* Note: unlike access bits, deny bits may be zero. */
2443 return x <= NFS4_SHARE_DENY_BOTH;
2444 }
2445
2446 /*
2447 * Called to check share deny state when a READ arrives with the
2448 * all-zero stateid, or a WRITE with the all-zero or all-ones stateid.
2449 */
2450 static __be32
2451 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2452 {
2453 struct inode *ino = current_fh->fh_dentry->d_inode;
2454 struct nfs4_file *fp;
2455 struct nfs4_ol_stateid *stp;
2456 __be32 ret;
2457
2458 dprintk("NFSD: nfs4_share_conflict\n");
2459
2460 fp = find_file(ino);
2461 if (!fp)
2462 return nfs_ok;
2463 ret = nfserr_locked;
2464 /* Search for conflicting share reservations */
2465 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2466 if (test_bit(deny_type, &stp->st_deny_bmap) ||
2467 test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap))
2468 goto out;
2469 }
2470 ret = nfs_ok;
2471 out:
2472 put_nfs4_file(fp);
2473 return ret;
2474 }
2475
2476 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
2477 {
2478 /* We're assuming the state code never drops its reference
2479 * without first removing the lease. Since we're in this lease
2480 * callback (and since the lease code is serialized by the kernel
2481 * lock) we know the server hasn't removed the lease yet, so we
2482 * know it's safe to take a reference: */
2483 atomic_inc(&dp->dl_count);
2484
2485 list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
2486
2487 /* Only place dl_time is set; protected by lock_flocks(). */
2488 dp->dl_time = get_seconds();
2489
2490 nfsd4_cb_recall(dp);
2491 }
2492
2493 /* Called from break_lease() with lock_flocks() held. */
2494 static void nfsd_break_deleg_cb(struct file_lock *fl)
2495 {
2496 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
2497 struct nfs4_delegation *dp;
2498
2499 BUG_ON(!fp);
2500 /* We assume break_lease is only called once per lease: */
2501 BUG_ON(fp->fi_had_conflict);
2502 /*
2503 * We don't want the locks code to timeout the lease for us;
2504 * we'll remove it ourselves if a delegation isn't returned
2505 * in time:
2506 */
2507 fl->fl_break_time = 0;
2508
2509 spin_lock(&recall_lock);
2510 fp->fi_had_conflict = true;
2511 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
2512 nfsd_break_one_deleg(dp);
2513 spin_unlock(&recall_lock);
2514 }
2515
2516 static
2517 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2518 {
2519 if (arg & F_UNLCK)
2520 return lease_modify(onlist, arg);
2521 else
2522 return -EAGAIN;
2523 }
2524
2525 static const struct lock_manager_operations nfsd_lease_mng_ops = {
2526 .lm_break = nfsd_break_deleg_cb,
2527 .lm_change = nfsd_change_deleg_cb,
2528 };
2529
2530 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
2531 {
2532 if (nfsd4_has_session(cstate))
2533 return nfs_ok;
2534 if (seqid == so->so_seqid - 1)
2535 return nfserr_replay_me;
2536 if (seqid == so->so_seqid)
2537 return nfs_ok;
2538 return nfserr_bad_seqid;
2539 }
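/*
 * Editorial example for the seqid check above (NFSv4.0 only; sessions
 * make the check unnecessary).  If so_seqid, the next expected seqid,
 * is 5:
 *
 *	request seqid 5 -> nfs_ok            (the expected new request)
 *	request seqid 4 -> nfserr_replay_me  (retransmission; use replay cache)
 *	request seqid 6 -> nfserr_bad_seqid  (client skipped ahead)
 */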
2540
2541 __be32
2542 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
2543 struct nfsd4_open *open)
2544 {
2545 clientid_t *clientid = &open->op_clientid;
2546 struct nfs4_client *clp = NULL;
2547 unsigned int strhashval;
2548 struct nfs4_openowner *oo = NULL;
2549 __be32 status;
2550
2551 if (!check_name(open->op_owner))
2552 return nfserr_inval;
2553
2554 if (STALE_CLIENTID(&open->op_clientid))
2555 return nfserr_stale_clientid;
2556
2557 strhashval = open_ownerstr_hashval(clientid->cl_id, &open->op_owner);
2558 oo = find_openstateowner_str(strhashval, open);
2559 open->op_openowner = oo;
2560 if (!oo) {
2561 /* Make sure the client's lease hasn't expired. */
2562 clp = find_confirmed_client(clientid);
2563 if (clp == NULL)
2564 return nfserr_expired;
2565 goto renew;
2566 }
2567 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
2568 /* Replace unconfirmed owners without checking for replay. */
2569 clp = oo->oo_owner.so_client;
2570 release_openowner(oo);
2571 open->op_openowner = NULL;
2572 goto renew;
2573 }
2574 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
2575 if (status)
2576 return status;
2577 renew:
2578 if (open->op_openowner == NULL) {
2579 oo = alloc_init_open_stateowner(strhashval, clp, open);
2580 if (oo == NULL)
2581 return nfserr_jukebox;
2582 open->op_openowner = oo;
2583 }
2584 list_del_init(&oo->oo_close_lru);
2585 renew_client(oo->oo_owner.so_client);
2586 return nfs_ok;
2587 }
2588
2589 static inline __be32
2590 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2591 {
2592 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
2593 return nfserr_openmode;
2594 else
2595 return nfs_ok;
2596 }
2597
2598 static int share_access_to_flags(u32 share_access)
2599 {
2600 share_access &= ~NFS4_SHARE_WANT_MASK;
2601
2602 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
2603 }
2604
2605 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
2606 {
2607 struct nfs4_stid *ret;
2608
2609 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
2610 if (!ret)
2611 return NULL;
2612 return delegstateid(ret);
2613 }
2614
2615 static __be32
2616 nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open,
2617 struct nfs4_delegation **dp)
2618 {
2619 int flags;
2620 __be32 status = nfserr_bad_stateid;
2621
2622 *dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
2623 if (*dp == NULL)
2624 goto out;
2625 flags = share_access_to_flags(open->op_share_access);
2626 status = nfs4_check_delegmode(*dp, flags);
2627 if (status)
2628 *dp = NULL;
2629 out:
2630 if (open->op_claim_type != NFS4_OPEN_CLAIM_DELEGATE_CUR)
2631 return nfs_ok;
2632 if (status)
2633 return status;
2634 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2635 return nfs_ok;
2636 }
2637
2638 static __be32
2639 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
2640 {
2641 struct nfs4_ol_stateid *local;
2642 struct nfs4_openowner *oo = open->op_openowner;
2643
2644 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
2645 /* ignore lock owners */
2646 if (local->st_stateowner->so_is_open_owner == 0)
2647 continue;
2648 /* remember if we have seen this open owner */
2649 if (local->st_stateowner == &oo->oo_owner)
2650 *stpp = local;
2651 /* check for conflicting share reservations */
2652 if (!test_share(local, open))
2653 return nfserr_share_denied;
2654 }
2655 return nfs_ok;
2656 }
2657
2658 static inline struct nfs4_ol_stateid *
2659 nfs4_alloc_stateid(void)
2660 {
2661 return kmem_cache_alloc(stateid_slab, GFP_KERNEL);
2662 }
2663
2664 static inline int nfs4_access_to_access(u32 nfs4_access)
2665 {
2666 int flags = 0;
2667
2668 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
2669 flags |= NFSD_MAY_READ;
2670 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
2671 flags |= NFSD_MAY_WRITE;
2672 return flags;
2673 }
2674
2675 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
2676 struct svc_fh *cur_fh, struct nfsd4_open *open)
2677 {
2678 __be32 status;
2679 int oflag = nfs4_access_to_omode(open->op_share_access);
2680 int access = nfs4_access_to_access(open->op_share_access);
2681
2682 /* CLAIM_DELEGATE_CUR is used in response to a broken lease;
2683 * allowing it to break the lease and return EAGAIN leaves the
2684 * client unable to make progress in returning the delegation */
2685 if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
2686 access |= NFSD_MAY_NOT_BREAK_LEASE;
2687
2688 if (!fp->fi_fds[oflag]) {
2689 status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
2690 &fp->fi_fds[oflag]);
2691 if (status)
2692 return status;
2693 }
2694 nfs4_file_get_access(fp, oflag);
2695
2696 return nfs_ok;
2697 }
2698
2699 static __be32
2700 nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_ol_stateid **stpp,
2701 struct nfs4_file *fp, struct svc_fh *cur_fh,
2702 struct nfsd4_open *open)
2703 {
2704 struct nfs4_ol_stateid *stp;
2705 __be32 status;
2706
2707 stp = nfs4_alloc_stateid();
2708 if (stp == NULL)
2709 return nfserr_jukebox;
2710
2711 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
2712 if (status) {
2713 kmem_cache_free(stateid_slab, stp);
2714 return status;
2715 }
2716 *stpp = stp;
2717 return 0;
2718 }
2719
2720 static inline __be32
2721 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
2722 struct nfsd4_open *open)
2723 {
2724 struct iattr iattr = {
2725 .ia_valid = ATTR_SIZE,
2726 .ia_size = 0,
2727 };
2728 if (!open->op_truncate)
2729 return 0;
2730 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
2731 return nfserr_inval;
2732 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
2733 }
2734
2735 static __be32
2736 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
2737 {
2738 u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
2739 bool new_access;
2740 __be32 status;
2741
2742 new_access = !test_bit(op_share_access, &stp->st_access_bmap);
2743 if (new_access) {
2744 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
2745 if (status)
2746 return status;
2747 }
2748 status = nfsd4_truncate(rqstp, cur_fh, open);
2749 if (status) {
2750 if (new_access) {
2751 int oflag = nfs4_access_to_omode(op_share_access);
2752 nfs4_file_put_access(fp, oflag);
2753 }
2754 return status;
2755 }
2756 /* remember the open */
2757 __set_bit(op_share_access, &stp->st_access_bmap);
2758 __set_bit(open->op_share_deny, &stp->st_deny_bmap);
2759
2760 return nfs_ok;
2761 }
2762
2763
2764 static void
2765 nfs4_set_claim_prev(struct nfsd4_open *open)
2766 {
2767 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2768 open->op_openowner->oo_owner.so_client->cl_firststate = 1;
2769 }
2770
2771 /* Should we give out recallable state?: */
2772 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
2773 {
2774 if (clp->cl_cb_state == NFSD4_CB_UP)
2775 return true;
2776 /*
2777 * In the sessions case, since we don't have to establish a
2778 * separate connection for callbacks, we assume it's OK
2779 * until we hear otherwise:
2780 */
2781 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
2782 }
2783
2784 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
2785 {
2786 struct file_lock *fl;
2787
2788 fl = locks_alloc_lock();
2789 if (!fl)
2790 return NULL;
2791 locks_init_lock(fl);
2792 fl->fl_lmops = &nfsd_lease_mng_ops;
2793 fl->fl_flags = FL_LEASE;
2794 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
2795 fl->fl_end = OFFSET_MAX;
2796 fl->fl_owner = (fl_owner_t)(dp->dl_file);
2797 fl->fl_pid = current->tgid;
2798 return fl;
2799 }
2800
2801 static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
2802 {
2803 struct nfs4_file *fp = dp->dl_file;
2804 struct file_lock *fl;
2805 int status;
2806
2807 fl = nfs4_alloc_init_lease(dp, flag);
2808 if (!fl)
2809 return -ENOMEM;
2810 fl->fl_file = find_readable_file(fp);
2811 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2812 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
2813 if (status) {
2814 list_del_init(&dp->dl_perclnt);
2815 locks_free_lock(fl);
2816 return -ENOMEM;
2817 }
2818 fp->fi_lease = fl;
2819 fp->fi_deleg_file = fl->fl_file;
2820 get_file(fp->fi_deleg_file);
2821 atomic_set(&fp->fi_delegees, 1);
2822 list_add(&dp->dl_perfile, &fp->fi_delegations);
2823 return 0;
2824 }
2825
2826 static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
2827 {
2828 struct nfs4_file *fp = dp->dl_file;
2829
2830 if (!fp->fi_lease)
2831 return nfs4_setlease(dp, flag);
2832 spin_lock(&recall_lock);
2833 if (fp->fi_had_conflict) {
2834 spin_unlock(&recall_lock);
2835 return -EAGAIN;
2836 }
2837 atomic_inc(&fp->fi_delegees);
2838 list_add(&dp->dl_perfile, &fp->fi_delegations);
2839 spin_unlock(&recall_lock);
2840 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2841 return 0;
2842 }
2843
2844 /*
2845 * Attempt to hand out a delegation.
2846 */
2847 static void
2848 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
2849 {
2850 struct nfs4_delegation *dp;
2851 struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
2852 int cb_up;
2853 int status, flag = 0;
2854
2855 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
2856 flag = NFS4_OPEN_DELEGATE_NONE;
2857 open->op_recall = 0;
2858 switch (open->op_claim_type) {
2859 case NFS4_OPEN_CLAIM_PREVIOUS:
2860 if (!cb_up)
2861 open->op_recall = 1;
2862 flag = open->op_delegate_type;
2863 if (flag == NFS4_OPEN_DELEGATE_NONE)
2864 goto out;
2865 break;
2866 case NFS4_OPEN_CLAIM_NULL:
2867 /* Let's not give out any delegations till everyone's
2868 * had the chance to reclaim theirs.... */
2869 if (locks_in_grace())
2870 goto out;
2871 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
2872 goto out;
2873 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
2874 flag = NFS4_OPEN_DELEGATE_WRITE;
2875 else
2876 flag = NFS4_OPEN_DELEGATE_READ;
2877 break;
2878 default:
2879 goto out;
2880 }
2881
2882 dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
2883 if (dp == NULL)
2884 goto out_no_deleg;
2885 status = nfs4_set_delegation(dp, flag);
2886 if (status)
2887 goto out_free;
2888
2889 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
2890
2891 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
2892 STATEID_VAL(&dp->dl_stid.sc_stateid));
2893 out:
2894 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS
2895 && flag == NFS4_OPEN_DELEGATE_NONE
2896 && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
2897 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
2898 open->op_delegate_type = flag;
2899 return;
2900 out_free:
2901 nfs4_put_delegation(dp);
2902 out_no_deleg:
2903 flag = NFS4_OPEN_DELEGATE_NONE;
2904 goto out;
2905 }
2906
2907 /*
2908 * called with nfs4_lock_state() held.
2909 */
2910 __be32
2911 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
2912 {
2913 struct nfsd4_compoundres *resp = rqstp->rq_resp;
2914 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
2915 struct nfs4_file *fp = NULL;
2916 struct inode *ino = current_fh->fh_dentry->d_inode;
2917 struct nfs4_ol_stateid *stp = NULL;
2918 struct nfs4_delegation *dp = NULL;
2919 __be32 status;
2920
2921 status = nfserr_inval;
2922 if (!access_valid(open->op_share_access, resp->cstate.minorversion)
2923 || !deny_valid(open->op_share_deny))
2924 goto out;
2925 /*
2926 * Lookup file; if found, lookup stateid and check open request,
2927 * and check for delegations in the process of being recalled.
2928 * If not found, create the nfs4_file struct
2929 */
2930 fp = find_file(ino);
2931 if (fp) {
2932 if ((status = nfs4_check_open(fp, open, &stp)))
2933 goto out;
2934 status = nfs4_check_deleg(cl, fp, open, &dp);
2935 if (status)
2936 goto out;
2937 } else {
2938 status = nfserr_bad_stateid;
2939 if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
2940 goto out;
2941 status = nfserr_jukebox;
2942 fp = alloc_init_file(ino);
2943 if (fp == NULL)
2944 goto out;
2945 }
2946
2947 /*
2948 * OPEN the file, or upgrade an existing OPEN.
2949 * If truncate fails, the OPEN fails.
2950 */
2951 if (stp) {
2952 /* Stateid was found, this is an OPEN upgrade */
2953 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
2954 if (status)
2955 goto out;
2956 } else {
2957 status = nfs4_new_open(rqstp, &stp, fp, current_fh, open);
2958 if (status)
2959 goto out;
2960 status = init_open_stateid(stp, fp, open);
2961 if (status) {
2962 release_open_stateid(stp);
2963 goto out;
2964 }
2965 status = nfsd4_truncate(rqstp, current_fh, open);
2966 if (status) {
2967 release_open_stateid(stp);
2968 goto out;
2969 }
2970 }
2971 update_stateid(&stp->st_stid.sc_stateid);
2972 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
2973
2974 if (nfsd4_has_session(&resp->cstate))
2975 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2976
2977 /*
2978 * Attempt to hand out a delegation. No error return, because the
2979 * OPEN succeeds even if we fail.
2980 */
2981 nfs4_open_delegation(current_fh, open, stp);
2982
2983 status = nfs_ok;
2984
2985 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
2986 STATEID_VAL(&stp->st_stid.sc_stateid));
2987 out:
2988 if (fp)
2989 put_nfs4_file(fp);
2990 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
2991 nfs4_set_claim_prev(open);
2992 /*
2993 * To finish the open response, we just need to set the rflags.
2994 */
2995 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
2996 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
2997 !nfsd4_has_session(&resp->cstate))
2998 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
2999
3000 return status;
3001 }
3002
3003 __be32
3004 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3005 clientid_t *clid)
3006 {
3007 struct nfs4_client *clp;
3008 __be32 status;
3009
3010 nfs4_lock_state();
3011 dprintk("process_renew(%08x/%08x): starting\n",
3012 clid->cl_boot, clid->cl_id);
3013 status = nfserr_stale_clientid;
3014 if (STALE_CLIENTID(clid))
3015 goto out;
3016 clp = find_confirmed_client(clid);
3017 status = nfserr_expired;
3018 if (clp == NULL) {
3019 /* We assume the client took too long to RENEW. */
3020 dprintk("nfsd4_renew: clientid not found!\n");
3021 goto out;
3022 }
3023 renew_client(clp);
3024 status = nfserr_cb_path_down;
3025 if (!list_empty(&clp->cl_delegations)
3026 && clp->cl_cb_state != NFSD4_CB_UP)
3027 goto out;
3028 status = nfs_ok;
3029 out:
3030 nfs4_unlock_state();
3031 return status;
3032 }
3033
3034 static struct lock_manager nfsd4_manager = {
3035 };
3036
3037 static void
3038 nfsd4_end_grace(void)
3039 {
3040 dprintk("NFSD: end of grace period\n");
3041 nfsd4_recdir_purge_old();
3042 locks_end_grace(&nfsd4_manager);
3043 /*
3044 * Now that every NFSv4 client has had the chance to recover and
3045 * to see the (possibly new, possibly shorter) lease time, we
3046 * can safely set the next grace time to the current lease time:
3047 */
3048 nfsd4_grace = nfsd4_lease;
3049 }
3050
3051 static time_t
3052 nfs4_laundromat(void)
3053 {
3054 struct nfs4_client *clp;
3055 struct nfs4_openowner *oo;
3056 struct nfs4_delegation *dp;
3057 struct list_head *pos, *next, reaplist;
3058 time_t cutoff = get_seconds() - nfsd4_lease;
3059 time_t t, clientid_val = nfsd4_lease;
3060 time_t u, test_val = nfsd4_lease;
3061
3062 nfs4_lock_state();
3063
3064 dprintk("NFSD: laundromat service - starting\n");
3065 if (locks_in_grace())
3066 nfsd4_end_grace();
3067 INIT_LIST_HEAD(&reaplist);
3068 spin_lock(&client_lock);
3069 list_for_each_safe(pos, next, &client_lru) {
3070 clp = list_entry(pos, struct nfs4_client, cl_lru);
3071 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
3072 t = clp->cl_time - cutoff;
3073 if (clientid_val > t)
3074 clientid_val = t;
3075 break;
3076 }
3077 if (atomic_read(&clp->cl_refcount)) {
3078 dprintk("NFSD: client in use (clientid %08x)\n",
3079 clp->cl_clientid.cl_id);
3080 continue;
3081 }
3082 unhash_client_locked(clp);
3083 list_add(&clp->cl_lru, &reaplist);
3084 }
3085 spin_unlock(&client_lock);
3086 list_for_each_safe(pos, next, &reaplist) {
3087 clp = list_entry(pos, struct nfs4_client, cl_lru);
3088 dprintk("NFSD: purging unused client (clientid %08x)\n",
3089 clp->cl_clientid.cl_id);
3090 nfsd4_remove_clid_dir(clp);
3091 expire_client(clp);
3092 }
3093 spin_lock(&recall_lock);
3094 list_for_each_safe(pos, next, &del_recall_lru) {
3095 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3096 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
3097 u = dp->dl_time - cutoff;
3098 if (test_val > u)
3099 test_val = u;
3100 break;
3101 }
3102 list_move(&dp->dl_recall_lru, &reaplist);
3103 }
3104 spin_unlock(&recall_lock);
3105 list_for_each_safe(pos, next, &reaplist) {
3106 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3107 list_del_init(&dp->dl_recall_lru);
3108 unhash_delegation(dp);
3109 }
3110 test_val = nfsd4_lease;
3111 list_for_each_safe(pos, next, &close_lru) {
3112 oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
3113 if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
3114 u = oo->oo_time - cutoff;
3115 if (test_val > u)
3116 test_val = u;
3117 break;
3118 }
3119 release_openowner(oo);
3120 }
3121 if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
3122 clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
3123 nfs4_unlock_state();
3124 return clientid_val;
3125 }
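/*
 * Editorial example (hypothetical times) of the laundromat's return
 * value: with nfsd4_lease = 90s and the oldest live client last renewed
 * 30s ago, cl_time - cutoff = 60, so the caller re-arms the work queue
 * to run again in 60 seconds -- just when that client's lease would
 * expire if it never renews.  The result is clamped below at
 * NFSD_LAUNDROMAT_MINTIMEOUT to avoid busy rescanning.
 */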
3126
3127 static struct workqueue_struct *laundry_wq;
3128 static void laundromat_main(struct work_struct *);
3129 static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
3130
3131 static void
3132 laundromat_main(struct work_struct *not_used)
3133 {
3134 time_t t;
3135
3136 t = nfs4_laundromat();
3137 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3138 queue_delayed_work(laundry_wq, &laundromat_work, t*HZ);
3139 }
3140
3141 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3142 {
3143 if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
3144 return nfserr_bad_stateid;
3145 return nfs_ok;
3146 }
3147
3148 static int
3149 STALE_STATEID(stateid_t *stateid)
3150 {
3151 if (stateid->si_opaque.so_clid.cl_boot == boot_time)
3152 return 0;
3153 dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
3154 STATEID_VAL(stateid));
3155 return 1;
3156 }
3157
3158 static inline int
3159 access_permit_read(unsigned long access_bmap)
3160 {
3161 return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) ||
3162 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) ||
3163 test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap);
3164 }
3165
3166 static inline int
3167 access_permit_write(unsigned long access_bmap)
3168 {
3169 return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) ||
3170 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap);
3171 }
3172
3173 static
3174 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3175 {
3176 __be32 status = nfserr_openmode;
3177
3178 /* For lock stateids, we test the parent open, not the lock: */
3179 if (stp->st_openstp)
3180 stp = stp->st_openstp;
3181 if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap)))
3182 goto out;
3183 if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap)))
3184 goto out;
3185 status = nfs_ok;
3186 out:
3187 return status;
3188 }
3189
3190 static inline __be32
3191 check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
3192 {
3193 if (ONE_STATEID(stateid) && (flags & RD_STATE))
3194 return nfs_ok;
3195 else if (locks_in_grace()) {
3196 /* Answer in remaining cases depends on existence of
3197 * conflicting state; so we must wait out the grace period. */
3198 return nfserr_grace;
3199 } else if (flags & WR_STATE)
3200 return nfs4_share_conflict(current_fh,
3201 NFS4_SHARE_DENY_WRITE);
3202 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
3203 return nfs4_share_conflict(current_fh,
3204 NFS4_SHARE_DENY_READ);
3205 }
3206
3207 /*
3208 * Allow READ/WRITE during grace period on recovered state only for files
3209 * that are not able to provide mandatory locking.
3210 */
3211 static inline int
3212 grace_disallows_io(struct inode *inode)
3213 {
3214 return locks_in_grace() && mandatory_lock(inode);
3215 }
3216
3217 /* Returns true iff a is later than b: */
3218 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3219 {
3220 return (s32)a->si_generation - (s32)b->si_generation > 0;
3221 }
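/*
 * Editorial note: the (s32) cast above makes the comparison robust to
 * u32 wraparound of si_generation.  Worked example (hypothetical
 * values):
 *
 *	a->si_generation = 0x00000002
 *	b->si_generation = 0xfffffffe
 *	(s32)(0x00000002 - 0xfffffffe) = (s32)0x00000004 = 4 > 0
 *
 * so 'a' is correctly judged later than 'b' even though a < b as plain
 * unsigned values.
 */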
3222
3223 static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3224 {
3225 /*
3226 * When sessions are used the stateid generation number is ignored
3227 * when it is zero.
3228 */
3229 if (has_session && in->si_generation == 0)
3230 return nfs_ok;
3231
3232 if (in->si_generation == ref->si_generation)
3233 return nfs_ok;
3234
3235 /* If the client sends us a stateid from the future, it's buggy: */
3236 if (stateid_generation_after(in, ref))
3237 return nfserr_bad_stateid;
3238 /*
3239 * However, we could see a stateid from the past, even from a
3240 * non-buggy client. For example, if the client sends a lock
3241 * while some IO is outstanding, the lock may bump si_generation
3242 * while the IO is still in flight. The client could avoid that
3243 * situation by waiting for responses on all the IO requests,
3244 * but better performance may result in retrying IO that
3245 * receives an old_stateid error if requests are rarely
3246 * reordered in flight:
3247 */
3248 return nfserr_old_stateid;
3249 }
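/*
 * Editorial timeline (hypothetical) for the nfserr_old_stateid case
 * described above:
 *
 *	1. client sends WRITE carrying a stateid with si_generation 1
 *	2. client sends LOCK; the server bumps the stateid to generation 2
 *	3. the WRITE from step 1 is finally processed: in (gen 1) is older
 *	   than ref (gen 2), so the server returns nfserr_old_stateid
 *	   rather than nfserr_bad_stateid
 */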
3250
3251 __be32 nfs4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3252 {
3253 struct nfs4_stid *s;
3254 struct nfs4_ol_stateid *ols;
3255 __be32 status;
3256
3257 if (STALE_STATEID(stateid))
3258 return nfserr_stale_stateid;
3259
3260 s = find_stateid(cl, stateid);
3261 if (!s)
3262 return nfserr_stale_stateid;
3263 status = check_stateid_generation(stateid, &s->sc_stateid, 1);
3264 if (status)
3265 return status;
3266 if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
3267 return nfs_ok;
3268 ols = openlockstateid(s);
3269 if (ols->st_stateowner->so_is_open_owner
3270 && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3271 return nfserr_bad_stateid;
3272 return nfs_ok;
3273 }
3274
3275 static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s)
3276 {
3277 struct nfs4_client *cl;
3278
3279 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3280 return nfserr_bad_stateid;
3281 if (STALE_STATEID(stateid))
3282 return nfserr_stale_stateid;
3283 cl = find_confirmed_client(&stateid->si_opaque.so_clid);
3284 if (!cl)
3285 return nfserr_expired;
3286 *s = find_stateid_by_type(cl, stateid, typemask);
3287 if (!*s)
3288 return nfserr_bad_stateid;
3289 return nfs_ok;
3291 }
3292
3293 /*
3294 * Checks for stateid operations
3295 */
3296 __be32
3297 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
3298 stateid_t *stateid, int flags, struct file **filpp)
3299 {
3300 struct nfs4_stid *s;
3301 struct nfs4_ol_stateid *stp = NULL;
3302 struct nfs4_delegation *dp = NULL;
3303 struct svc_fh *current_fh = &cstate->current_fh;
3304 struct inode *ino = current_fh->fh_dentry->d_inode;
3305 __be32 status;
3306
3307 if (filpp)
3308 *filpp = NULL;
3309
3310 if (grace_disallows_io(ino))
3311 return nfserr_grace;
3312
3313 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3314 return check_special_stateids(current_fh, stateid, flags);
3315
3316 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s);
3317 if (status)
3318 return status;
3319 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
3320 if (status)
3321 goto out;
3322 switch (s->sc_type) {
3323 case NFS4_DELEG_STID:
3324 dp = delegstateid(s);
3325 status = nfs4_check_delegmode(dp, flags);
3326 if (status)
3327 goto out;
3328 renew_client(dp->dl_stid.sc_client);
3329 if (filpp) {
3330 *filpp = dp->dl_file->fi_deleg_file;
3331 BUG_ON(!*filpp);
3332 }
3333 break;
3334 case NFS4_OPEN_STID:
3335 case NFS4_LOCK_STID:
3336 stp = openlockstateid(s);
3337 status = nfs4_check_fh(current_fh, stp);
3338 if (status)
3339 goto out;
3340 if (stp->st_stateowner->so_is_open_owner
3341 && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3342 goto out;
3343 status = nfs4_check_openmode(stp, flags);
3344 if (status)
3345 goto out;
3346 renew_client(stp->st_stateowner->so_client);
3347 if (filpp) {
3348 if (flags & RD_STATE)
3349 *filpp = find_readable_file(stp->st_file);
3350 else
3351 *filpp = find_writeable_file(stp->st_file);
3352 }
3353 break;
3354 default:
3355 return nfserr_bad_stateid;
3356 }
3357 status = nfs_ok;
3358 out:
3359 return status;
3360 }
3361
3362 static __be32
3363 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3364 {
3365 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
3366 return nfserr_locks_held;
3367 release_lock_stateid(stp);
3368 return nfs_ok;
3369 }
3370
3371 /*
3372 * Test if the stateid is valid
3373 */
3374 __be32
3375 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3376 struct nfsd4_test_stateid *test_stateid)
3377 {
3378 /* real work is done during encoding */
3379 return nfs_ok;
3380 }
3381
3382 __be32
3383 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3384 struct nfsd4_free_stateid *free_stateid)
3385 {
3386 stateid_t *stateid = &free_stateid->fr_stateid;
3387 struct nfs4_stid *s;
3388 struct nfs4_client *cl = cstate->session->se_client;
3389 __be32 ret = nfserr_bad_stateid;
3390
3391 nfs4_lock_state();
3392 s = find_stateid(cl, stateid);
3393 if (!s)
3394 goto out;
3395 switch (s->sc_type) {
3396 case NFS4_DELEG_STID:
3397 ret = nfserr_locks_held;
3398 goto out;
3399 case NFS4_OPEN_STID:
3400 case NFS4_LOCK_STID:
3401 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
3402 if (ret)
3403 goto out;
3404 if (s->sc_type == NFS4_LOCK_STID)
3405 ret = nfsd4_free_lock_stateid(openlockstateid(s));
3406 else
3407 ret = nfserr_locks_held;
3408 break;
3409 default:
3410 ret = nfserr_bad_stateid;
3411 }
3412 out:
3413 nfs4_unlock_state();
3414 return ret;
3415 }
3416
3417 static inline int
3418 setlkflg (int type)
3419 {
3420 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
3421 RD_STATE : WR_STATE;
3422 }
3423
3424 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
3425 {
3426 struct svc_fh *current_fh = &cstate->current_fh;
3427 struct nfs4_stateowner *sop = stp->st_stateowner;
3428 __be32 status;
3429
3430 status = nfsd4_check_seqid(cstate, sop, seqid);
3431 if (status)
3432 return status;
3433 if (stp->st_stid.sc_type == NFS4_CLOSED_STID)
3434 /*
3435 * "Closed" stateid's exist *only* to return
3436 * nfserr_replay_me from the previous step.
3437 */
3438 return nfserr_bad_stateid;
3439 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
3440 if (status)
3441 return status;
3442 return nfs4_check_fh(current_fh, stp);
3443 }
3444
3445 /*
3446 * Checks for sequence id mutating operations.
3447 */
3448 static __be32
3449 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3450 stateid_t *stateid, char typemask,
3451 struct nfs4_ol_stateid **stpp)
3452 {
3453 __be32 status;
3454 struct nfs4_stid *s;
3455
3456 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
3457 seqid, STATEID_VAL(stateid));
3458
3459 *stpp = NULL;
3460 status = nfsd4_lookup_stateid(stateid, typemask, &s);
3461 if (status)
3462 return status;
3463 *stpp = openlockstateid(s);
3464 cstate->replay_owner = (*stpp)->st_stateowner;
3465 renew_client((*stpp)->st_stateowner->so_client);
3466
3467 return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
3468 }
3469
3470 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp)
3471 {
3472 __be32 status;
3473 struct nfs4_openowner *oo;
3474
3475 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
3476 NFS4_OPEN_STID, stpp);
3477 if (status)
3478 return status;
3479 oo = openowner((*stpp)->st_stateowner);
3480 if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
3481 return nfserr_bad_stateid;
3482 return nfs_ok;
3483 }
3484
3485 __be32
3486 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3487 struct nfsd4_open_confirm *oc)
3488 {
3489 __be32 status;
3490 struct nfs4_openowner *oo;
3491 struct nfs4_ol_stateid *stp;
3492
3493 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
3494 (int)cstate->current_fh.fh_dentry->d_name.len,
3495 cstate->current_fh.fh_dentry->d_name.name);
3496
3497 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
3498 if (status)
3499 return status;
3500
3501 nfs4_lock_state();
3502
3503 status = nfs4_preprocess_seqid_op(cstate,
3504 oc->oc_seqid, &oc->oc_req_stateid,
3505 NFS4_OPEN_STID, &stp);
3506 if (status)
3507 goto out;
3508 oo = openowner(stp->st_stateowner);
3509 status = nfserr_bad_stateid;
3510 if (oo->oo_flags & NFS4_OO_CONFIRMED)
3511 goto out;
3512 oo->oo_flags |= NFS4_OO_CONFIRMED;
3513 update_stateid(&stp->st_stid.sc_stateid);
3514 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3515 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
3516 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
3517
3518 nfsd4_create_clid_dir(oo->oo_owner.so_client);
3519 status = nfs_ok;
3520 out:
3521 if (!cstate->replay_owner)
3522 nfs4_unlock_state();
3523 return status;
3524 }
3525
3526 static inline void nfs4_file_downgrade(struct nfs4_ol_stateid *stp, unsigned int to_access)
3527 {
3528 int i;
3529
3530 for (i = 1; i < 4; i++) {
3531 if (test_bit(i, &stp->st_access_bmap)
3532 && ((i & to_access) != i)) {
3533 nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(i));
3534 __clear_bit(i, &stp->st_access_bmap);
3535 }
3536 }
3537 }
3538
3539 static void
3540 reset_union_bmap_deny(unsigned long deny, unsigned long *bmap)
3541 {
3542 int i;
3543 for (i = 0; i < 4; i++) {
3544 if ((i & deny) != i)
3545 __clear_bit(i, bmap);
3546 }
3547 }
3548
3549 __be32
3550 nfsd4_open_downgrade(struct svc_rqst *rqstp,
3551 struct nfsd4_compound_state *cstate,
3552 struct nfsd4_open_downgrade *od)
3553 {
3554 __be32 status;
3555 struct nfs4_ol_stateid *stp;
3556
3557 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
3558 (int)cstate->current_fh.fh_dentry->d_name.len,
3559 cstate->current_fh.fh_dentry->d_name.name);
3560
3561 if (!access_valid(od->od_share_access, cstate->minorversion)
3562 || !deny_valid(od->od_share_deny))
3563 return nfserr_inval;
3564
3565 nfs4_lock_state();
3566 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
3567 &od->od_stateid, &stp);
3568 if (status)
3569 goto out;
3570 status = nfserr_inval;
3571 if (!test_bit(od->od_share_access, &stp->st_access_bmap)) {
3572 dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n",
3573 stp->st_access_bmap, od->od_share_access);
3574 goto out;
3575 }
3576 if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) {
3577 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
3578 stp->st_deny_bmap, od->od_share_deny);
3579 goto out;
3580 }
3581 nfs4_file_downgrade(stp, od->od_share_access);
3582
3583 reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap);
3584
3585 update_stateid(&stp->st_stid.sc_stateid);
3586 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3587 status = nfs_ok;
3588 out:
3589 if (!cstate->replay_owner)
3590 nfs4_unlock_state();
3591 return status;
3592 }
3593
3594 void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so)
3595 {
3596 struct nfs4_openowner *oo;
3597 struct nfs4_ol_stateid *s;
3598
3599 if (!so->so_is_open_owner)
3600 return;
3601 oo = openowner(so);
3602 s = oo->oo_last_closed_stid;
3603 if (!s)
3604 return;
3605 if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) {
3606 /* Release the last_closed_stid on the next seqid bump: */
3607 oo->oo_flags |= NFS4_OO_PURGE_CLOSE;
3608 return;
3609 }
3610 oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE;
3611 release_last_closed_stateid(oo);
3612 }
3613
3614 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
3615 {
3616 unhash_open_stateid(s);
3617 s->st_stid.sc_type = NFS4_CLOSED_STID;
3618 }
3619
3620 /*
3621 * nfs4_unlock_state() called after encode
3622 */
3623 __be32
3624 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3625 struct nfsd4_close *close)
3626 {
3627 __be32 status;
3628 struct nfs4_openowner *oo;
3629 struct nfs4_ol_stateid *stp;
3630
3631 dprintk("NFSD: nfsd4_close on file %.*s\n",
3632 (int)cstate->current_fh.fh_dentry->d_name.len,
3633 cstate->current_fh.fh_dentry->d_name.name);
3634
3635 nfs4_lock_state();
3636 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
3637 &close->cl_stateid,
3638 NFS4_OPEN_STID|NFS4_CLOSED_STID,
3639 &stp);
3640 if (status)
3641 goto out;
3642 oo = openowner(stp->st_stateowner);
3643 status = nfs_ok;
3644 update_stateid(&stp->st_stid.sc_stateid);
3645 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3646
3647 nfsd4_close_open_stateid(stp);
3648 oo->oo_last_closed_stid = stp;
3649
3650 /* place unused nfs4_openowners on the close_lru list to be
3651 * released by the laundromat service after the lease period
3652 * to enable us to handle CLOSE replay
3653 */
3654 if (list_empty(&oo->oo_owner.so_stateids))
3655 move_to_close_lru(oo);
3656 out:
3657 if (!cstate->replay_owner)
3658 nfs4_unlock_state();
3659 return status;
3660 }
3661
3662 __be32
3663 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3664 struct nfsd4_delegreturn *dr)
3665 {
3666 struct nfs4_delegation *dp;
3667 stateid_t *stateid = &dr->dr_stateid;
3668 struct nfs4_stid *s;
3669 struct inode *inode;
3670 __be32 status;
3671
3672 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
3673 return status;
3674 inode = cstate->current_fh.fh_dentry->d_inode;
3675
3676 nfs4_lock_state();
3677 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s);
3678 if (status)
3679 goto out;
3680 dp = delegstateid(s);
3681 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
3682 if (status)
3683 goto out;
3684 renew_client(dp->dl_stid.sc_client);
3685
3686 unhash_delegation(dp);
3687 out:
3688 nfs4_unlock_state();
3689
3690 return status;
3691 }
3692
3693
3694 /*
3695 * Lock owner state (byte-range locks)
3696 */
3697 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
3698 #define LOCK_HASH_BITS 8
3699 #define LOCK_HASH_SIZE (1 << LOCK_HASH_BITS)
3700 #define LOCK_HASH_MASK (LOCK_HASH_SIZE - 1)
3701
3702 static inline u64
3703 end_offset(u64 start, u64 len)
3704 {
3705 u64 end;
3706
3707 end = start + len;
3708 return end >= start ? end: NFS4_MAX_UINT64;
3709 }
3710
3711 /* last octet in a range */
3712 static inline u64
3713 last_byte_offset(u64 start, u64 len)
3714 {
3715 u64 end;
3716
3717 BUG_ON(!len);
3718 end = start + len;
3719 return end > start ? end - 1: NFS4_MAX_UINT64;
3720 }
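/*
 * Editorial examples (hypothetical values) for the two helpers above:
 *
 *	end_offset(100, 10)                  = 110
 *	end_offset(1, NFS4_MAX_UINT64)       = NFS4_MAX_UINT64 (sum wraps)
 *	last_byte_offset(100, 10)            = 109 (last locked byte)
 *	last_byte_offset(5, NFS4_MAX_UINT64) = NFS4_MAX_UINT64 (sum wraps)
 *
 * i.e. any computed end that wraps past 2^64 - 1 is clamped to
 * NFS4_MAX_UINT64, which NFSv4 uses to mean "to end of file".
 */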
3721
3722 static inline unsigned int
3723 lock_ownerstr_hashval(struct inode *inode, u32 cl_id,
3724 struct xdr_netobj *ownername)
3725 {
3726 return (file_hashval(inode) + cl_id
3727 + opaque_hashval(ownername->data, ownername->len))
3728 & LOCK_HASH_MASK;
3729 }
3730
3731 static struct list_head lock_ownerstr_hashtbl[LOCK_HASH_SIZE];
3732
3733 /*
3734 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
3735 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
3736 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
3737 * locking, this prevents us from being completely protocol-compliant. The
3738 * real solution to this problem is to start using unsigned file offsets in
3739 * the VFS, but this is a very deep change!
3740 */
3741 static inline void
3742 nfs4_transform_lock_offset(struct file_lock *lock)
3743 {
3744 if (lock->fl_start < 0)
3745 lock->fl_start = OFFSET_MAX;
3746 if (lock->fl_end < 0)
3747 lock->fl_end = OFFSET_MAX;
3748 }
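/*
 * Editorial example: a client locking from byte 2^63 upward yields
 * fl_start = (loff_t)0x8000000000000000, which is negative once stored
 * in the kernel's signed offsets; the helper above clamps it (and a
 * similarly overflowed fl_end) to OFFSET_MAX, the best the signed VFS
 * interfaces can represent.
 */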
3749
3750 /* Hack!: For now, we're defining this just so we can use a pointer to it
3751 * as a unique cookie to identify our (NFSv4's) posix locks. */
3752 static const struct lock_manager_operations nfsd_posix_mng_ops = {
3753 };
3754
3755 static inline void
3756 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
3757 {
3758 struct nfs4_lockowner *lo;
3759
3760 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
3761 lo = (struct nfs4_lockowner *) fl->fl_owner;
3762 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
3763 lo->lo_owner.so_owner.len, GFP_KERNEL);
3764 if (!deny->ld_owner.data)
3765 /* We just don't care that much */
3766 goto nevermind;
3767 deny->ld_owner.len = lo->lo_owner.so_owner.len;
3768 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
3769 } else {
3770 nevermind:
3771 deny->ld_owner.len = 0;
3772 deny->ld_owner.data = NULL;
3773 deny->ld_clientid.cl_boot = 0;
3774 deny->ld_clientid.cl_id = 0;
3775 }
3776 deny->ld_start = fl->fl_start;
3777 deny->ld_length = NFS4_MAX_UINT64;
3778 if (fl->fl_end != NFS4_MAX_UINT64)
3779 deny->ld_length = fl->fl_end - fl->fl_start + 1;
3780 deny->ld_type = NFS4_READ_LT;
3781 if (fl->fl_type != F_RDLCK)
3782 deny->ld_type = NFS4_WRITE_LT;
3783 }
3784
3785 static struct nfs4_lockowner *
3786 find_lockowner_str(struct inode *inode, clientid_t *clid,
3787 struct xdr_netobj *owner)
3788 {
3789 unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
3790 struct nfs4_stateowner *op;
3791
3792 list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
3793 if (same_owner_str(op, owner, clid))
3794 return lockowner(op);
3795 }
3796 return NULL;
3797 }
3798
3799 static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
3800 {
3801 list_add(&lo->lo_owner.so_strhash, &lock_ownerstr_hashtbl[strhashval]);
3802 list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
3803 }
3804
3805 /*
3806 * Alloc a lock owner structure.
3807 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
3808 * occurred.
3809 *
3810 * strhashval = lock_ownerstr_hashval
3811 */
3812
3813 static struct nfs4_lockowner *
3814 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock)
{
3815 struct nfs4_lockowner *lo;
3816
3817 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
3818 if (!lo)
3819 return NULL;
3820 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
3821 lo->lo_owner.so_is_open_owner = 0;
3822 /* For a new lockowner it is the openowner's seqid that gets incremented
3823 * during encoding, so bump the lock seqid manually here: */
3824 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
3825 hash_lockowner(lo, strhashval, clp, open_stp);
3826 return lo;
3827 }
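/*
 * Seqid example (illustrative): a client creating a new lockowner sends,
 * say, lk_new_lock_seqid == 0, and we store so_seqid = 1 above. The
 * encoder only bumps the *open* owner's seqid on this reply, so without
 * the manual increment the lockowner's next request (seqid 1) would fail
 * the sequence check.
 */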
3828
3829 static struct nfs4_ol_stateid *
3830 alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
3831 {
3832 struct nfs4_ol_stateid *stp;
3833 struct nfs4_client *clp = lo->lo_owner.so_client;
3834 __be32 status;
3835
3836 stp = nfs4_alloc_stateid();
3837 if (stp == NULL)
3838 return NULL;
3839 status = init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
3840 if (status) {
3841 free_generic_stateid(stp);
3842 return NULL;
3843 }
3844 list_add(&stp->st_perfile, &fp->fi_stateids);
3845 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
3846 stp->st_stateowner = &lo->lo_owner;
3847 get_nfs4_file(fp);
3848 stp->st_file = fp;
3849 stp->st_access_bmap = 0;
3850 stp->st_deny_bmap = open_stp->st_deny_bmap;
3851 stp->st_openstp = open_stp;
3852 return stp;
3853 }
3854
3855 static int
3856 check_lock_length(u64 offset, u64 length)
3857 {
3858 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
3859 LOFF_OVERFLOW(offset, length)));
3860 }
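/*
 * Examples (illustrative): zero-length locks and ranges that would wrap
 * past 2^64 - 1 are rejected, but the sentinel length NFS4_MAX_UINT64
 * ("lock to end of file") is always accepted:
 *
 *	check_lock_length(0, 0);                       => 1 (zero length)
 *	check_lock_length(~0ULL - 4, 100);             => 1 (range wraps)
 *	check_lock_length(~0ULL - 4, NFS4_MAX_UINT64); => 0 (lock-to-EOF)
 *	check_lock_length(100, 50);                    => 0 (ordinary range)
 */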
3861
3862 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
3863 {
3864 struct nfs4_file *fp = lock_stp->st_file;
3865 int oflag = nfs4_access_to_omode(access);
3866
3867 if (test_bit(access, &lock_stp->st_access_bmap))
3868 return;
3869 nfs4_file_get_access(fp, oflag);
3870 __set_bit(access, &lock_stp->st_access_bmap);
3871 }
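/*
 * Note: the test_bit() guard makes get_lock_access() idempotent; calling
 * it twice with NFS4_SHARE_ACCESS_READ takes only one reference on the
 * underlying nfs4_file, keeping the matching put in the release path
 * balanced.
 */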
3872
3873 /*
3874 * LOCK operation
3875 */
3876 __be32
3877 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3878 struct nfsd4_lock *lock)
3879 {
3880 struct nfs4_openowner *open_sop = NULL;
3881 struct nfs4_lockowner *lock_sop = NULL;
3882 struct nfs4_ol_stateid *lock_stp;
3883 struct nfs4_file *fp;
3884 struct file *filp = NULL;
3885 struct file_lock file_lock;
3886 struct file_lock conflock;
3887 __be32 status = 0;
3888 unsigned int strhashval;
3889 int lkflg;
3890 int err;
3891
3892 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
3893 (long long) lock->lk_offset,
3894 (long long) lock->lk_length);
3895
3896 if (check_lock_length(lock->lk_offset, lock->lk_length))
3897 return nfserr_inval;
3898
3899 if ((status = fh_verify(rqstp, &cstate->current_fh,
3900 S_IFREG, NFSD_MAY_LOCK))) {
3901 dprintk("NFSD: nfsd4_lock: permission denied!\n");
3902 return status;
3903 }
3904
3905 nfs4_lock_state();
3906
3907 if (lock->lk_is_new) {
3908 /*
3909 * Client indicates that this is a new lockowner.
3910 * Use open owner and open stateid to create lock owner and
3911 * lock stateid.
3912 */
3913 struct nfs4_ol_stateid *open_stp = NULL;
3914
3915 status = nfserr_stale_clientid;
3916 if (!nfsd4_has_session(cstate) &&
3917 STALE_CLIENTID(&lock->lk_new_clientid))
3918 goto out;
3919
3920 /* validate and update open stateid and open seqid */
3921 status = nfs4_preprocess_confirmed_seqid_op(cstate,
3922 lock->lk_new_open_seqid,
3923 &lock->lk_new_open_stateid,
3924 &open_stp);
3925 if (status)
3926 goto out;
3927 open_sop = openowner(open_stp->st_stateowner);
3928 status = nfserr_bad_stateid;
3929 if (!nfsd4_has_session(cstate) &&
3930 !same_clid(&open_sop->oo_owner.so_client->cl_clientid,
3931 &lock->v.new.clientid))
3932 goto out;
3933 /* create lockowner and lock stateid */
3934 fp = open_stp->st_file;
3935 strhashval = lock_ownerstr_hashval(fp->fi_inode,
3936 open_sop->oo_owner.so_client->cl_clientid.cl_id,
3937 &lock->v.new.owner);
3938 /* XXX: Do we need to check for duplicate stateowners on
3939 * the same file, or should they just be allowed (and
3940 * create new stateids)? */
3941 status = nfserr_jukebox;
3942 lock_sop = alloc_init_lock_stateowner(strhashval,
3943 open_sop->oo_owner.so_client, open_stp, lock);
3944 if (lock_sop == NULL)
3945 goto out;
3946 lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp);
3947 if (lock_stp == NULL)
3948 goto out;
3949 } else {
3950 /* lock (lock owner + lock stateid) already exists */
3951 status = nfs4_preprocess_seqid_op(cstate,
3952 lock->lk_old_lock_seqid,
3953 &lock->lk_old_lock_stateid,
3954 NFS4_LOCK_STID, &lock_stp);
3955 if (status)
3956 goto out;
3957 lock_sop = lockowner(lock_stp->st_stateowner);
3958 fp = lock_stp->st_file;
3959 }
3960 /* lock_sop and lock_stp have been created or found */
3961
3962 lkflg = setlkflg(lock->lk_type);
3963 status = nfs4_check_openmode(lock_stp, lkflg);
3964 if (status)
3965 goto out;
3966
3967 status = nfserr_grace;
3968 if (locks_in_grace() && !lock->lk_reclaim)
3969 goto out;
3970 status = nfserr_no_grace;
3971 if (!locks_in_grace() && lock->lk_reclaim)
3972 goto out;
3973
3974 locks_init_lock(&file_lock);
3975 switch (lock->lk_type) {
3976 case NFS4_READ_LT:
3977 case NFS4_READW_LT:
3978 filp = find_readable_file(lock_stp->st_file);
3979 if (filp)
3980 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
3981 file_lock.fl_type = F_RDLCK;
3982 break;
3983 case NFS4_WRITE_LT:
3984 case NFS4_WRITEW_LT:
3985 filp = find_writeable_file(lock_stp->st_file);
3986 if (filp)
3987 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
3988 file_lock.fl_type = F_WRLCK;
3989 break;
3990 default:
3991 status = nfserr_inval;
3992 goto out;
3993 }
3994 if (!filp) {
3995 status = nfserr_openmode;
3996 goto out;
3997 }
3998 file_lock.fl_owner = (fl_owner_t)lock_sop;
3999 file_lock.fl_pid = current->tgid;
4000 file_lock.fl_file = filp;
4001 file_lock.fl_flags = FL_POSIX;
4002 file_lock.fl_lmops = &nfsd_posix_mng_ops;
4003
4004 file_lock.fl_start = lock->lk_offset;
4005 file_lock.fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
4006 nfs4_transform_lock_offset(&file_lock);
4007
4008 /*
4009 * Try to lock the file in the VFS.
4010 * Note: locks.c uses lock_flocks() to protect the inode's lock list.
4011 */
4012
4013 err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
4014 switch (-err) {
4015 case 0: /* success! */
4016 update_stateid(&lock_stp->st_stid.sc_stateid);
4017 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
4018 sizeof(stateid_t));
4019 status = 0;
4020 break;
4021 case EAGAIN: /* conflock holds conflicting lock */
4022 status = nfserr_denied;
4023 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
4024 nfs4_set_lock_denied(&conflock, &lock->lk_denied);
4025 break;
4026 case EDEADLK:
4027 status = nfserr_deadlock;
4028 break;
4029 default:
4030 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
4031 status = nfserrno(err);
4032 break;
4033 }
4034 out:
4035 if (status && lock->lk_is_new && lock_sop)
4036 release_lockowner(lock_sop);
4037 if (!cstate->replay_owner)
4038 nfs4_unlock_state();
4039 return status;
4040 }
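/*
 * Rough shape of a LOCK exchange (a sketch, not real wire data): the
 * first lock from a new lockowner carries the open stateid plus open and
 * lock seqids and takes the lk_is_new branch above; later locks carry the
 * returned lock stateid and take the else branch. The VFS result maps as
 * 0 => nfs_ok, -EAGAIN => nfserr_denied (with the conflicting range in
 * lk_denied), -EDEADLK => nfserr_deadlock.
 */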
4041
4042 /*
4043 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4044 * so we do a temporary open here just to get an open file to pass to
4045 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
4046 * inode operation.)
4047 */
4048 static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4049 {
4050 struct file *file;
4051 int err;
4052
4053 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4054 if (err)
4055 return err;
4056 err = vfs_test_lock(file, lock);
4057 nfsd_close(file);
4058 return err;
4059 }
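/*
 * A read-only open is enough here: vfs_test_lock() only inspects the
 * lock state and never writes through the struct file, so even a LOCKT
 * for a write lock can be tested via NFSD_MAY_READ.
 */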
4060
4061 /*
4062 * LOCKT operation
4063 */
4064 __be32
4065 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4066 struct nfsd4_lockt *lockt)
4067 {
4068 struct inode *inode;
4069 struct file_lock file_lock;
4070 struct nfs4_lockowner *lo;
4071 int error;
4072 __be32 status;
4073
4074 if (locks_in_grace())
4075 return nfserr_grace;
4076
4077 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
4078 return nfserr_inval;
4079
4080 nfs4_lock_state();
4081
4082 status = nfserr_stale_clientid;
4083 if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid))
4084 goto out;
4085
4086 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4087 goto out;
4088
4089 inode = cstate->current_fh.fh_dentry->d_inode;
4090 locks_init_lock(&file_lock);
4091 switch (lockt->lt_type) {
4092 case NFS4_READ_LT:
4093 case NFS4_READW_LT:
4094 file_lock.fl_type = F_RDLCK;
4095 break;
4096 case NFS4_WRITE_LT:
4097 case NFS4_WRITEW_LT:
4098 file_lock.fl_type = F_WRLCK;
4099 break;
4100 default:
4101 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
4102 status = nfserr_inval;
4103 goto out;
4104 }
4105
4106 lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
4107 if (lo)
4108 file_lock.fl_owner = (fl_owner_t)lo;
4109 file_lock.fl_pid = current->tgid;
4110 file_lock.fl_flags = FL_POSIX;
4111
4112 file_lock.fl_start = lockt->lt_offset;
4113 file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
4114
4115 nfs4_transform_lock_offset(&file_lock);
4116
4117 status = nfs_ok;
4118 error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
4119 if (error) {
4120 status = nfserrno(error);
4121 goto out;
4122 }
4123 if (file_lock.fl_type != F_UNLCK) {
4124 status = nfserr_denied;
4125 nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
4126 }
4127 out:
4128 nfs4_unlock_state();
4129 return status;
4130 }
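/*
 * Outcome example (illustrative): vfs_test_lock() rewrites file_lock in
 * place. If fl_type comes back F_UNLCK there was no conflict and LOCKT
 * returns nfs_ok; otherwise the conflicting owner and range are encoded
 * into lt_denied and the client sees nfserr_denied.
 */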
4131
4132 __be32
4133 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4134 struct nfsd4_locku *locku)
4135 {
4136 struct nfs4_ol_stateid *stp;
4137 struct file *filp = NULL;
4138 struct file_lock file_lock;
4139 __be32 status;
4140 int err;
4141
4142 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
4143 (long long) locku->lu_offset,
4144 (long long) locku->lu_length);
4145
4146 if (check_lock_length(locku->lu_offset, locku->lu_length))
4147 return nfserr_inval;
4148
4149 nfs4_lock_state();
4150
4151 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
4152 &locku->lu_stateid, NFS4_LOCK_STID, &stp);
4153 if (status)
4154 goto out;
4155 filp = find_any_file(stp->st_file);
4156 if (!filp) {
4157 status = nfserr_lock_range;
4158 goto out;
4159 }
4161 locks_init_lock(&file_lock);
4162 file_lock.fl_type = F_UNLCK;
4163 file_lock.fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
4164 file_lock.fl_pid = current->tgid;
4165 file_lock.fl_file = filp;
4166 file_lock.fl_flags = FL_POSIX;
4167 file_lock.fl_lmops = &nfsd_posix_mng_ops;
4168 file_lock.fl_start = locku->lu_offset;
4169
4170 file_lock.fl_end = last_byte_offset(locku->lu_offset, locku->lu_length);
4171 nfs4_transform_lock_offset(&file_lock);
4172
4173 /*
4174 * Try to unlock the file in the VFS.
4175 */
4176 err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL);
4177 if (err) {
4178 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
4179 goto out_nfserr;
4180 }
4181 /*
4182 * OK, unlock succeeded; the only thing left to do is update the stateid.
4183 */
4184 update_stateid(&stp->st_stid.sc_stateid);
4185 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4186
4187 out:
4188 nfs4_unlock_state();
4189 return status;
4190
4191 out_nfserr:
4192 status = nfserrno(err);
4193 goto out;
4194 }
4195
4196 /*
4197 * returns
4198 * 1: locks held by lockowner
4199 * 0: no locks held by lockowner
4200 */
4201 static int
4202 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
4203 {
4204 struct file_lock **flpp;
4205 struct inode *inode = filp->fi_inode;
4206 int status = 0;
4207
4208 lock_flocks();
4209 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4210 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
4211 status = 1;
4212 goto out;
4213 }
4214 }
4215 out:
4216 unlock_flocks();
4217 return status;
4218 }
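/*
 * Usage sketch (mirrors the RELEASE_LOCKOWNER handler below): every
 * stateid of a matching owner is checked, and nothing is released while
 * any posix lock is still tagged with that owner:
 *
 *	if (check_for_locks(stp->st_file, lockowner(sop)))
 *		goto out;	// status == nfserr_locks_held
 */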
4219
4220 __be32
4221 nfsd4_release_lockowner(struct svc_rqst *rqstp,
4222 struct nfsd4_compound_state *cstate,
4223 struct nfsd4_release_lockowner *rlockowner)
4224 {
4225 clientid_t *clid = &rlockowner->rl_clientid;
4226 struct nfs4_stateowner *sop;
4227 struct nfs4_lockowner *lo;
4228 struct nfs4_ol_stateid *stp;
4229 struct xdr_netobj *owner = &rlockowner->rl_owner;
4230 struct list_head matches;
4231 int i;
4232 __be32 status;
4233
4234 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
4235 clid->cl_boot, clid->cl_id);
4236
4237 /* XXX check for lease expiration */
4238
4239 status = nfserr_stale_clientid;
4240 if (STALE_CLIENTID(clid))
4241 return status;
4242
4243 nfs4_lock_state();
4244
4245 status = nfserr_locks_held;
4246 /* XXX: we're doing a linear search through all the lockowners.
4247 * Yipes! For now we'll just hope clients aren't really using
4248 * release_lockowner much, but eventually we have to fix these
4249 * data structures. */
4250 INIT_LIST_HEAD(&matches);
4251 for (i = 0; i < LOCK_HASH_SIZE; i++) {
4252 list_for_each_entry(sop, &lock_ownerstr_hashtbl[i], so_strhash) {
4253 if (!same_owner_str(sop, owner, clid))
4254 continue;
4255 list_for_each_entry(stp, &sop->so_stateids,
4256 st_perstateowner) {
4257 lo = lockowner(sop);
4258 if (check_for_locks(stp->st_file, lo))
4259 goto out;
4260 list_add(&lo->lo_list, &matches);
4261 }
4262 }
4263 }
4264 /* Clients probably won't expect us to return with some (but not all)
4265 * of the lockowner state released; so don't release any until all
4266 * have been checked. */
4267 status = nfs_ok;
4268 while (!list_empty(&matches)) {
4269 lo = list_entry(matches.next, struct nfs4_lockowner,
4270 lo_list);
4271 /* unhash_stateowner deletes so_perclient only
4272 * for openowners. */
4273 list_del(&lo->lo_list);
4274 release_lockowner(lo);
4275 }
4276 out:
4277 nfs4_unlock_state();
4278 return status;
4279 }
4280
4281 static inline struct nfs4_client_reclaim *
4282 alloc_reclaim(void)
4283 {
4284 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
4285 }
4286
4287 int
4288 nfs4_has_reclaimed_state(const char *name, bool use_exchange_id)
4289 {
4290 unsigned int strhashval = clientstr_hashval(name);
4291 struct nfs4_client *clp;
4292
4293 clp = find_confirmed_client_by_str(name, strhashval);
4294 return clp ? 1 : 0;
4295 }
4296
4297 /*
4298 * On failure all reclaim bets are off: the client ends up with nfserr_no_grace.
4299 */
4300 int
4301 nfs4_client_to_reclaim(const char *name)
4302 {
4303 unsigned int strhashval;
4304 struct nfs4_client_reclaim *crp = NULL;
4305
4306 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
4307 crp = alloc_reclaim();
4308 if (!crp)
4309 return 0;
4310 strhashval = clientstr_hashval(name);
4311 INIT_LIST_HEAD(&crp->cr_strhash);
4312 list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]);
4313 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
4314 reclaim_str_hashtbl_size++;
4315 return 1;
4316 }
4317
4318 static void
4319 nfs4_release_reclaim(void)
4320 {
4321 struct nfs4_client_reclaim *crp = NULL;
4322 int i;
4323
4324 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4325 while (!list_empty(&reclaim_str_hashtbl[i])) {
4326 crp = list_entry(reclaim_str_hashtbl[i].next,
4327 struct nfs4_client_reclaim, cr_strhash);
4328 list_del(&crp->cr_strhash);
4329 kfree(crp);
4330 reclaim_str_hashtbl_size--;
4331 }
4332 }
4333 BUG_ON(reclaim_str_hashtbl_size);
4334 }
4335
4336 /* Called from OPEN with CLAIM_PREVIOUS: map the (new) clientid to an
4337 * entry in the reclaim list, if any. */
4338 static struct nfs4_client_reclaim *
4339 nfs4_find_reclaim_client(clientid_t *clid)
4340 {
4341 unsigned int strhashval;
4342 struct nfs4_client *clp;
4343 struct nfs4_client_reclaim *crp = NULL;
4344
4346 /* find clientid in conf_id_hashtbl */
4347 clp = find_confirmed_client(clid);
4348 if (clp == NULL)
4349 return NULL;
4350
4351 dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n",
4352 clp->cl_name.len, clp->cl_name.data,
4353 clp->cl_recdir);
4354
4355 /* find clp->cl_name in reclaim_str_hashtbl */
4356 strhashval = clientstr_hashval(clp->cl_recdir);
4357 list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) {
4358 if (same_name(crp->cr_recdir, clp->cl_recdir)) {
4359 return crp;
4360 }
4361 }
4362 return NULL;
4363 }
4364
4365 /*
4366 * Called from OPEN. Look for clientid in reclaim list.
4367 */
4368 __be32
4369 nfs4_check_open_reclaim(clientid_t *clid)
4370 {
4371 return nfs4_find_reclaim_client(clid) ? nfs_ok : nfserr_reclaim_bad;
4372 }
4373
4374 /* initialization to perform at module load time: */
4375
4376 int
4377 nfs4_state_init(void)
4378 {
4379 int i, status;
4380
4381 status = nfsd4_init_slabs();
4382 if (status)
4383 return status;
4384 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4385 INIT_LIST_HEAD(&conf_id_hashtbl[i]);
4386 INIT_LIST_HEAD(&conf_str_hashtbl[i]);
4387 INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
4388 INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
4389 INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
4390 }
4391 for (i = 0; i < SESSION_HASH_SIZE; i++)
4392 INIT_LIST_HEAD(&sessionid_hashtbl[i]);
4393 for (i = 0; i < FILE_HASH_SIZE; i++) {
4394 INIT_LIST_HEAD(&file_hashtbl[i]);
4395 }
4396 for (i = 0; i < OPEN_OWNER_HASH_SIZE; i++) {
4397 INIT_LIST_HEAD(&open_ownerstr_hashtbl[i]);
4398 }
4399 for (i = 0; i < LOCK_HASH_SIZE; i++) {
4400 INIT_LIST_HEAD(&lock_ownerstr_hashtbl[i]);
4401 }
4402 memset(&onestateid, ~0, sizeof(stateid_t));
4403 INIT_LIST_HEAD(&close_lru);
4404 INIT_LIST_HEAD(&client_lru);
4405 INIT_LIST_HEAD(&del_recall_lru);
4406 reclaim_str_hashtbl_size = 0;
4407 return 0;
4408 }
4409
4410 static void
4411 nfsd4_load_reboot_recovery_data(void)
4412 {
4413 int status;
4414
4415 nfs4_lock_state();
4416 nfsd4_init_recdir();
4417 status = nfsd4_recdir_load();
4418 nfs4_unlock_state();
4419 if (status)
4420 printk("NFSD: Failure reading reboot recovery data\n");
4421 }
4422
4423 /*
4424 * Since the lifetime of a delegation isn't limited to that of an open, a
4425 * client may quite reasonably hang on to a delegation as long as it has
4426 * the inode cached. This becomes an obvious problem the first time a
4427 * client's inode cache approaches the size of the server's total memory.
4428 *
4429 * For now we avoid this problem by imposing a hard limit on the number
4430 * of delegations, which varies according to the server's memory size.
4431 */
4432 static void
4433 set_max_delegations(void)
4434 {
4435 /*
4436 * Allow at most 4 delegations per megabyte of RAM. Quick
4437 * estimates suggest that in the worst case (where every delegation
4438 * is for a different inode), a delegation could take about 1.5K,
4439 * giving a worst case usage of about 0.6% of memory.
4440 */
4441 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
4442 }
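/*
 * Worked example (assuming 4K pages, PAGE_SHIFT == 12): the shift is
 * 20 - 2 - 12 = 6, i.e. divide by the 256 pages in a megabyte, then
 * multiply by 4. A machine with 1 GiB of free buffer pages (262144
 * pages) gets 262144 >> 6 == 4096 delegations; at ~1.5K each that is
 * about 6 MB, roughly 0.6% of the gigabyte.
 */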
4443
4444 /* initialization to perform when the nfsd service is started: */
4445
4446 static int
4447 __nfs4_state_start(void)
4448 {
4449 int ret;
4450
4451 boot_time = get_seconds();
4452 locks_start_grace(&nfsd4_manager);
4453 printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
4454 nfsd4_grace);
4455 ret = set_callback_cred();
4456 if (ret)
4457 return -ENOMEM;
4458 laundry_wq = create_singlethread_workqueue("nfsd4");
4459 if (laundry_wq == NULL)
4460 return -ENOMEM;
4461 ret = nfsd4_create_callback_queue();
4462 if (ret)
4463 goto out_free_laundry;
4464 queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
4465 set_max_delegations();
4466 return 0;
4467 out_free_laundry:
4468 destroy_workqueue(laundry_wq);
4469 return ret;
4470 }
4471
4472 int
4473 nfs4_state_start(void)
4474 {
4475 nfsd4_load_reboot_recovery_data();
4476 return __nfs4_state_start();
4477 }
4478
4479 static void
4480 __nfs4_state_shutdown(void)
4481 {
4482 int i;
4483 struct nfs4_client *clp = NULL;
4484 struct nfs4_delegation *dp = NULL;
4485 struct list_head *pos, *next, reaplist;
4486
4487 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4488 while (!list_empty(&conf_id_hashtbl[i])) {
4489 clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
4490 expire_client(clp);
4491 }
4492 while (!list_empty(&unconf_str_hashtbl[i])) {
4493 clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash);
4494 expire_client(clp);
4495 }
4496 }
4497 INIT_LIST_HEAD(&reaplist);
4498 spin_lock(&recall_lock);
4499 list_for_each_safe(pos, next, &del_recall_lru) {
4500 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4501 list_move(&dp->dl_recall_lru, &reaplist);
4502 }
4503 spin_unlock(&recall_lock);
4504 list_for_each_safe(pos, next, &reaplist) {
4505 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4506 list_del_init(&dp->dl_recall_lru);
4507 unhash_delegation(dp);
4508 }
4509
4510 nfsd4_shutdown_recdir();
4511 }
4512
4513 void
4514 nfs4_state_shutdown(void)
4515 {
4516 cancel_delayed_work_sync(&laundromat_work);
4517 destroy_workqueue(laundry_wq);
4518 locks_end_grace(&nfsd4_manager);
4519 nfs4_lock_state();
4520 nfs4_release_reclaim();
4521 __nfs4_state_shutdown();
4522 nfs4_unlock_state();
4523 nfsd4_destroy_callback_queue();
4524 }