nfsd: make unconf_name_tree per net
fs/nfsd/nfs4state.c
/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/clnt.h>
#include "xdr4.h"
#include "vfs.h"
#include "current_stateid.h"
#include "fault_inject.h"

#include "netns.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC

/* Globals */
time_t nfsd4_lease = 90;     /* default lease time */
time_t nfsd4_grace = 90;

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))

/* forward declarations */
static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);

/* Locking: */

/* Currently used for almost all code touching nfsv4 state: */
static DEFINE_MUTEX(client_mutex);

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(recall_lock);

static struct kmem_cache *openowner_slab = NULL;
static struct kmem_cache *lockowner_slab = NULL;
static struct kmem_cache *file_slab = NULL;
static struct kmem_cache *stateid_slab = NULL;
static struct kmem_cache *deleg_slab = NULL;

void
nfs4_lock_state(void)
{
	mutex_lock(&client_mutex);
}

static void free_session(struct kref *);

/* Must be called under the client_lock */
static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	kref_put(&ses->se_ref, free_session);
}

static void nfsd4_get_session(struct nfsd4_session *ses)
{
	kref_get(&ses->se_ref);
}

void
nfs4_unlock_state(void)
{
	mutex_unlock(&client_mutex);
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}
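
/*
 * Editor's worked example, for illustration only: hashing the two bytes
 * {0x01, 0x02} gives x = (0 * 37 + 0x01) * 37 + 0x02 = 39.  Callers such
 * as ownerstr_hashval() and clientstr_hashval() below then mask the
 * result down to their hash table size.
 */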

static struct list_head del_recall_lru;

static void nfsd4_free_file(struct nfs4_file *f)
{
	kmem_cache_free(file_slab, f);
}

static inline void
put_nfs4_file(struct nfs4_file *fi)
{
	if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
		list_del(&fi->fi_hash);
		spin_unlock(&recall_lock);
		iput(fi->fi_inode);
		nfsd4_free_file(fi);
	}
}

static inline void
get_nfs4_file(struct nfs4_file *fi)
{
	atomic_inc(&fi->fi_ref);
}

static int num_delegations;
unsigned int max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	ret += clientid;
	return ret & OWNER_HASH_MASK;
}

static struct list_head ownerstr_hashtbl[OWNER_HASH_SIZE];

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int file_hashval(struct inode *ino)
{
	/* XXX: why are we hashing on inode pointer, anyway? */
	return hash_ptr(ino, FILE_HASH_BITS);
}

static struct list_head file_hashtbl[FILE_HASH_SIZE];

static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
	BUG_ON(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
	atomic_inc(&fp->fi_access[oflag]);
}

static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
	if (oflag == O_RDWR) {
		__nfs4_file_get_access(fp, O_RDONLY);
		__nfs4_file_get_access(fp, O_WRONLY);
	} else
		__nfs4_file_get_access(fp, oflag);
}

static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
{
	if (fp->fi_fds[oflag]) {
		fput(fp->fi_fds[oflag]);
		fp->fi_fds[oflag] = NULL;
	}
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	if (atomic_dec_and_test(&fp->fi_access[oflag])) {
		nfs4_file_put_fd(fp, oflag);
		/*
		 * It's also safe to get rid of the RDWR open *if*
		 * we no longer have need of the other kind of access
		 * or if we already have the other kind of open:
		 */
		if (fp->fi_fds[1-oflag]
			|| atomic_read(&fp->fi_access[1 - oflag]) == 0)
			nfs4_file_put_fd(fp, O_RDWR);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	if (oflag == O_RDWR) {
		__nfs4_file_put_access(fp, O_RDONLY);
		__nfs4_file_put_access(fp, O_WRONLY);
	} else
		__nfs4_file_put_access(fp, oflag);
}

static inline int get_new_stid(struct nfs4_stid *stid)
{
	static int min_stateid = 0;
	struct idr *stateids = &stid->sc_client->cl_stateids;
	int new_stid;
	int error;

	error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
	/*
	 * Note: the necessary preallocation was done in
	 * nfs4_alloc_stateid().  The idr code caps the number of
	 * preallocations that can exist at a time, but the state lock
	 * prevents anyone from using ours before we get here:
	 */
	BUG_ON(error);
	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */

	min_stateid = new_stid+1;
	if (min_stateid == INT_MAX)
		min_stateid = 0;
	return new_stid;
}
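
/*
 * Editor's note, illustrating the wraparound above: if the idr hands
 * back new_stid == INT_MAX - 1, min_stateid becomes INT_MAX and is
 * immediately reset to 0, so small ids are only handed out again after
 * the whole id space has been walked once.
 */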

static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
{
	stateid_t *s = &stid->sc_stateid;
	int new_id;

	stid->sc_type = type;
	stid->sc_client = cl;
	s->si_opaque.so_clid = cl->cl_clientid;
	new_id = get_new_stid(stid);
	s->si_opaque.so_id = (u32)new_id;
	/* Will be incremented before return to client: */
	s->si_generation = 0;
}

static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
{
	struct idr *stateids = &cl->cl_stateids;

	if (!idr_pre_get(stateids, GFP_KERNEL))
		return NULL;
	/*
	 * Note: if we fail here (or any time between now and the time
	 * we actually get the new idr), we won't need to undo the idr
	 * preallocation, since the idr code caps the number of
	 * preallocated entries.
	 */
	return kmem_cache_alloc(slab, GFP_KERNEL);
}

static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
{
	return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
{
	struct nfs4_delegation *dp;
	struct nfs4_file *fp = stp->st_file;

	dprintk("NFSD alloc_init_deleg\n");
	/*
	 * Major work on the lease subsystem (for example, to support
	 * callbacks on stat) will be required before we can support
	 * write delegations properly.
	 */
	if (type != NFS4_OPEN_DELEGATE_READ)
		return NULL;
	if (fp->fi_had_conflict)
		return NULL;
	if (num_delegations > max_delegations)
		return NULL;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
	if (dp == NULL)
		return dp;
	init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	num_delegations++;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	get_nfs4_file(fp);
	dp->dl_file = fp;
	dp->dl_type = type;
	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
	dp->dl_time = 0;
	atomic_set(&dp->dl_count, 1);
	nfsd4_init_callback(&dp->dl_recall);
	return dp;
}

void
nfs4_put_delegation(struct nfs4_delegation *dp)
{
	if (atomic_dec_and_test(&dp->dl_count)) {
		dprintk("NFSD: freeing dp %p\n",dp);
		put_nfs4_file(dp->dl_file);
		kmem_cache_free(deleg_slab, dp);
		num_delegations--;
	}
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	if (atomic_dec_and_test(&fp->fi_delegees)) {
		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
		fp->fi_lease = NULL;
		fput(fp->fi_deleg_file);
		fp->fi_deleg_file = NULL;
	}
}

static void unhash_stid(struct nfs4_stid *s)
{
	struct idr *stateids = &s->sc_client->cl_stateids;

	idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
}

/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
	unhash_stid(&dp->dl_stid);
	list_del_init(&dp->dl_perclnt);
	spin_lock(&recall_lock);
	list_del_init(&dp->dl_perfile);
	list_del_init(&dp->dl_recall_lru);
	spin_unlock(&recall_lock);
	nfs4_put_deleg_lease(dp->dl_file);
	nfs4_put_delegation(dp);
}

/*
 * SETCLIENTID state
 */

/* client_lock protects the client lru list and session hash table */
static DEFINE_SPINLOCK(client_lock);

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * client_lru holds client queue ordered by nfs4_client.cl_time
 * for lease renewal.
 *
 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
 * for last close replay.
 *
 * All of the above fields are protected by the client_mutex.
 */
static struct list_head client_lru;
static struct list_head close_lru;

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}
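
/*
 * Editor's worked example, for illustration only: after one OPEN with
 * NFS4_SHARE_ACCESS_READ (bit 1) and another with NFS4_SHARE_ACCESS_WRITE
 * (bit 2), bmap is 0b0110 and bmap_to_share_mode() returns
 * 1 | 2 == NFS4_SHARE_ACCESS_BOTH.
 */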

static bool
test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
	unsigned int access, deny;

	access = bmap_to_share_mode(stp->st_access_bmap);
	deny = bmap_to_share_mode(stp->st_deny_bmap);
	if ((access & open->op_share_deny) || (deny & open->op_share_access))
		return false;
	return true;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	__set_bit(access, &stp->st_access_bmap);
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	__clear_bit(access, &stp->st_access_bmap);
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	return test_bit(access, &stp->st_access_bmap);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	__set_bit(access, &stp->st_deny_bmap);
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	__clear_bit(access, &stp->st_deny_bmap);
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	return test_bit(access, &stp->st_deny_bmap);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	BUG();
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_file,
					     nfs4_access_to_omode(i));
		clear_access(i, stp);
	}
}

static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
{
	list_del(&stp->st_perfile);
	list_del(&stp->st_perstateowner);
}

static void close_generic_stateid(struct nfs4_ol_stateid *stp)
{
	release_all_access(stp);
	put_nfs4_file(stp->st_file);
	stp->st_file = NULL;
}

static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
	kmem_cache_free(stateid_slab, stp);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct file *file;

	unhash_generic_stateid(stp);
	unhash_stid(&stp->st_stid);
	file = find_any_file(stp->st_file);
	if (file)
		locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
	close_generic_stateid(stp);
	free_generic_stateid(stp);
}

static void unhash_lockowner(struct nfs4_lockowner *lo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&lo->lo_owner.so_strhash);
	list_del(&lo->lo_perstateid);
	list_del(&lo->lo_owner_ino_hash);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_lock_stateid(stp);
	}
}

static void release_lockowner(struct nfs4_lockowner *lo)
{
	unhash_lockowner(lo);
	nfs4_free_lockowner(lo);
}

static void
release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_lockowner *lo;

	while (!list_empty(&open_stp->st_lockowners)) {
		lo = list_entry(open_stp->st_lockowners.next,
				struct nfs4_lockowner, lo_perstateid);
		release_lockowner(lo);
	}
}

static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_generic_stateid(stp);
	release_stateid_lockowners(stp);
	close_generic_stateid(stp);
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_open_stateid(stp);
	unhash_stid(&stp->st_stid);
	free_generic_stateid(stp);
}

static void unhash_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&oo->oo_owner.so_strhash);
	list_del(&oo->oo_perclient);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_open_stateid(stp);
	}
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;

	if (s) {
		unhash_stid(&s->st_stid);
		free_generic_stateid(s);
		oo->oo_last_closed_stid = NULL;
	}
}

static void release_openowner(struct nfs4_openowner *oo)
{
	unhash_openowner(oo);
	list_del(&oo->oo_close_lru);
	release_last_closed_stateid(oo);
	nfs4_free_openowner(oo);
}

#define SESSION_HASH_SIZE	512
static struct list_head sessionid_hashtbl[SESSION_HASH_SIZE];

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef NFSD_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif


static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
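
/*
 * That is, 24 + 12 + 44 = 80 bytes of per-reply overhead that is never
 * cached; nfsd4_sanitize_slot_size() below subtracts it before sizing
 * each slot's cache.
 */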

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
{
	return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
}

static int nfsd4_sanitize_slot_size(u32 size)
{
	size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
	size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);

	return size;
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static int nfsd4_get_drc_mem(int slotsize, u32 num)
{
	int avail;

	num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);

	spin_lock(&nfsd_drc_lock);
	avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
			nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}
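
/*
 * Editor's worked example, for illustration only (numbers are made up):
 * with slotsize == 1024, avail working out to 4096, and a client asking
 * for 16 slots, num = min(16, 4096 / 1024) = 4, and 4 * 1024 bytes are
 * charged to nfsd_drc_mem_used.
 */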

static void nfsd4_put_drc_mem(int slotsize, int num)
{
	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * num;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *__alloc_session(int slotsize, int numslots)
{
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		mem = sizeof(struct nfsd4_slot) + slotsize;
		new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}
	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
{
	u32 maxrpc = nfsd_serv->sv_max_mesg;

	new->maxreqs = numslots;
	new->maxresp_cached = min_t(u32, req->maxresp_cached,
					slotsize + NFSD_MIN_HDR_SEQ_SZ);
	new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
	new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
	new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	spin_unlock(&clp->cl_lock);
	nfsd4_probe_callback(clp);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	if (conn->cn_flags & NFS4_CDFC4_BACK) {
		/* callback channel may be back up */
		nfsd4_probe_callback(ses->se_client);
	}
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	nfsd4_put_drc_mem(slot_bytes(&ses->se_fchannel), ses->se_fchannel.maxreqs);
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct kref *kref)
{
	struct nfsd4_session *ses;

	lockdep_assert_held(&client_lock);
	ses = container_of(kref, struct nfsd4_session, se_ref);
	nfsd4_del_conns(ses);
	__free_session(ses);
}

void nfsd4_put_session(struct nfsd4_session *ses)
{
	spin_lock(&client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&client_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan)
{
	struct nfsd4_session *new;
	int numslots, slotsize;
	/*
	 * Note: decreasing the slot size below the client's request may
	 * make it difficult for the client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease the number of slots instead of their size.
	 */
	slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
	numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
	if (numslots < 1)
		return NULL;

	new = __alloc_session(slotsize, numslots);
	if (!new) {
		nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
		return NULL;
	}
	init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);
	return new;
}

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	kref_init(&new->se_ref);
	idx = hash_sessionid(&new->se_sessionid);
	spin_lock(&client_lock);
	list_add(&new->se_hash, &sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);
	spin_unlock(&client_lock);

	if (cses->flags & SESSION4_BACK_CHAN) {
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}

/* caller must hold client_lock */
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_session *elem;
	int idx;

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &client_lru);
	clp->cl_time = get_seconds();
}

static inline void
renew_client(struct nfs4_client *clp)
{
	spin_lock(&client_lock);
	renew_client_locked(clp);
	spin_unlock(&client_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	if (clid->cl_boot == nn->boot_time)
		return 0;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return 1;
}

/*
 * XXX Should we use a slab cache?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL) {
		kfree(clp);
		return NULL;
	}
	clp->cl_name.len = name.len;
	return clp;
}

static inline void
free_client(struct nfs4_client *clp)
{
	lockdep_assert_held(&client_lock);
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		nfsd4_put_session_locked(ses);
	}
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_name.data);
	kfree(clp);
}

void
release_session_client(struct nfsd4_session *session)
{
	struct nfs4_client *clp = session->se_client;

	if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock))
		return;
	if (is_client_expired(clp)) {
		free_client(clp);
		session->se_client = NULL;
	} else
		renew_client_locked(clp);
	spin_unlock(&client_lock);
}

/* must be called under the client_lock */
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd4_session *ses;

	mark_client_expired(clp);
	list_del(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
destroy_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		list_del_init(&dp->dl_perclnt);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		unhash_delegation(dp);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		release_openowner(oo);
	}
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	list_del(&clp->cl_idhash);
	if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
		rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
	else
		rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	spin_lock(&client_lock);
	unhash_client_locked(clp);
	if (atomic_read(&clp->cl_refcount) == 0)
		free_client(clp);
	spin_unlock(&client_lock);
}

static void expire_client(struct nfs4_client *clp)
{
	nfsd4_client_record_remove(clp);
	destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	if (source->cr_principal) {
		target->cr_principal =
				kstrdup(source->cr_principal, GFP_KERNEL);
		if (target->cr_principal == NULL)
			return -ENOMEM;
	} else
		target->cr_principal = NULL;
	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	return 0;
}

static long long
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	long long res;

	res = o1->len - o2->len;
	if (res)
		return res;
	return (long long)memcmp(o1->data, o2->data, o1->len);
}
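
/*
 * Editor's note on the total order this gives: blobs compare first by
 * length, then bytewise, so e.g. a 3-byte name always sorts before any
 * 4-byte name regardless of content; only equal-length names fall
 * through to memcmp().  The conf/unconf name trees below rely on this
 * ordering.
 */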

static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i=0; i<g1->ngroups; i++)
		if (GROUP_AT(g1, i) != GROUP_AT(g2, i))
			return false;
	return true;
}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use the uid, gids, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}


static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (cr1->cr_uid != cr2->cr_uid)
		|| (cr1->cr_gid != cr2->cr_gid)
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	static u32 current_clientid = 1;

	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = current_clientid++;
}

static void gen_confirm(struct nfs4_client *clp)
{
	__be32 verf[2];
	static u32 i;

	verf[0] = (__be32)get_seconds();
	verf[1] = (__be32)i++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
{
	return idr_find(&cl->cl_stateids, t->si_opaque.so_id);
}

static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	s = find_stateid(cl, t);
	if (!s)
		return NULL;
	if (typemask & s->sc_type)
		return s;
	return NULL;
}

static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	INIT_LIST_HEAD(&clp->cl_sessions);
	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		spin_lock(&client_lock);
		free_client(clp);
		spin_unlock(&client_lock);
		return NULL;
	}
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	spin_lock_init(&clp->cl_lock);
	nfsd4_init_callback(&clp->cl_cb_null);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	gen_confirm(clp);
	clp->cl_cb_session = NULL;
	clp->net = net;
	return clp;
}

static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	long long cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}

static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client(clp);
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &nn->conf_id_hashtbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client(clp);
			return clp;
		}
	}
	return NULL;
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &nn->unconf_id_hashtbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			return clp;
		}
	}
	return NULL;
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}

static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					    se->se_callback_addr_len,
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}

/*
 * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
 */
void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	if (nfsd4_not_cached(resp)) {
		slot->sl_datalen = 0;
		return;
	}
	slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
	base = (char *)resp->cstate.datap -
		(char *)resp->xbuf->head[0].iov_base;
	if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
				    slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
	return;
}

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 *
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
__be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Either returns 0 or nfserr_retry_uncached */
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status == nfserr_retry_uncached_rep)
		return status;

	/* The sequence operation has been encoded, cstate->datap set. */
	memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);

	resp->opcnt = slot->sl_opcnt;
	resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
	status = slot->sl_status;

	return status;
}

/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
	/* pNFS is not supported */
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

static bool client_has_state(struct nfs4_client *clp)
{
	/*
	 * Note clp->cl_openowners check isn't quite right: there's no
	 * need to count owners without stateid's.
	 *
	 * Also note we should probably be using this in 4.0 case too.
	 */
	return !list_empty(&clp->cl_openowners)
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}

__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *unconf, *conf, *new;
	__be32 status;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	/* Currently only support SP4_NONE */
	switch (exid->spa_how) {
	case SP4_NONE:
		break;
	case SP4_SSV:
		return nfserr_serverfault;
	default:
		BUG();				/* checked by xdr code */
	case SP4_MACH_CRED:
		return nfserr_serverfault;	/* no excuse :-/ */
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	nfs4_lock_state();
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			expire_client(conf);
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		/* case 5, client reboot */
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		expire_client(unconf);

	/* case 1 (normal case) */
out_new:
	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL) {
		status = nfserr_jukebox;
		goto out;
	}
	new->cl_minorversion = 1;

	gen_clid(new, nn);
	add_to_unconfirmed(new);
out_copy:
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = new->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
	status = nfs_ok;

out:
	nfs4_unlock_state();
	return status;
}

static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}
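
/*
 * Editor's wraparound example, for illustration only: with
 * slot_seqid == 0xffffffff, a request carrying seqid == 0 satisfies
 * seqid == slot_seqid + 1 in unsigned 32-bit arithmetic and is
 * accepted as the next request in sequence.
 */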

/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */ \
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))
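
/*
 * For reference, both minima work out to 64 bytes:
 * NFSD_MIN_REQ_HDR_SEQ_SZ  = (4 + 1 + 3 + 4 + 4) * 4 = 64, and
 * NFSD_MIN_RESP_HDR_SEQ_SZ = (2 + 1 + 1 + 3 + 4 + 5) * 4 = 64
 * (XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) == 4, since the sessionid is
 * 16 bytes).
 */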

static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
{
	return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
		|| fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
}

__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	if (check_forechannel_attrs(cr_ses->fore_channel))
		return nfserr_toosmall;
	new = alloc_session(&cr_ses->fore_channel);
	if (!new)
		return nfserr_jukebox;
	status = nfserr_jukebox;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);

	if (conf) {
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
	} else if (unconf) {
		struct nfs4_client *old;
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old)
			expire_client(old);
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_init_conn(rqstp, conn, new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
		sizeof(struct nfsd4_channel_attrs));
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
out:
	nfs4_unlock_state();
	dprintk("%s returns %d\n", __func__, ntohl(status));
	return status;
out_free_conn:
	free_conn(conn);
out_free_session:
	__free_session(new);
	goto out;
}

static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	return argp->opcnt == resp->opcnt;
}

static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}

__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
{
	struct nfsd4_session *session = cstate->session;

	spin_lock(&client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;
	struct nfsd4_conn *conn;

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&client_lock);
	cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
1911 /* Sorta weird: we only need the refcnt'ing because new_conn acquires
1912 * client_lock itself: */
1913 if (cstate->session) {
1914 nfsd4_get_session(cstate->session);
1915 atomic_inc(&cstate->session->se_client->cl_refcount);
1916 }
1917 spin_unlock(&client_lock);
1918 if (!cstate->session)
1919 return nfserr_badsession;
1920
1921 status = nfsd4_map_bcts_dir(&bcts->dir);
1922 if (status)
1923 return status;
1924 conn = alloc_conn(rqstp, bcts->dir);
1925 if (!conn)
1926 return nfserr_jukebox;
1927 nfsd4_init_conn(rqstp, conn, cstate->session);
1928 return nfs_ok;
1929 }
1930
1931 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
1932 {
1933 if (!session)
1934 return false;
1935 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
1936 }
1937
1938 __be32
1939 nfsd4_destroy_session(struct svc_rqst *r,
1940 struct nfsd4_compound_state *cstate,
1941 struct nfsd4_destroy_session *sessionid)
1942 {
1943 struct nfsd4_session *ses;
1944 __be32 status = nfserr_badsession;
1945
1946 /* Notes:
1947 * - The confirmed nfs4_client->cl_sessionid holds the destroyed sessionid
1948 * - Should we return nfserr_back_chan_busy if waiting for
1949 * callbacks on to-be-destroyed session?
1950 * - Do we need to clear any callback info from previous session?
1951 */
1952
1953 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
1954 if (!nfsd4_last_compound_op(r))
1955 return nfserr_not_only_op;
1956 }
1957 dump_sessionid(__func__, &sessionid->sessionid);
1958 spin_lock(&client_lock);
1959 ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
1960 if (!ses) {
1961 spin_unlock(&client_lock);
1962 goto out;
1963 }
1964
1965 unhash_session(ses);
1966 spin_unlock(&client_lock);
1967
1968 nfs4_lock_state();
1969 nfsd4_probe_callback_sync(ses->se_client);
1970 nfs4_unlock_state();
1971
1972 spin_lock(&client_lock);
1973 nfsd4_del_conns(ses);
1974 nfsd4_put_session_locked(ses);
1975 spin_unlock(&client_lock);
1976 status = nfs_ok;
1977 out:
1978 dprintk("%s returns %d\n", __func__, ntohl(status));
1979 return status;
1980 }
1981
1982 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
1983 {
1984 struct nfsd4_conn *c;
1985
1986 list_for_each_entry(c, &s->se_conns, cn_persession) {
1987 if (c->cn_xprt == xpt) {
1988 return c;
1989 }
1990 }
1991 return NULL;
1992 }
1993
1994 static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
1995 {
1996 struct nfs4_client *clp = ses->se_client;
1997 struct nfsd4_conn *c;
1998 int ret;
1999
2000 spin_lock(&clp->cl_lock);
2001 c = __nfsd4_find_conn(new->cn_xprt, ses);
2002 if (c) {
2003 spin_unlock(&clp->cl_lock);
2004 free_conn(new);
2005 return;
2006 }
2007 __nfsd4_hash_conn(new, ses);
2008 spin_unlock(&clp->cl_lock);
2009 ret = nfsd4_register_conn(new);
2010 if (ret)
2011 /* oops; xprt is already down: */
2012 nfsd4_conn_lost(&new->cn_xpt_user);
2013 return;
2014 }
2015
2016 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2017 {
2018 struct nfsd4_compoundargs *args = rqstp->rq_argp;
2019
2020 return args->opcnt > session->se_fchannel.maxops;
2021 }
2022
2023 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2024 struct nfsd4_session *session)
2025 {
2026 struct xdr_buf *xb = &rqstp->rq_arg;
2027
2028 return xb->len > session->se_fchannel.maxreq_sz;
2029 }
2030
2031 __be32
2032 nfsd4_sequence(struct svc_rqst *rqstp,
2033 struct nfsd4_compound_state *cstate,
2034 struct nfsd4_sequence *seq)
2035 {
2036 struct nfsd4_compoundres *resp = rqstp->rq_resp;
2037 struct nfsd4_session *session;
2038 struct nfsd4_slot *slot;
2039 struct nfsd4_conn *conn;
2040 __be32 status;
2041
2042 if (resp->opcnt != 1)
2043 return nfserr_sequence_pos;
2044
2045 /*
2046 * Will be either used or freed by nfsd4_sequence_check_conn
2047 * below.
2048 */
2049 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
2050 if (!conn)
2051 return nfserr_jukebox;
2052
2053 spin_lock(&client_lock);
2054 status = nfserr_badsession;
2055 session = find_in_sessionid_hashtbl(&seq->sessionid);
2056 if (!session)
2057 goto out;
2058
2059 status = nfserr_too_many_ops;
2060 if (nfsd4_session_too_many_ops(rqstp, session))
2061 goto out;
2062
2063 status = nfserr_req_too_big;
2064 if (nfsd4_request_too_big(rqstp, session))
2065 goto out;
2066
2067 status = nfserr_badslot;
2068 if (seq->slotid >= session->se_fchannel.maxreqs)
2069 goto out;
2070
2071 slot = session->se_slots[seq->slotid];
2072 dprintk("%s: slotid %d\n", __func__, seq->slotid);
2073
2074 /* We do not negotiate the number of slots yet, so set the
2075 * maxslots to the session maxreqs, which is used to encode
2076 * sr_highest_slotid, and set the sr_target_slotid to maxslots */
2077 seq->maxslots = session->se_fchannel.maxreqs;
2078
2079 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
2080 slot->sl_flags & NFSD4_SLOT_INUSE);
2081 if (status == nfserr_replay_cache) {
2082 status = nfserr_seq_misordered;
2083 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
2084 goto out;
2085 cstate->slot = slot;
2086 cstate->session = session;
2087 /* Return the cached reply status and set cstate->status
2088 * for nfsd4_proc_compound processing */
2089 status = nfsd4_replay_cache_entry(resp, seq);
2090 cstate->status = nfserr_replay_cache;
2091 goto out;
2092 }
2093 if (status)
2094 goto out;
2095
2096 nfsd4_sequence_check_conn(conn, session);
2097 conn = NULL;
2098
2099 /* Success! bump slot seqid */
2100 slot->sl_seqid = seq->seqid;
2101 slot->sl_flags |= NFSD4_SLOT_INUSE;
2102 if (seq->cachethis)
2103 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
2104 else
2105 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
2106
2107 cstate->slot = slot;
2108 cstate->session = session;
2109
2110 out:
2111 /* Hold a session reference until done processing the compound. */
2112 if (cstate->session) {
2113 struct nfs4_client *clp = session->se_client;
2114
2115 nfsd4_get_session(cstate->session);
2116 atomic_inc(&clp->cl_refcount);
2117 switch (clp->cl_cb_state) {
2118 case NFSD4_CB_DOWN:
2119 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
2120 break;
2121 case NFSD4_CB_FAULT:
2122 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
2123 break;
2124 default:
2125 seq->status_flags = 0;
2126 }
2127 }
2128 kfree(conn);
2129 spin_unlock(&client_lock);
2130 dprintk("%s: return %d\n", __func__, ntohl(status));
2131 return status;
2132 }
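
/*
 * A worked example of the slot rules above: if a slot's sl_seqid is 7,
 * a SEQUENCE arriving with seqid 8 is a new request (bumping sl_seqid
 * to 8 on success), seqid 7 is a retransmission answered from the
 * slot's reply cache (provided the slot was initialized), and any
 * other seqid fails with nfserr_seq_misordered.
 */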
2133
2134 __be32
2135 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
2136 {
2137 struct nfs4_client *conf, *unconf, *clp;
2138 __be32 status = 0;
2139 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2140
2141 nfs4_lock_state();
2142 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
2143 conf = find_confirmed_client(&dc->clientid, true, nn);
2144
2145 if (conf) {
2146 clp = conf;
2147
2148 if (!is_client_expired(conf) && client_has_state(conf)) {
2149 status = nfserr_clientid_busy;
2150 goto out;
2151 }
2152
2153 /* rfc5661 18.50.3 */
2154 if (cstate->session && conf == cstate->session->se_client) {
2155 status = nfserr_clientid_busy;
2156 goto out;
2157 }
2158 } else if (unconf)
2159 clp = unconf;
2160 else {
2161 status = nfserr_stale_clientid;
2162 goto out;
2163 }
2164
2165 expire_client(clp);
2166 out:
2167 nfs4_unlock_state();
2168 dprintk("%s return %d\n", __func__, ntohl(status));
2169 return status;
2170 }
2171
2172 __be32
2173 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
2174 {
2175 __be32 status = 0;
2176
2177 if (rc->rca_one_fs) {
2178 if (!cstate->current_fh.fh_dentry)
2179 return nfserr_nofilehandle;
2180 /*
2181 * We don't take advantage of the rca_one_fs case.
2182 * That's OK, it's optional, we can safely ignore it.
2183 */
2184 return nfs_ok;
2185 }
2186
2187 nfs4_lock_state();
2188 status = nfserr_complete_already;
2189 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
2190 &cstate->session->se_client->cl_flags))
2191 goto out;
2192
2193 status = nfserr_stale_clientid;
2194 if (is_client_expired(cstate->session->se_client))
2195 /*
2196 * The following error isn't really legal.
2197 * But we only get here if the client has just explicitly
2198 * destroyed its own clientid, and surely it no longer
2199 * cares what error it gets back on an operation for the
2200 * dead client.
2201 */
2202 goto out;
2203
2204 status = nfs_ok;
2205 nfsd4_client_record_create(cstate->session->se_client);
2206 out:
2207 nfs4_unlock_state();
2208 return status;
2209 }
2210
2211 __be32
2212 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2213 struct nfsd4_setclientid *setclid)
2214 {
2215 struct xdr_netobj clname = setclid->se_name;
2216 nfs4_verifier clverifier = setclid->se_verf;
2217 struct nfs4_client *conf, *unconf, *new;
2218 __be32 status;
2219 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2220
2221 /* Cases below refer to rfc 3530 section 14.2.33: */
2222 nfs4_lock_state();
2223 conf = find_confirmed_client_by_name(&clname, nn);
2224 if (conf) {
2225 /* case 0: */
2226 status = nfserr_clid_inuse;
2227 if (clp_used_exchangeid(conf))
2228 goto out;
2229 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2230 char addr_str[INET6_ADDRSTRLEN];
2231 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
2232 sizeof(addr_str));
2233 dprintk("NFSD: setclientid: string in use by client "
2234 "at %s\n", addr_str);
2235 goto out;
2236 }
2237 }
2238 unconf = find_unconfirmed_client_by_name(&clname, nn);
2239 if (unconf)
2240 expire_client(unconf);
2241 status = nfserr_jukebox;
2242 new = create_client(clname, rqstp, &clverifier);
2243 if (new == NULL)
2244 goto out;
2245 if (conf && same_verf(&conf->cl_verifier, &clverifier))
2246 /* case 1: probable callback update */
2247 copy_clid(new, conf);
2248 else /* case 4 (new client) or cases 2, 3 (client reboot): */
2249 gen_clid(new, nn);
2250 new->cl_minorversion = 0;
2251 gen_callback(new, setclid, rqstp);
2252 add_to_unconfirmed(new);
2253 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2254 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2255 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
2256 status = nfs_ok;
2257 out:
2258 nfs4_unlock_state();
2259 return status;
2260 }
2261
2262
2263 __be32
2264 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2265 struct nfsd4_compound_state *cstate,
2266 struct nfsd4_setclientid_confirm *setclientid_confirm)
2267 {
2268 struct nfs4_client *conf, *unconf;
2269 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2270 clientid_t *clid = &setclientid_confirm->sc_clientid;
2271 __be32 status;
2272 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2273
2274 if (STALE_CLIENTID(clid, nn))
2275 return nfserr_stale_clientid;
2276 nfs4_lock_state();
2277
2278 conf = find_confirmed_client(clid, false, nn);
2279 unconf = find_unconfirmed_client(clid, false, nn);
2280 /*
2281 * We try hard to give out unique clientids, so if we get an
2282 * attempt to confirm the same clientid with a different cred,
2283 * there's a bug somewhere. Let's charitably assume it's our
2284 * bug.
2285 */
2286 status = nfserr_serverfault;
2287 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
2288 goto out;
2289 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
2290 goto out;
2291 /* cases below refer to rfc 3530 section 14.2.34: */
2292 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
2293 if (conf && !unconf) /* case 2: probable retransmit */
2294 status = nfs_ok;
2295 else /* case 4: client hasn't noticed we rebooted yet? */
2296 status = nfserr_stale_clientid;
2297 goto out;
2298 }
2299 status = nfs_ok;
2300 if (conf) { /* case 1: callback update */
2301 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2302 nfsd4_probe_callback(conf);
2303 expire_client(unconf);
2304 } else { /* case 3: normal case; new or rebooted client */
2305 conf = find_confirmed_client_by_name(&unconf->cl_name, nn);
2306 if (conf)
2307 expire_client(conf);
2308 move_to_confirmed(unconf);
2309 nfsd4_probe_callback(unconf);
2310 }
2311 out:
2312 nfs4_unlock_state();
2313 return status;
2314 }
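
/*
 * Summary of the rfc 3530 section 14.2.34 cases handled above, keyed
 * on what we found for the clientid:
 *
 * - unconfirmed record whose cl_confirm matches, plus a confirmed
 * record: case 1, callback update; move the new callback info
 * to the confirmed client and expire the unconfirmed one.
 * - matching unconfirmed record only: case 3, new or rebooted
 * client; confirm it, expiring any confirmed client of the
 * same name.
 * - confirmed record only: case 2, probable retransmit; nfs_ok.
 * - neither: case 4, the client hasn't noticed the server
 * rebooted; nfserr_stale_clientid.
 */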
2315
2316 static struct nfs4_file *nfsd4_alloc_file(void)
2317 {
2318 return kmem_cache_alloc(file_slab, GFP_KERNEL);
2319 }
2320
2321 /* OPEN Share state helper functions */
2322 static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
2323 {
2324 unsigned int hashval = file_hashval(ino);
2325
2326 atomic_set(&fp->fi_ref, 1);
2327 INIT_LIST_HEAD(&fp->fi_hash);
2328 INIT_LIST_HEAD(&fp->fi_stateids);
2329 INIT_LIST_HEAD(&fp->fi_delegations);
2330 fp->fi_inode = igrab(ino);
2331 fp->fi_had_conflict = false;
2332 fp->fi_lease = NULL;
2333 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2334 memset(fp->fi_access, 0, sizeof(fp->fi_access));
2335 spin_lock(&recall_lock);
2336 list_add(&fp->fi_hash, &file_hashtbl[hashval]);
2337 spin_unlock(&recall_lock);
2338 }
2339
2340 static void
2341 nfsd4_free_slab(struct kmem_cache **slab)
2342 {
2343 if (*slab == NULL)
2344 return;
2345 kmem_cache_destroy(*slab);
2346 *slab = NULL;
2347 }
2348
2349 void
2350 nfsd4_free_slabs(void)
2351 {
2352 nfsd4_free_slab(&openowner_slab);
2353 nfsd4_free_slab(&lockowner_slab);
2354 nfsd4_free_slab(&file_slab);
2355 nfsd4_free_slab(&stateid_slab);
2356 nfsd4_free_slab(&deleg_slab);
2357 }
2358
2359 int
2360 nfsd4_init_slabs(void)
2361 {
2362 openowner_slab = kmem_cache_create("nfsd4_openowners",
2363 sizeof(struct nfs4_openowner), 0, 0, NULL);
2364 if (openowner_slab == NULL)
2365 goto out_nomem;
2366 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
2367 sizeof(struct nfs4_lockowner), 0, 0, NULL);
2368 if (lockowner_slab == NULL)
2369 goto out_nomem;
2370 file_slab = kmem_cache_create("nfsd4_files",
2371 sizeof(struct nfs4_file), 0, 0, NULL);
2372 if (file_slab == NULL)
2373 goto out_nomem;
2374 stateid_slab = kmem_cache_create("nfsd4_stateids",
2375 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
2376 if (stateid_slab == NULL)
2377 goto out_nomem;
2378 deleg_slab = kmem_cache_create("nfsd4_delegations",
2379 sizeof(struct nfs4_delegation), 0, 0, NULL);
2380 if (deleg_slab == NULL)
2381 goto out_nomem;
2382 return 0;
2383 out_nomem:
2384 nfsd4_free_slabs();
2385 dprintk("nfsd4: out of memory while initializing nfsv4\n");
2386 return -ENOMEM;
2387 }
2388
2389 void nfs4_free_openowner(struct nfs4_openowner *oo)
2390 {
2391 kfree(oo->oo_owner.so_owner.data);
2392 kmem_cache_free(openowner_slab, oo);
2393 }
2394
2395 void nfs4_free_lockowner(struct nfs4_lockowner *lo)
2396 {
2397 kfree(lo->lo_owner.so_owner.data);
2398 kmem_cache_free(lockowner_slab, lo);
2399 }
2400
2401 static void init_nfs4_replay(struct nfs4_replay *rp)
2402 {
2403 rp->rp_status = nfserr_serverfault;
2404 rp->rp_buflen = 0;
2405 rp->rp_buf = rp->rp_ibuf;
2406 }
2407
2408 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2409 {
2410 struct nfs4_stateowner *sop;
2411
2412 sop = kmem_cache_alloc(slab, GFP_KERNEL);
2413 if (!sop)
2414 return NULL;
2415
2416 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2417 if (!sop->so_owner.data) {
2418 kmem_cache_free(slab, sop);
2419 return NULL;
2420 }
2421 sop->so_owner.len = owner->len;
2422
2423 INIT_LIST_HEAD(&sop->so_stateids);
2424 sop->so_client = clp;
2425 init_nfs4_replay(&sop->so_replay);
2426 return sop;
2427 }
2428
2429 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
2430 {
2431 list_add(&oo->oo_owner.so_strhash, &ownerstr_hashtbl[strhashval]);
2432 list_add(&oo->oo_perclient, &clp->cl_openowners);
2433 }
2434
2435 static struct nfs4_openowner *
2436 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
2437 struct nfs4_openowner *oo;
2438
2439 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2440 if (!oo)
2441 return NULL;
2442 oo->oo_owner.so_is_open_owner = 1;
2443 oo->oo_owner.so_seqid = open->op_seqid;
2444 oo->oo_flags = NFS4_OO_NEW;
2445 oo->oo_time = 0;
2446 oo->oo_last_closed_stid = NULL;
2447 INIT_LIST_HEAD(&oo->oo_close_lru);
2448 hash_openowner(oo, clp, strhashval);
2449 return oo;
2450 }
2451
2452 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
2453 struct nfs4_openowner *oo = open->op_openowner;
2454 struct nfs4_client *clp = oo->oo_owner.so_client;
2455
2456 init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
2457 INIT_LIST_HEAD(&stp->st_lockowners);
2458 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
2459 list_add(&stp->st_perfile, &fp->fi_stateids);
2460 stp->st_stateowner = &oo->oo_owner;
2461 get_nfs4_file(fp);
2462 stp->st_file = fp;
2463 stp->st_access_bmap = 0;
2464 stp->st_deny_bmap = 0;
2465 set_access(open->op_share_access, stp);
2466 set_deny(open->op_share_deny, stp);
2467 stp->st_openstp = NULL;
2468 }
2469
2470 static void
2471 move_to_close_lru(struct nfs4_openowner *oo)
2472 {
2473 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
2474
2475 list_move_tail(&oo->oo_close_lru, &close_lru);
2476 oo->oo_time = get_seconds();
2477 }
2478
2479 static int
2480 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2481 clientid_t *clid)
2482 {
2483 return (sop->so_owner.len == owner->len) &&
2484 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2485 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2486 }
2487
2488 static struct nfs4_openowner *
2489 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, bool sessions)
2490 {
2491 struct nfs4_stateowner *so;
2492 struct nfs4_openowner *oo;
2493 struct nfs4_client *clp;
2494
2495 list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) {
2496 if (!so->so_is_open_owner)
2497 continue;
2498 if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
2499 oo = openowner(so);
2500 clp = oo->oo_owner.so_client;
2501 if ((bool)clp->cl_minorversion != sessions)
2502 return NULL;
2503 renew_client(oo->oo_owner.so_client);
2504 return oo;
2505 }
2506 }
2507 return NULL;
2508 }
2509
2510 /* search file_hashtbl[] for file */
2511 static struct nfs4_file *
2512 find_file(struct inode *ino)
2513 {
2514 unsigned int hashval = file_hashval(ino);
2515 struct nfs4_file *fp;
2516
2517 spin_lock(&recall_lock);
2518 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2519 if (fp->fi_inode == ino) {
2520 get_nfs4_file(fp);
2521 spin_unlock(&recall_lock);
2522 return fp;
2523 }
2524 }
2525 spin_unlock(&recall_lock);
2526 return NULL;
2527 }
2528
2529 /*
2530 * Called to check share deny when a READ arrives with the all-zero
2531 * stateid, or a WRITE with the all-zero or the all-ones stateid
2532 */
2533 static __be32
2534 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2535 {
2536 struct inode *ino = current_fh->fh_dentry->d_inode;
2537 struct nfs4_file *fp;
2538 struct nfs4_ol_stateid *stp;
2539 __be32 ret;
2540
2541 dprintk("NFSD: nfs4_share_conflict\n");
2542
2543 fp = find_file(ino);
2544 if (!fp)
2545 return nfs_ok;
2546 ret = nfserr_locked;
2547 /* Search for conflicting share reservations */
2548 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2549 if (test_deny(deny_type, stp) ||
2550 test_deny(NFS4_SHARE_DENY_BOTH, stp))
2551 goto out;
2552 }
2553 ret = nfs_ok;
2554 out:
2555 put_nfs4_file(fp);
2556 return ret;
2557 }
2558
2559 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
2560 {
2561 /* We're assuming the state code never drops its reference
2562 * without first removing the lease. Since we're in this lease
2563 * callback (and since the lease code is serialized by the kernel
2564 * lock) we know the server hasn't removed the lease yet, so we
2565 * know it's safe to take a reference: */
2566 atomic_inc(&dp->dl_count);
2567
2568 list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
2569
2570 /* Only place dl_time is set; protected by lock_flocks() */
2571 dp->dl_time = get_seconds();
2572
2573 nfsd4_cb_recall(dp);
2574 }
2575
2576 /* Called from break_lease() with lock_flocks() held. */
2577 static void nfsd_break_deleg_cb(struct file_lock *fl)
2578 {
2579 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
2580 struct nfs4_delegation *dp;
2581
2582 if (!fp) {
2583 WARN(1, "(%p)->fl_owner NULL\n", fl);
2584 return;
2585 }
2586 if (fp->fi_had_conflict) {
2587 WARN(1, "duplicate break on %p\n", fp);
2588 return;
2589 }
2590 /*
2591 * We don't want the locks code to timeout the lease for us;
2592 * we'll remove it ourselves if a delegation isn't returned
2593 * in time:
2594 */
2595 fl->fl_break_time = 0;
2596
2597 spin_lock(&recall_lock);
2598 fp->fi_had_conflict = true;
2599 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
2600 nfsd_break_one_deleg(dp);
2601 spin_unlock(&recall_lock);
2602 }
2603
2604 static
2605 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2606 {
2607 if (arg & F_UNLCK)
2608 return lease_modify(onlist, arg);
2609 else
2610 return -EAGAIN;
2611 }
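
/*
 * I.e. the only lease modification we let the VFS make is unlock
 * (tearing the lease down when the delegation goes away); attempts to
 * change the lease type from the locks code get -EAGAIN.
 */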
2612
2613 static const struct lock_manager_operations nfsd_lease_mng_ops = {
2614 .lm_break = nfsd_break_deleg_cb,
2615 .lm_change = nfsd_change_deleg_cb,
2616 };
2617
2618 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
2619 {
2620 if (nfsd4_has_session(cstate))
2621 return nfs_ok;
2622 if (seqid == so->so_seqid - 1)
2623 return nfserr_replay_me;
2624 if (seqid == so->so_seqid)
2625 return nfs_ok;
2626 return nfserr_bad_seqid;
2627 }
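
/*
 * Example of the v4.0 rules above: if so_seqid is 5, an incoming
 * seqid of 5 is the expected next request, 4 is a retransmission of
 * the previous request (replay the cached response), and anything
 * else is nfserr_bad_seqid. Sessions make these per-owner seqids
 * redundant, hence the early nfs_ok for v4.1.
 */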
2628
2629 __be32
2630 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
2631 struct nfsd4_open *open)
2632 {
2633 clientid_t *clientid = &open->op_clientid;
2634 struct nfs4_client *clp = NULL;
2635 unsigned int strhashval;
2636 struct nfs4_openowner *oo = NULL;
2637 __be32 status;
2638 struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
2639
2640 if (STALE_CLIENTID(&open->op_clientid, nn))
2641 return nfserr_stale_clientid;
2642 /*
2643 * In case we need it later, after we've already created the
2644 * file and don't want to risk a further failure:
2645 */
2646 open->op_file = nfsd4_alloc_file();
2647 if (open->op_file == NULL)
2648 return nfserr_jukebox;
2649
2650 strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
2651 oo = find_openstateowner_str(strhashval, open, cstate->minorversion);
2652 open->op_openowner = oo;
2653 if (!oo) {
2654 clp = find_confirmed_client(clientid, cstate->minorversion,
2655 nn);
2656 if (clp == NULL)
2657 return nfserr_expired;
2658 goto new_owner;
2659 }
2660 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
2661 /* Replace unconfirmed owners without checking for replay. */
2662 clp = oo->oo_owner.so_client;
2663 release_openowner(oo);
2664 open->op_openowner = NULL;
2665 goto new_owner;
2666 }
2667 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
2668 if (status)
2669 return status;
2670 clp = oo->oo_owner.so_client;
2671 goto alloc_stateid;
2672 new_owner:
2673 oo = alloc_init_open_stateowner(strhashval, clp, open);
2674 if (oo == NULL)
2675 return nfserr_jukebox;
2676 open->op_openowner = oo;
2677 alloc_stateid:
2678 open->op_stp = nfs4_alloc_stateid(clp);
2679 if (!open->op_stp)
2680 return nfserr_jukebox;
2681 return nfs_ok;
2682 }
2683
2684 static inline __be32
2685 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2686 {
2687 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
2688 return nfserr_openmode;
2689 else
2690 return nfs_ok;
2691 }
2692
2693 static int share_access_to_flags(u32 share_access)
2694 {
2695 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
2696 }
2697
2698 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
2699 {
2700 struct nfs4_stid *ret;
2701
2702 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
2703 if (!ret)
2704 return NULL;
2705 return delegstateid(ret);
2706 }
2707
2708 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
2709 {
2710 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
2711 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
2712 }
2713
2714 static __be32
2715 nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open,
2716 struct nfs4_delegation **dp)
2717 {
2718 int flags;
2719 __be32 status = nfserr_bad_stateid;
2720
2721 *dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
2722 if (*dp == NULL)
2723 goto out;
2724 flags = share_access_to_flags(open->op_share_access);
2725 status = nfs4_check_delegmode(*dp, flags);
2726 if (status)
2727 *dp = NULL;
2728 out:
2729 if (!nfsd4_is_deleg_cur(open))
2730 return nfs_ok;
2731 if (status)
2732 return status;
2733 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2734 return nfs_ok;
2735 }
2736
2737 static __be32
2738 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
2739 {
2740 struct nfs4_ol_stateid *local;
2741 struct nfs4_openowner *oo = open->op_openowner;
2742
2743 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
2744 /* ignore lock owners */
2745 if (local->st_stateowner->so_is_open_owner == 0)
2746 continue;
2747 /* remember if we have seen this open owner */
2748 if (local->st_stateowner == &oo->oo_owner)
2749 *stpp = local;
2750 /* check for conflicting share reservations */
2751 if (!test_share(local, open))
2752 return nfserr_share_denied;
2753 }
2754 return nfs_ok;
2755 }
2756
2757 static inline int nfs4_access_to_access(u32 nfs4_access)
2758 {
2759 int flags = 0;
2760
2761 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
2762 flags |= NFSD_MAY_READ;
2763 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
2764 flags |= NFSD_MAY_WRITE;
2765 return flags;
2766 }
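
/*
 * E.g. NFS4_SHARE_ACCESS_BOTH (READ|WRITE) maps to
 * NFSD_MAY_READ|NFSD_MAY_WRITE, which nfs4_get_vfs_file() below
 * passes to nfsd_open() for the permission check on the server-side
 * open.
 */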
2767
2768 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
2769 struct svc_fh *cur_fh, struct nfsd4_open *open)
2770 {
2771 __be32 status;
2772 int oflag = nfs4_access_to_omode(open->op_share_access);
2773 int access = nfs4_access_to_access(open->op_share_access);
2774
2775 if (!fp->fi_fds[oflag]) {
2776 status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
2777 &fp->fi_fds[oflag]);
2778 if (status)
2779 return status;
2780 }
2781 nfs4_file_get_access(fp, oflag);
2782
2783 return nfs_ok;
2784 }
2785
2786 static inline __be32
2787 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
2788 struct nfsd4_open *open)
2789 {
2790 struct iattr iattr = {
2791 .ia_valid = ATTR_SIZE,
2792 .ia_size = 0,
2793 };
2794 if (!open->op_truncate)
2795 return 0;
2796 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
2797 return nfserr_inval;
2798 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
2799 }
2800
2801 static __be32
2802 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
2803 {
2804 u32 op_share_access = open->op_share_access;
2805 bool new_access;
2806 __be32 status;
2807
2808 new_access = !test_access(op_share_access, stp);
2809 if (new_access) {
2810 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
2811 if (status)
2812 return status;
2813 }
2814 status = nfsd4_truncate(rqstp, cur_fh, open);
2815 if (status) {
2816 if (new_access) {
2817 int oflag = nfs4_access_to_omode(op_share_access);
2818 nfs4_file_put_access(fp, oflag);
2819 }
2820 return status;
2821 }
2822 /* remember the open */
2823 set_access(op_share_access, stp);
2824 set_deny(open->op_share_deny, stp);
2825
2826 return nfs_ok;
2827 }
2828
2829
2830 static void
2831 nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
2832 {
2833 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2834 }
2835
2836 /* Should we give out recallable state? */
2837 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
2838 {
2839 if (clp->cl_cb_state == NFSD4_CB_UP)
2840 return true;
2841 /*
2842 * In the sessions case, since we don't have to establish a
2843 * separate connection for callbacks, we assume it's OK
2844 * until we hear otherwise:
2845 */
2846 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
2847 }
2848
2849 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
2850 {
2851 struct file_lock *fl;
2852
2853 fl = locks_alloc_lock();
2854 if (!fl)
2855 return NULL;
2856 locks_init_lock(fl);
2857 fl->fl_lmops = &nfsd_lease_mng_ops;
2858 fl->fl_flags = FL_LEASE;
2859 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
2860 fl->fl_end = OFFSET_MAX;
2861 fl->fl_owner = (fl_owner_t)(dp->dl_file);
2862 fl->fl_pid = current->tgid;
2863 return fl;
2864 }
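
/*
 * The resulting lease covers the whole file: locks_init_lock() leaves
 * fl_start at 0 and we set fl_end to OFFSET_MAX. fl_owner is pointed
 * at our nfs4_file so that nfsd_break_deleg_cb() above can recover it
 * from the file_lock when the lease breaks.
 */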
2865
2866 static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
2867 {
2868 struct nfs4_file *fp = dp->dl_file;
2869 struct file_lock *fl;
2870 int status;
2871
2872 fl = nfs4_alloc_init_lease(dp, flag);
2873 if (!fl)
2874 return -ENOMEM;
2875 fl->fl_file = find_readable_file(fp);
2876 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2877 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
2878 if (status) {
2879 list_del_init(&dp->dl_perclnt);
2880 locks_free_lock(fl);
2881 return -ENOMEM;
2882 }
2883 fp->fi_lease = fl;
2884 fp->fi_deleg_file = get_file(fl->fl_file);
2885 atomic_set(&fp->fi_delegees, 1);
2886 list_add(&dp->dl_perfile, &fp->fi_delegations);
2887 return 0;
2888 }
2889
2890 static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
2891 {
2892 struct nfs4_file *fp = dp->dl_file;
2893
2894 if (!fp->fi_lease)
2895 return nfs4_setlease(dp, flag);
2896 spin_lock(&recall_lock);
2897 if (fp->fi_had_conflict) {
2898 spin_unlock(&recall_lock);
2899 return -EAGAIN;
2900 }
2901 atomic_inc(&fp->fi_delegees);
2902 list_add(&dp->dl_perfile, &fp->fi_delegations);
2903 spin_unlock(&recall_lock);
2904 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2905 return 0;
2906 }
2907
2908 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
2909 {
2910 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2911 if (status == -EAGAIN)
2912 open->op_why_no_deleg = WND4_CONTENTION;
2913 else {
2914 open->op_why_no_deleg = WND4_RESOURCE;
2915 switch (open->op_deleg_want) {
2916 case NFS4_SHARE_WANT_READ_DELEG:
2917 case NFS4_SHARE_WANT_WRITE_DELEG:
2918 case NFS4_SHARE_WANT_ANY_DELEG:
2919 break;
2920 case NFS4_SHARE_WANT_CANCEL:
2921 open->op_why_no_deleg = WND4_CANCELLED;
2922 break;
2923 case NFS4_SHARE_WANT_NO_DELEG:
2924 BUG(); /* not supposed to get here */
2925 }
2926 }
2927 }
2928
2929 /*
2930 * Attempt to hand out a delegation.
2931 */
2932 static void
2933 nfs4_open_delegation(struct net *net, struct svc_fh *fh,
2934 struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
2935 {
2936 struct nfs4_delegation *dp;
2937 struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
2938 int cb_up;
2939 int status = 0, flag = 0;
2940
2941 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
2942 flag = NFS4_OPEN_DELEGATE_NONE;
2943 open->op_recall = 0;
2944 switch (open->op_claim_type) {
2945 case NFS4_OPEN_CLAIM_PREVIOUS:
2946 if (!cb_up)
2947 open->op_recall = 1;
2948 flag = open->op_delegate_type;
2949 if (flag == NFS4_OPEN_DELEGATE_NONE)
2950 goto out;
2951 break;
2952 case NFS4_OPEN_CLAIM_NULL:
2953 /* Let's not give out any delegations till everyone's
2954 * had the chance to reclaim theirs.... */
2955 if (locks_in_grace(net))
2956 goto out;
2957 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
2958 goto out;
2959 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
2960 flag = NFS4_OPEN_DELEGATE_WRITE;
2961 else
2962 flag = NFS4_OPEN_DELEGATE_READ;
2963 break;
2964 default:
2965 goto out;
2966 }
2967
2968 dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
2969 if (dp == NULL)
2970 goto out_no_deleg;
2971 status = nfs4_set_delegation(dp, flag);
2972 if (status)
2973 goto out_free;
2974
2975 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
2976
2977 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
2978 STATEID_VAL(&dp->dl_stid.sc_stateid));
2979 out:
2980 open->op_delegate_type = flag;
2981 if (flag == NFS4_OPEN_DELEGATE_NONE) {
2982 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
2983 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
2984 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
2985
2986 /* 4.1 client asking for a delegation? */
2987 if (open->op_deleg_want)
2988 nfsd4_open_deleg_none_ext(open, status);
2989 }
2990 return;
2991 out_free:
2992 nfs4_put_delegation(dp);
2993 out_no_deleg:
2994 flag = NFS4_OPEN_DELEGATE_NONE;
2995 goto out;
2996 }
2997
2998 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
2999 struct nfs4_delegation *dp)
3000 {
3001 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
3002 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3003 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3004 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
3005 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
3006 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3007 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3008 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
3009 }
3010 /* Otherwise the client must be confused, wanting a delegation
3011 * it already has; in that case we don't return
3012 * NFS4_OPEN_DELEGATE_NONE_EXT with a reason.
3013 */
3014 }
3015
3016 /*
3017 * called with nfs4_lock_state() held.
3018 */
3019 __be32
3020 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
3021 {
3022 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3023 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
3024 struct nfs4_file *fp = NULL;
3025 struct inode *ino = current_fh->fh_dentry->d_inode;
3026 struct nfs4_ol_stateid *stp = NULL;
3027 struct nfs4_delegation *dp = NULL;
3028 __be32 status;
3029
3030 /*
3031 * Lookup file; if found, lookup stateid and check open request,
3032 * and check for delegations in the process of being recalled.
3033 * If not found, create the nfs4_file struct
3034 */
3035 fp = find_file(ino);
3036 if (fp) {
3037 if ((status = nfs4_check_open(fp, open, &stp)))
3038 goto out;
3039 status = nfs4_check_deleg(cl, fp, open, &dp);
3040 if (status)
3041 goto out;
3042 } else {
3043 status = nfserr_bad_stateid;
3044 if (nfsd4_is_deleg_cur(open))
3045 goto out;
3046 status = nfserr_jukebox;
3047 fp = open->op_file;
3048 open->op_file = NULL;
3049 nfsd4_init_file(fp, ino);
3050 }
3051
3052 /*
3053 * OPEN the file, or upgrade an existing OPEN.
3054 * If truncate fails, the OPEN fails.
3055 */
3056 if (stp) {
3057 /* Stateid was found, this is an OPEN upgrade */
3058 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
3059 if (status)
3060 goto out;
3061 } else {
3062 status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
3063 if (status)
3064 goto out;
3065 status = nfsd4_truncate(rqstp, current_fh, open);
3066 if (status)
3067 goto out;
3068 stp = open->op_stp;
3069 open->op_stp = NULL;
3070 init_open_stateid(stp, fp, open);
3071 }
3072 update_stateid(&stp->st_stid.sc_stateid);
3073 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3074
3075 if (nfsd4_has_session(&resp->cstate)) {
3076 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3077
3078 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
3079 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3080 open->op_why_no_deleg = WND4_NOT_WANTED;
3081 goto nodeleg;
3082 }
3083 }
3084
3085 /*
3086 * Attempt to hand out a delegation. No error return, because the
3087 * OPEN succeeds even if we fail.
3088 */
3089 nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp);
3090 nodeleg:
3091 status = nfs_ok;
3092
3093 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
3094 STATEID_VAL(&stp->st_stid.sc_stateid));
3095 out:
3096 /* 4.1 client trying to upgrade/downgrade delegation? */
3097 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
3098 open->op_deleg_want)
3099 nfsd4_deleg_xgrade_none_ext(open, dp);
3100
3101 if (fp)
3102 put_nfs4_file(fp);
3103 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
3104 nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
3105 /*
3106 * To finish the open response, we just need to set the rflags.
3107 */
3108 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
3109 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
3110 !nfsd4_has_session(&resp->cstate))
3111 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
3112
3113 return status;
3114 }
3115
3116 void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
3117 {
3118 if (open->op_openowner) {
3119 struct nfs4_openowner *oo = open->op_openowner;
3120
3121 if (!list_empty(&oo->oo_owner.so_stateids))
3122 list_del_init(&oo->oo_close_lru);
3123 if (oo->oo_flags & NFS4_OO_NEW) {
3124 if (status) {
3125 release_openowner(oo);
3126 open->op_openowner = NULL;
3127 } else
3128 oo->oo_flags &= ~NFS4_OO_NEW;
3129 }
3130 }
3131 if (open->op_file)
3132 nfsd4_free_file(open->op_file);
3133 if (open->op_stp)
3134 free_generic_stateid(open->op_stp);
3135 }
3136
3137 __be32
3138 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3139 clientid_t *clid)
3140 {
3141 struct nfs4_client *clp;
3142 __be32 status;
3143 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3144
3145 nfs4_lock_state();
3146 dprintk("process_renew(%08x/%08x): starting\n",
3147 clid->cl_boot, clid->cl_id);
3148 status = nfserr_stale_clientid;
3149 if (STALE_CLIENTID(clid, nn))
3150 goto out;
3151 clp = find_confirmed_client(clid, cstate->minorversion, nn);
3152 status = nfserr_expired;
3153 if (clp == NULL) {
3154 /* We assume the client took too long to RENEW. */
3155 dprintk("nfsd4_renew: clientid not found!\n");
3156 goto out;
3157 }
3158 status = nfserr_cb_path_down;
3159 if (!list_empty(&clp->cl_delegations)
3160 && clp->cl_cb_state != NFSD4_CB_UP)
3161 goto out;
3162 status = nfs_ok;
3163 out:
3164 nfs4_unlock_state();
3165 return status;
3166 }
3167
3168 static void
3169 nfsd4_end_grace(struct net *net)
3170 {
3171 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3172
3173 /* do nothing if grace period already ended */
3174 if (nn->grace_ended)
3175 return;
3176
3177 dprintk("NFSD: end of grace period\n");
3178 nn->grace_ended = true;
3179 nfsd4_record_grace_done(net, nn->boot_time);
3180 locks_end_grace(&nn->nfsd4_manager);
3181 /*
3182 * Now that every NFSv4 client has had the chance to recover and
3183 * to see the (possibly new, possibly shorter) lease time, we
3184 * can safely set the next grace time to the current lease time:
3185 */
3186 nfsd4_grace = nfsd4_lease;
3187 }
3188
3189 static time_t
3190 nfs4_laundromat(void)
3191 {
3192 struct nfs4_client *clp;
3193 struct nfs4_openowner *oo;
3194 struct nfs4_delegation *dp;
3195 struct list_head *pos, *next, reaplist;
3196 time_t cutoff = get_seconds() - nfsd4_lease;
3197 time_t t, clientid_val = nfsd4_lease;
3198 time_t u, test_val = nfsd4_lease;
3199
3200 nfs4_lock_state();
3201
3202 dprintk("NFSD: laundromat service - starting\n");
3203 nfsd4_end_grace(&init_net);
3204 INIT_LIST_HEAD(&reaplist);
3205 spin_lock(&client_lock);
3206 list_for_each_safe(pos, next, &client_lru) {
3207 clp = list_entry(pos, struct nfs4_client, cl_lru);
3208 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
3209 t = clp->cl_time - cutoff;
3210 if (clientid_val > t)
3211 clientid_val = t;
3212 break;
3213 }
3214 if (atomic_read(&clp->cl_refcount)) {
3215 dprintk("NFSD: client in use (clientid %08x)\n",
3216 clp->cl_clientid.cl_id);
3217 continue;
3218 }
3219 unhash_client_locked(clp);
3220 list_add(&clp->cl_lru, &reaplist);
3221 }
3222 spin_unlock(&client_lock);
3223 list_for_each_safe(pos, next, &reaplist) {
3224 clp = list_entry(pos, struct nfs4_client, cl_lru);
3225 dprintk("NFSD: purging unused client (clientid %08x)\n",
3226 clp->cl_clientid.cl_id);
3227 expire_client(clp);
3228 }
3229 spin_lock(&recall_lock);
3230 list_for_each_safe(pos, next, &del_recall_lru) {
3231 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
3232 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
3233 u = dp->dl_time - cutoff;
3234 if (test_val > u)
3235 test_val = u;
3236 break;
3237 }
3238 list_move(&dp->dl_recall_lru, &reaplist);
3239 }
3240 spin_unlock(&recall_lock);
3241 list_for_each_safe(pos, next, &reaplist) {
3242 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
3243 unhash_delegation(dp);
3244 }
3245 test_val = nfsd4_lease;
3246 list_for_each_safe(pos, next, &close_lru) {
3247 oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
3248 if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
3249 u = oo->oo_time - cutoff;
3250 if (test_val > u)
3251 test_val = u;
3252 break;
3253 }
3254 release_openowner(oo);
3255 }
3256 if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
3257 clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
3258 nfs4_unlock_state();
3259 return clientid_val;
3260 }
3261
3262 static struct workqueue_struct *laundry_wq;
3263 static void laundromat_main(struct work_struct *);
3264 static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
3265
3266 static void
3267 laundromat_main(struct work_struct *not_used)
3268 {
3269 time_t t;
3270
3271 t = nfs4_laundromat();
3272 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3273 queue_delayed_work(laundry_wq, &laundromat_work, t*HZ);
3274 }
3275
3276 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3277 {
3278 if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
3279 return nfserr_bad_stateid;
3280 return nfs_ok;
3281 }
3282
3283 static int
3284 STALE_STATEID(stateid_t *stateid, struct nfsd_net *nn)
3285 {
3286 if (stateid->si_opaque.so_clid.cl_boot == nn->boot_time)
3287 return 0;
3288 dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
3289 STATEID_VAL(stateid));
3290 return 1;
3291 }
3292
3293 static inline int
3294 access_permit_read(struct nfs4_ol_stateid *stp)
3295 {
3296 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
3297 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
3298 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
3299 }
3300
3301 static inline int
3302 access_permit_write(struct nfs4_ol_stateid *stp)
3303 {
3304 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
3305 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
3306 }
3307
3308 static
3309 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3310 {
3311 __be32 status = nfserr_openmode;
3312
3313 /* For lock stateids, we test the parent open, not the lock: */
3314 if (stp->st_openstp)
3315 stp = stp->st_openstp;
3316 if ((flags & WR_STATE) && !access_permit_write(stp))
3317 goto out;
3318 if ((flags & RD_STATE) && !access_permit_read(stp))
3319 goto out;
3320 status = nfs_ok;
3321 out:
3322 return status;
3323 }
3324
3325 static inline __be32
3326 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
3327 {
3328 if (ONE_STATEID(stateid) && (flags & RD_STATE))
3329 return nfs_ok;
3330 else if (locks_in_grace(net)) {
3331 /* The answer in the remaining cases depends on the existence of
3332 * conflicting state, so we must wait out the grace period. */
3333 return nfserr_grace;
3334 } else if (flags & WR_STATE)
3335 return nfs4_share_conflict(current_fh,
3336 NFS4_SHARE_DENY_WRITE);
3337 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
3338 return nfs4_share_conflict(current_fh,
3339 NFS4_SHARE_DENY_READ);
3340 }
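
/*
 * So, for example, a READ carrying the all-ones stateid bypasses the
 * share checks entirely; outside the grace period a WRITE with a
 * special stateid fails only if some open denies writes, and a READ
 * with the all-zero stateid only if some open denies reads.
 */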
3341
3342 /*
3343 * Allow READ/WRITE during grace period on recovered state only for files
3344 * that are not able to provide mandatory locking.
3345 */
3346 static inline int
3347 grace_disallows_io(struct net *net, struct inode *inode)
3348 {
3349 return locks_in_grace(net) && mandatory_lock(inode);
3350 }
3351
3352 /* Returns true iff a is later than b: */
3353 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3354 {
3355 return (s32)a->si_generation - (s32)b->si_generation > 0;
3356 }
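
/*
 * The signed 32-bit subtraction keeps the comparison correct across
 * generation wraparound: with a->si_generation == 1 and
 * b->si_generation == 0xffffffff the difference is 2 > 0, so a is
 * treated as later even though it is numerically smaller.
 */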
3357
3358 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3359 {
3360 /*
3361 * When sessions are used the stateid generation number is ignored
3362 * when it is zero.
3363 */
3364 if (has_session && in->si_generation == 0)
3365 return nfs_ok;
3366
3367 if (in->si_generation == ref->si_generation)
3368 return nfs_ok;
3369
3370 /* If the client sends us a stateid from the future, it's buggy: */
3371 if (stateid_generation_after(in, ref))
3372 return nfserr_bad_stateid;
3373 /*
3374 * However, we could see a stateid from the past, even from a
3375 * non-buggy client. For example, if the client sends a lock
3376 * while some IO is outstanding, the lock may bump si_generation
3377 * while the IO is still in flight. The client could avoid that
3378 * situation by waiting for responses on all the IO requests,
3379 * but it may get better performance by simply retrying any
3380 * IO that receives an old_stateid error, as long as requests
3381 * are rarely reordered in flight:
3382 */
3383 return nfserr_old_stateid;
3384 }
3385
3386 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3387 {
3388 struct nfs4_stid *s;
3389 struct nfs4_ol_stateid *ols;
3390 __be32 status;
3391
3392 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3393 return nfserr_bad_stateid;
3394 /* Client debugging aid. */
3395 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
3396 char addr_str[INET6_ADDRSTRLEN];
3397 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
3398 sizeof(addr_str));
3399 pr_warn_ratelimited("NFSD: client %s testing state ID "
3400 "with incorrect client ID\n", addr_str);
3401 return nfserr_bad_stateid;
3402 }
3403 s = find_stateid(cl, stateid);
3404 if (!s)
3405 return nfserr_bad_stateid;
3406 status = check_stateid_generation(stateid, &s->sc_stateid, 1);
3407 if (status)
3408 return status;
3409 if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
3410 return nfs_ok;
3411 ols = openlockstateid(s);
3412 if (ols->st_stateowner->so_is_open_owner
3413 && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3414 return nfserr_bad_stateid;
3415 return nfs_ok;
3416 }
3417
3418 static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s, bool sessions)
3419 {
3420 struct nfs4_client *cl;
3421 struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
3422
3423 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3424 return nfserr_bad_stateid;
3425 if (STALE_STATEID(stateid, nn))
3426 return nfserr_stale_stateid;
3427 cl = find_confirmed_client(&stateid->si_opaque.so_clid, sessions, nn);
3428 if (!cl)
3429 return nfserr_expired;
3430 *s = find_stateid_by_type(cl, stateid, typemask);
3431 if (!*s)
3432 return nfserr_bad_stateid;
3433 return nfs_ok;
3434
3435 }
3436
3437 /*
3438 * Checks for stateid operations
3439 */
3440 __be32
3441 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
3442 stateid_t *stateid, int flags, struct file **filpp)
3443 {
3444 struct nfs4_stid *s;
3445 struct nfs4_ol_stateid *stp = NULL;
3446 struct nfs4_delegation *dp = NULL;
3447 struct svc_fh *current_fh = &cstate->current_fh;
3448 struct inode *ino = current_fh->fh_dentry->d_inode;
3449 __be32 status;
3450
3451 if (filpp)
3452 *filpp = NULL;
3453
3454 if (grace_disallows_io(net, ino))
3455 return nfserr_grace;
3456
3457 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3458 return check_special_stateids(net, current_fh, stateid, flags);
3459
3460 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s, cstate->minorversion);
3461 if (status)
3462 return status;
3463 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
3464 if (status)
3465 goto out;
3466 switch (s->sc_type) {
3467 case NFS4_DELEG_STID:
3468 dp = delegstateid(s);
3469 status = nfs4_check_delegmode(dp, flags);
3470 if (status)
3471 goto out;
3472 if (filpp) {
3473 *filpp = dp->dl_file->fi_deleg_file;
3474 BUG_ON(!*filpp);
3475 }
3476 break;
3477 case NFS4_OPEN_STID:
3478 case NFS4_LOCK_STID:
3479 stp = openlockstateid(s);
3480 status = nfs4_check_fh(current_fh, stp);
3481 if (status)
3482 goto out;
3483 if (stp->st_stateowner->so_is_open_owner
3484 && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3485 goto out;
3486 status = nfs4_check_openmode(stp, flags);
3487 if (status)
3488 goto out;
3489 if (filpp) {
3490 if (flags & RD_STATE)
3491 *filpp = find_readable_file(stp->st_file);
3492 else
3493 *filpp = find_writeable_file(stp->st_file);
3494 }
3495 break;
3496 default:
3497 return nfserr_bad_stateid;
3498 }
3499 status = nfs_ok;
3500 out:
3501 return status;
3502 }
3503
3504 static __be32
3505 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3506 {
3507 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
3508 return nfserr_locks_held;
3509 release_lock_stateid(stp);
3510 return nfs_ok;
3511 }
3512
3513 /*
3514 * Test if the stateid is valid
3515 */
3516 __be32
3517 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3518 struct nfsd4_test_stateid *test_stateid)
3519 {
3520 struct nfsd4_test_stateid_id *stateid;
3521 struct nfs4_client *cl = cstate->session->se_client;
3522
3523 nfs4_lock_state();
3524 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
3525 stateid->ts_id_status =
3526 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
3527 nfs4_unlock_state();
3528
3529 return nfs_ok;
3530 }
3531
3532 __be32
3533 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3534 struct nfsd4_free_stateid *free_stateid)
3535 {
3536 stateid_t *stateid = &free_stateid->fr_stateid;
3537 struct nfs4_stid *s;
3538 struct nfs4_client *cl = cstate->session->se_client;
3539 __be32 ret = nfserr_bad_stateid;
3540
3541 nfs4_lock_state();
3542 s = find_stateid(cl, stateid);
3543 if (!s)
3544 goto out;
3545 switch (s->sc_type) {
3546 case NFS4_DELEG_STID:
3547 ret = nfserr_locks_held;
3548 goto out;
3549 case NFS4_OPEN_STID:
3550 case NFS4_LOCK_STID:
3551 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
3552 if (ret)
3553 goto out;
3554 if (s->sc_type == NFS4_LOCK_STID)
3555 ret = nfsd4_free_lock_stateid(openlockstateid(s));
3556 else
3557 ret = nfserr_locks_held;
3558 break;
3559 default:
3560 ret = nfserr_bad_stateid;
3561 }
3562 out:
3563 nfs4_unlock_state();
3564 return ret;
3565 }
3566
3567 static inline int
3568 setlkflg(int type)
3569 {
3570 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
3571 RD_STATE : WR_STATE;
3572 }
3573
3574 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
3575 {
3576 struct svc_fh *current_fh = &cstate->current_fh;
3577 struct nfs4_stateowner *sop = stp->st_stateowner;
3578 __be32 status;
3579
3580 status = nfsd4_check_seqid(cstate, sop, seqid);
3581 if (status)
3582 return status;
3583 if (stp->st_stid.sc_type == NFS4_CLOSED_STID)
3584 /*
3585 * "Closed" stateid's exist *only* to return
3586 * nfserr_replay_me from the previous step.
3587 */
3588 return nfserr_bad_stateid;
3589 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
3590 if (status)
3591 return status;
3592 return nfs4_check_fh(current_fh, stp);
3593 }
3594
3595 /*
3596 * Checks for sequence id mutating operations.
3597 */
3598 static __be32
3599 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3600 stateid_t *stateid, char typemask,
3601 struct nfs4_ol_stateid **stpp)
3602 {
3603 __be32 status;
3604 struct nfs4_stid *s;
3605
3606 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
3607 seqid, STATEID_VAL(stateid));
3608
3609 *stpp = NULL;
3610 status = nfsd4_lookup_stateid(stateid, typemask, &s, cstate->minorversion);
3611 if (status)
3612 return status;
3613 *stpp = openlockstateid(s);
3614 cstate->replay_owner = (*stpp)->st_stateowner;
3615
3616 return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
3617 }
3618
3619 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp)
3620 {
3621 __be32 status;
3622 struct nfs4_openowner *oo;
3623
3624 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
3625 NFS4_OPEN_STID, stpp);
3626 if (status)
3627 return status;
3628 oo = openowner((*stpp)->st_stateowner);
3629 if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
3630 return nfserr_bad_stateid;
3631 return nfs_ok;
3632 }
3633
3634 __be32
3635 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3636 struct nfsd4_open_confirm *oc)
3637 {
3638 __be32 status;
3639 struct nfs4_openowner *oo;
3640 struct nfs4_ol_stateid *stp;
3641
3642 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
3643 (int)cstate->current_fh.fh_dentry->d_name.len,
3644 cstate->current_fh.fh_dentry->d_name.name);
3645
3646 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
3647 if (status)
3648 return status;
3649
3650 nfs4_lock_state();
3651
3652 status = nfs4_preprocess_seqid_op(cstate,
3653 oc->oc_seqid, &oc->oc_req_stateid,
3654 NFS4_OPEN_STID, &stp);
3655 if (status)
3656 goto out;
3657 oo = openowner(stp->st_stateowner);
3658 status = nfserr_bad_stateid;
3659 if (oo->oo_flags & NFS4_OO_CONFIRMED)
3660 goto out;
3661 oo->oo_flags |= NFS4_OO_CONFIRMED;
3662 update_stateid(&stp->st_stid.sc_stateid);
3663 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3664 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
3665 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
3666
3667 nfsd4_client_record_create(oo->oo_owner.so_client);
3668 status = nfs_ok;
3669 out:
3670 if (!cstate->replay_owner)
3671 nfs4_unlock_state();
3672 return status;
3673 }
3674
3675 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
3676 {
3677 if (!test_access(access, stp))
3678 return;
3679 nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
3680 clear_access(access, stp);
3681 }
3682
3683 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
3684 {
3685 switch (to_access) {
3686 case NFS4_SHARE_ACCESS_READ:
3687 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
3688 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3689 break;
3690 case NFS4_SHARE_ACCESS_WRITE:
3691 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
3692 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3693 break;
3694 case NFS4_SHARE_ACCESS_BOTH:
3695 break;
3696 default:
3697 BUG();
3698 }
3699 }
3700
3701 static void
3702 reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
3703 {
3704 int i;
3705 for (i = 0; i < 4; i++) {
3706 if ((i & deny) != i)
3707 clear_deny(i, stp);
3708 }
3709 }
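
/*
 * Each i in 0..3 is one of the NFS4_SHARE_DENY_* modes, and a mode
 * survives only if it is a subset of the requested deny bits. E.g.
 * for deny == NFS4_SHARE_DENY_READ (1): DENY_WRITE (2) and DENY_BOTH
 * (3) are cleared, since 2 & 1 != 2 and 3 & 1 != 3, leaving at most
 * DENY_READ set.
 */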
3710
3711 __be32
3712 nfsd4_open_downgrade(struct svc_rqst *rqstp,
3713 struct nfsd4_compound_state *cstate,
3714 struct nfsd4_open_downgrade *od)
3715 {
3716 __be32 status;
3717 struct nfs4_ol_stateid *stp;
3718
3719 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
3720 (int)cstate->current_fh.fh_dentry->d_name.len,
3721 cstate->current_fh.fh_dentry->d_name.name);
3722
3723 /* We don't yet support WANT bits: */
3724 if (od->od_deleg_want)
3725 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
3726 od->od_deleg_want);
3727
3728 nfs4_lock_state();
3729 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
3730 &od->od_stateid, &stp);
3731 if (status)
3732 goto out;
3733 status = nfserr_inval;
3734 if (!test_access(od->od_share_access, stp)) {
3735 dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
3736 stp->st_access_bmap, od->od_share_access);
3737 goto out;
3738 }
3739 if (!test_deny(od->od_share_deny, stp)) {
3740 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
3741 stp->st_deny_bmap, od->od_share_deny);
3742 goto out;
3743 }
3744 nfs4_stateid_downgrade(stp, od->od_share_access);
3745
3746 reset_union_bmap_deny(od->od_share_deny, stp);
3747
3748 update_stateid(&stp->st_stid.sc_stateid);
3749 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3750 status = nfs_ok;
3751 out:
3752 if (!cstate->replay_owner)
3753 nfs4_unlock_state();
3754 return status;
3755 }
3756
3757 void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so)
3758 {
3759 struct nfs4_openowner *oo;
3760 struct nfs4_ol_stateid *s;
3761
3762 if (!so->so_is_open_owner)
3763 return;
3764 oo = openowner(so);
3765 s = oo->oo_last_closed_stid;
3766 if (!s)
3767 return;
3768 if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) {
3769 /* Release the last_closed_stid on the next seqid bump: */
3770 oo->oo_flags |= NFS4_OO_PURGE_CLOSE;
3771 return;
3772 }
3773 oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE;
3774 release_last_closed_stateid(oo);
3775 }
3776
3777 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
3778 {
3779 unhash_open_stateid(s);
3780 s->st_stid.sc_type = NFS4_CLOSED_STID;
3781 }
3782
3783 /*
3784 * nfs4_unlock_state() is called after encode when cstate->replay_owner is set
3785 */
3786 __be32
3787 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3788 struct nfsd4_close *close)
3789 {
3790 __be32 status;
3791 struct nfs4_openowner *oo;
3792 struct nfs4_ol_stateid *stp;
3793
3794 dprintk("NFSD: nfsd4_close on file %.*s\n",
3795 (int)cstate->current_fh.fh_dentry->d_name.len,
3796 cstate->current_fh.fh_dentry->d_name.name);
3797
3798 nfs4_lock_state();
3799 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
3800 &close->cl_stateid,
3801 NFS4_OPEN_STID|NFS4_CLOSED_STID,
3802 &stp);
3803 if (status)
3804 goto out;
3805 oo = openowner(stp->st_stateowner);
3806 status = nfs_ok;
3807 update_stateid(&stp->st_stid.sc_stateid);
3808 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3809
3810 nfsd4_close_open_stateid(stp);
3811 release_last_closed_stateid(oo);
3812 oo->oo_last_closed_stid = stp;
3813
3814 if (list_empty(&oo->oo_owner.so_stateids)) {
3815 if (cstate->minorversion) {
3816 release_openowner(oo);
3817 cstate->replay_owner = NULL;
3818 } else {
3819 /*
3820 * In the 4.0 case we need to keep the owners around a
3821 * little while to handle CLOSE replay.
3822 */
3823 if (list_empty(&oo->oo_owner.so_stateids))
3824 move_to_close_lru(oo);
3825 }
3826 }
3827 out:
3828 if (!cstate->replay_owner)
3829 nfs4_unlock_state();
3830 return status;
3831 }
3832
3833 __be32
3834 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3835 struct nfsd4_delegreturn *dr)
3836 {
3837 struct nfs4_delegation *dp;
3838 stateid_t *stateid = &dr->dr_stateid;
3839 struct nfs4_stid *s;
3840 __be32 status;
3841
3842 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
3843 return status;
3844
3845 nfs4_lock_state();
3846 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s, cstate->minorversion);
3847 if (status)
3848 goto out;
3849 dp = delegstateid(s);
3850 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
3851 if (status)
3852 goto out;
3853
3854 unhash_delegation(dp);
3855 out:
3856 nfs4_unlock_state();
3857
3858 return status;
3859 }
3860
3861
3862 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
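/*
 * LOFF_OVERFLOW is true when start + len would wrap a u64:
 * (u64)len > ~(u64)start is equivalent to len > U64_MAX - start.
 * E.g. start = U64_MAX and len = 1 overflows, since ~start == 0.
 */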
3863
3864 #define LOCKOWNER_INO_HASH_BITS 8
3865 #define LOCKOWNER_INO_HASH_SIZE (1 << LOCKOWNER_INO_HASH_BITS)
3866 #define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1)
3867
3868 static inline u64
3869 end_offset(u64 start, u64 len)
3870 {
3871 u64 end;
3872
3873 end = start + len;
3874 return end >= start ? end: NFS4_MAX_UINT64;
3875 }
3876
3877 /* last octet in a range */
3878 static inline u64
3879 last_byte_offset(u64 start, u64 len)
3880 {
3881 u64 end;
3882
3883 BUG_ON(!len);
3884 end = start + len;
3885 return end > start ? end - 1: NFS4_MAX_UINT64;
3886 }
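/*
 * Examples of the clamping above: end_offset(1, NFS4_MAX_UINT64)
 * wraps to 0, so it returns NFS4_MAX_UINT64 ("to the end of file");
 * last_byte_offset(0, 4096) returns 4095, the last byte covered.
 */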
3887
3888 static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct xdr_netobj *ownername)
3889 {
3890 return (file_hashval(inode) + cl_id
3891 + opaque_hashval(ownername->data, ownername->len))
3892 & LOCKOWNER_INO_HASH_MASK;
3893 }
3894
3895 static struct list_head lockowner_ino_hashtbl[LOCKOWNER_INO_HASH_SIZE];
3896
3897 /*
3898 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
3899 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
3900 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
3901 * locking, this prevents us from being completely protocol-compliant. The
3902 * real solution to this problem is to start using unsigned file offsets in
3903 * the VFS, but this is a very deep change!
3904 */
3905 static inline void
3906 nfs4_transform_lock_offset(struct file_lock *lock)
3907 {
3908 if (lock->fl_start < 0)
3909 lock->fl_start = OFFSET_MAX;
3910 if (lock->fl_end < 0)
3911 lock->fl_end = OFFSET_MAX;
3912 }
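/*
 * Example: a LOCK "to the end of file" has last_byte_offset ==
 * NFS4_MAX_UINT64, which is -1 when stored in the signed loff_t
 * fl_end; the clamp above turns it into OFFSET_MAX (2^63 - 1).
 */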
3913
3914 /* Hack!: For now, we're defining this just so we can use a pointer to it
3915 * as a unique cookie to identify our (NFSv4's) posix locks. */
3916 static const struct lock_manager_operations nfsd_posix_mng_ops = {
3917 };
3918
3919 static inline void
3920 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
3921 {
3922 struct nfs4_lockowner *lo;
3923
3924 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
3925 lo = (struct nfs4_lockowner *) fl->fl_owner;
3926 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
3927 lo->lo_owner.so_owner.len, GFP_KERNEL);
3928 if (!deny->ld_owner.data)
3929 /* We just don't care that much */
3930 goto nevermind;
3931 deny->ld_owner.len = lo->lo_owner.so_owner.len;
3932 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
3933 } else {
3934 nevermind:
3935 deny->ld_owner.len = 0;
3936 deny->ld_owner.data = NULL;
3937 deny->ld_clientid.cl_boot = 0;
3938 deny->ld_clientid.cl_id = 0;
3939 }
3940 deny->ld_start = fl->fl_start;
3941 deny->ld_length = NFS4_MAX_UINT64;
3942 if (fl->fl_end != NFS4_MAX_UINT64)
3943 deny->ld_length = fl->fl_end - fl->fl_start + 1;
3944 deny->ld_type = NFS4_READ_LT;
3945 if (fl->fl_type != F_RDLCK)
3946 deny->ld_type = NFS4_WRITE_LT;
3947 }
3948
3949 static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
3950 {
3951 struct nfs4_ol_stateid *lst;
3952
3953 if (!same_owner_str(&lo->lo_owner, owner, clid))
3954 return false;
3955 lst = list_first_entry(&lo->lo_owner.so_stateids,
3956 struct nfs4_ol_stateid, st_perstateowner);
3957 return lst->st_file->fi_inode == inode;
3958 }
3959
3960 static struct nfs4_lockowner *
3961 find_lockowner_str(struct inode *inode, clientid_t *clid,
3962 struct xdr_netobj *owner)
3963 {
3964 unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner);
3965 struct nfs4_lockowner *lo;
3966
3967 list_for_each_entry(lo, &lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) {
3968 if (same_lockowner_ino(lo, inode, clid, owner))
3969 return lo;
3970 }
3971 return NULL;
3972 }
3973
3974 static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
3975 {
3976 struct inode *inode = open_stp->st_file->fi_inode;
3977 unsigned int inohash = lockowner_ino_hashval(inode,
3978 clp->cl_clientid.cl_id, &lo->lo_owner.so_owner);
3979
3980 list_add(&lo->lo_owner.so_strhash, &ownerstr_hashtbl[strhashval]);
3981 list_add(&lo->lo_owner_ino_hash, &lockowner_ino_hashtbl[inohash]);
3982 list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
3983 }
3984
3985 /*
3986 * Alloc a lock owner structure.
3987 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
3988 * occurred.
3989 *
3990 * strhashval = ownerstr_hashval
3991 */
3992
3993 static struct nfs4_lockowner *
3994 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
3995 struct nfs4_lockowner *lo;
3996
3997 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
3998 if (!lo)
3999 return NULL;
4000 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
4001 lo->lo_owner.so_is_open_owner = 0;
4002 /* It is the openowner seqid that will be incremented in encode in the
4003 * case of new lockowners; so increment the lock seqid manually: */
4004 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
4005 hash_lockowner(lo, strhashval, clp, open_stp);
4006 return lo;
4007 }
4008
4009 static struct nfs4_ol_stateid *
4010 alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
4011 {
4012 struct nfs4_ol_stateid *stp;
4013 struct nfs4_client *clp = lo->lo_owner.so_client;
4014
4015 stp = nfs4_alloc_stateid(clp);
4016 if (stp == NULL)
4017 return NULL;
4018 init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
4019 list_add(&stp->st_perfile, &fp->fi_stateids);
4020 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
4021 stp->st_stateowner = &lo->lo_owner;
4022 get_nfs4_file(fp);
4023 stp->st_file = fp;
4024 stp->st_access_bmap = 0;
4025 stp->st_deny_bmap = open_stp->st_deny_bmap;
4026 stp->st_openstp = open_stp;
4027 return stp;
4028 }
4029
4030 static int
4031 check_lock_length(u64 offset, u64 length)
4032 {
4033 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
4034 LOFF_OVERFLOW(offset, length)));
4035 }
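/*
 * Per RFC 3530 and RFC 5661, a lock length of zero is NFS4ERR_INVAL
 * and an all-ones length means "every byte from offset to the end of
 * file", so overflow is only an error for other lengths.
 */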
4036
4037 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
4038 {
4039 struct nfs4_file *fp = lock_stp->st_file;
4040 int oflag = nfs4_access_to_omode(access);
4041
4042 if (test_access(access, lock_stp))
4043 return;
4044 nfs4_file_get_access(fp, oflag);
4045 set_access(access, lock_stp);
4046 }
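/*
 * The test_access() check above makes this idempotent: the file
 * access reference is taken at most once per access mode, no matter
 * how many LOCKs arrive on the same lock stateid.
 */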
4047
4048 static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
4049 {
4050 struct nfs4_file *fi = ost->st_file;
4051 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
4052 struct nfs4_client *cl = oo->oo_owner.so_client;
4053 struct nfs4_lockowner *lo;
4054 unsigned int strhashval;
4055
4056 lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid, &lock->v.new.owner);
4057 if (lo) {
4058 if (!cstate->minorversion)
4059 return nfserr_bad_seqid;
4060 /* XXX: a lockowner always has exactly one stateid: */
4061 *lst = list_first_entry(&lo->lo_owner.so_stateids,
4062 struct nfs4_ol_stateid, st_perstateowner);
4063 return nfs_ok;
4064 }
4065 strhashval = ownerstr_hashval(cl->cl_clientid.cl_id,
4066 &lock->v.new.owner);
4067 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
4068 if (lo == NULL)
4069 return nfserr_jukebox;
4070 *lst = alloc_init_lock_stateid(lo, fi, ost);
4071 if (*lst == NULL) {
4072 release_lockowner(lo);
4073 return nfserr_jukebox;
4074 }
4075 *new = true;
4076 return nfs_ok;
4077 }
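/*
 * *new is set only on the allocation path above; on failure after
 * this returns, the caller (nfsd4_lock) releases the freshly created
 * lockowner at its out: label.
 */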
4078
4079 /*
4080 * LOCK operation
4081 */
4082 __be32
4083 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4084 struct nfsd4_lock *lock)
4085 {
4086 struct nfs4_openowner *open_sop = NULL;
4087 struct nfs4_lockowner *lock_sop = NULL;
4088 struct nfs4_ol_stateid *lock_stp;
4089 struct file *filp = NULL;
4090 struct file_lock *file_lock = NULL;
4091 struct file_lock *conflock = NULL;
4092 __be32 status = 0;
4093 bool new_state = false;
4094 int lkflg;
4095 int err;
4096 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4097
4098 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
4099 (long long) lock->lk_offset,
4100 (long long) lock->lk_length);
4101
4102 if (check_lock_length(lock->lk_offset, lock->lk_length))
4103 return nfserr_inval;
4104
4105 if ((status = fh_verify(rqstp, &cstate->current_fh,
4106 S_IFREG, NFSD_MAY_LOCK))) {
4107 dprintk("NFSD: nfsd4_lock: permission denied!\n");
4108 return status;
4109 }
4110
4111 nfs4_lock_state();
4112
4113 if (lock->lk_is_new) {
4114 struct nfs4_ol_stateid *open_stp = NULL;
4115
4116 if (nfsd4_has_session(cstate))
4117 /* See rfc 5661 18.10.3: given clientid is ignored: */
4118 memcpy(&lock->v.new.clientid,
4119 &cstate->session->se_client->cl_clientid,
4120 sizeof(clientid_t));
4121
4122 status = nfserr_stale_clientid;
4123 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
4124 goto out;
4125
4126 /* validate and update open stateid and open seqid */
4127 status = nfs4_preprocess_confirmed_seqid_op(cstate,
4128 lock->lk_new_open_seqid,
4129 &lock->lk_new_open_stateid,
4130 &open_stp);
4131 if (status)
4132 goto out;
4133 open_sop = openowner(open_stp->st_stateowner);
4134 status = nfserr_bad_stateid;
4135 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
4136 &lock->v.new.clientid))
4137 goto out;
4138 status = lookup_or_create_lock_state(cstate, open_stp, lock,
4139 &lock_stp, &new_state);
4140 } else
4141 status = nfs4_preprocess_seqid_op(cstate,
4142 lock->lk_old_lock_seqid,
4143 &lock->lk_old_lock_stateid,
4144 NFS4_LOCK_STID, &lock_stp);
4145 if (status)
4146 goto out;
4147 lock_sop = lockowner(lock_stp->st_stateowner);
4148
4149 lkflg = setlkflg(lock->lk_type);
4150 status = nfs4_check_openmode(lock_stp, lkflg);
4151 if (status)
4152 goto out;
4153
4154 status = nfserr_grace;
4155 if (locks_in_grace(SVC_NET(rqstp)) && !lock->lk_reclaim)
4156 goto out;
4157 status = nfserr_no_grace;
4158 if (!locks_in_grace(SVC_NET(rqstp)) && lock->lk_reclaim)
4159 goto out;
4160
4161 file_lock = locks_alloc_lock();
4162 if (!file_lock) {
4163 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4164 status = nfserr_jukebox;
4165 goto out;
4166 }
4167
4168 locks_init_lock(file_lock);
4169 switch (lock->lk_type) {
4170 case NFS4_READ_LT:
4171 case NFS4_READW_LT:
4172 filp = find_readable_file(lock_stp->st_file);
4173 if (filp)
4174 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
4175 file_lock->fl_type = F_RDLCK;
4176 break;
4177 case NFS4_WRITE_LT:
4178 case NFS4_WRITEW_LT:
4179 filp = find_writeable_file(lock_stp->st_file);
4180 if (filp)
4181 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
4182 file_lock->fl_type = F_WRLCK;
4183 break;
4184 default:
4185 status = nfserr_inval;
4186 goto out;
4187 }
4188 if (!filp) {
4189 status = nfserr_openmode;
4190 goto out;
4191 }
4192 file_lock->fl_owner = (fl_owner_t)lock_sop;
4193 file_lock->fl_pid = current->tgid;
4194 file_lock->fl_file = filp;
4195 file_lock->fl_flags = FL_POSIX;
4196 file_lock->fl_lmops = &nfsd_posix_mng_ops;
4197 file_lock->fl_start = lock->lk_offset;
4198 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
4199 nfs4_transform_lock_offset(file_lock);
4200
4201 conflock = locks_alloc_lock();
4202 if (!conflock) {
4203 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4204 status = nfserr_jukebox;
4205 goto out;
4206 }
4207
4208 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
4209 switch (-err) {
4210 case 0: /* success! */
4211 update_stateid(&lock_stp->st_stid.sc_stateid);
4212 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
4213 sizeof(stateid_t));
4214 status = 0;
4215 break;
4216 case EAGAIN: /* conflock holds conflicting lock */
4217 status = nfserr_denied;
4218 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
4219 nfs4_set_lock_denied(conflock, &lock->lk_denied);
4220 break;
4221 case EDEADLK:
4222 status = nfserr_deadlock;
4223 break;
4224 default:
4225 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
4226 status = nfserrno(err);
4227 break;
4228 }
4229 out:
4230 if (status && new_state)
4231 release_lockowner(lock_sop);
4232 if (!cstate->replay_owner)
4233 nfs4_unlock_state();
4234 if (file_lock)
4235 locks_free_lock(file_lock);
4236 if (conflock)
4237 locks_free_lock(conflock);
4238 return status;
4239 }
4240
4241 /*
4242 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4243 * so we do a temporary open here just to get an open file to pass to
4244 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
4245 * inode operation.)
4246 */
4247 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4248 {
4249 struct file *file;
4250 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4251 if (!err) {
4252 err = nfserrno(vfs_test_lock(file, lock));
4253 nfsd_close(file);
4254 }
4255 return err;
4256 }
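/*
 * NFSD_MAY_READ is enough for the temporary open above even when a
 * write lock is being tested: vfs_test_lock() only checks for
 * conflicts, it never acquires anything.
 */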
4257
4258 /*
4259 * LOCKT operation
4260 */
4261 __be32
4262 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4263 struct nfsd4_lockt *lockt)
4264 {
4265 struct inode *inode;
4266 struct file_lock *file_lock = NULL;
4267 struct nfs4_lockowner *lo;
4268 __be32 status;
4269 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4270
4271 if (locks_in_grace(SVC_NET(rqstp)))
4272 return nfserr_grace;
4273
4274 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
4275 return nfserr_inval;
4276
4277 nfs4_lock_state();
4278
4279 status = nfserr_stale_clientid;
4280 if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid, nn))
4281 goto out;
4282
4283 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4284 goto out;
4285
4286 inode = cstate->current_fh.fh_dentry->d_inode;
4287 file_lock = locks_alloc_lock();
4288 if (!file_lock) {
4289 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4290 status = nfserr_jukebox;
4291 goto out;
4292 }
4293 locks_init_lock(file_lock);
4294 switch (lockt->lt_type) {
4295 case NFS4_READ_LT:
4296 case NFS4_READW_LT:
4297 file_lock->fl_type = F_RDLCK;
4298 break;
4299 case NFS4_WRITE_LT:
4300 case NFS4_WRITEW_LT:
4301 file_lock->fl_type = F_WRLCK;
4302 break;
4303 default:
4304 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
4305 status = nfserr_inval;
4306 goto out;
4307 }
4308
4309 lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
4310 if (lo)
4311 file_lock->fl_owner = (fl_owner_t)lo;
4312 file_lock->fl_pid = current->tgid;
4313 file_lock->fl_flags = FL_POSIX;
4314
4315 file_lock->fl_start = lockt->lt_offset;
4316 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
4317
4318 nfs4_transform_lock_offset(file_lock);
4319
4320 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
4321 if (status)
4322 goto out;
4323
4324 if (file_lock->fl_type != F_UNLCK) {
4325 status = nfserr_denied;
4326 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
4327 }
4328 out:
4329 nfs4_unlock_state();
4330 if (file_lock)
4331 locks_free_lock(file_lock);
4332 return status;
4333 }
4334
4335 __be32
4336 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4337 struct nfsd4_locku *locku)
4338 {
4339 struct nfs4_ol_stateid *stp;
4340 struct file *filp = NULL;
4341 struct file_lock *file_lock = NULL;
4342 __be32 status;
4343 int err;
4344
4345 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
4346 (long long) locku->lu_offset,
4347 (long long) locku->lu_length);
4348
4349 if (check_lock_length(locku->lu_offset, locku->lu_length))
4350 return nfserr_inval;
4351
4352 nfs4_lock_state();
4353
4354 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
4355 &locku->lu_stateid, NFS4_LOCK_STID, &stp);
4356 if (status)
4357 goto out;
4358 filp = find_any_file(stp->st_file);
4359 if (!filp) {
4360 status = nfserr_lock_range;
4361 goto out;
4362 }
4363 file_lock = locks_alloc_lock();
4364 if (!file_lock) {
4365 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4366 status = nfserr_jukebox;
4367 goto out;
4368 }
4369 locks_init_lock(file_lock);
4370 file_lock->fl_type = F_UNLCK;
4371 file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
4372 file_lock->fl_pid = current->tgid;
4373 file_lock->fl_file = filp;
4374 file_lock->fl_flags = FL_POSIX;
4375 file_lock->fl_lmops = &nfsd_posix_mng_ops;
4376 file_lock->fl_start = locku->lu_offset;
4377
4378 file_lock->fl_end = last_byte_offset(locku->lu_offset,
4379 locku->lu_length);
4380 nfs4_transform_lock_offset(file_lock);
4381
4382 /*
4383 * Try to unlock the file in the VFS.
4384 */
4385 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
4386 if (err) {
4387 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
4388 goto out_nfserr;
4389 }
4390 /*
4391 * OK, unlock succeeded; the only thing left to do is update the stateid.
4392 */
4393 update_stateid(&stp->st_stid.sc_stateid);
4394 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4395
4396 out:
4397 if (!cstate->replay_owner)
4398 nfs4_unlock_state();
4399 if (file_lock)
4400 locks_free_lock(file_lock);
4401 return status;
4402
4403 out_nfserr:
4404 status = nfserrno(err);
4405 goto out;
4406 }
4407
4408 /*
4409 * returns
4410 * 1: locks held by lockowner
4411 * 0: no locks held by lockowner
4412 */
4413 static int
4414 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
4415 {
4416 struct file_lock **flpp;
4417 struct inode *inode = filp->fi_inode;
4418 int status = 0;
4419
4420 lock_flocks();
4421 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4422 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
4423 status = 1;
4424 goto out;
4425 }
4426 }
4427 out:
4428 unlock_flocks();
4429 return status;
4430 }
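/*
 * The fl_owner comparison above works because nfsd keys its posix
 * locks by lockowner pointer (see the fl_owner assignments in
 * nfsd4_lock and nfsd4_locku), so pointer identity is enough to
 * recognize our locks on the inode's i_flock list.
 */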
4431
4432 __be32
4433 nfsd4_release_lockowner(struct svc_rqst *rqstp,
4434 struct nfsd4_compound_state *cstate,
4435 struct nfsd4_release_lockowner *rlockowner)
4436 {
4437 clientid_t *clid = &rlockowner->rl_clientid;
4438 struct nfs4_stateowner *sop;
4439 struct nfs4_lockowner *lo;
4440 struct nfs4_ol_stateid *stp;
4441 struct xdr_netobj *owner = &rlockowner->rl_owner;
4442 struct list_head matches;
4443 unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
4444 __be32 status;
4445 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4446
4447 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
4448 clid->cl_boot, clid->cl_id);
4449
4450 /* XXX check for lease expiration */
4451
4452 status = nfserr_stale_clientid;
4453 if (STALE_CLIENTID(clid, nn))
4454 return status;
4455
4456 nfs4_lock_state();
4457
4458 status = nfserr_locks_held;
4459 INIT_LIST_HEAD(&matches);
4460
4461 list_for_each_entry(sop, &ownerstr_hashtbl[hashval], so_strhash) {
4462 if (sop->so_is_open_owner)
4463 continue;
4464 if (!same_owner_str(sop, owner, clid))
4465 continue;
4466 list_for_each_entry(stp, &sop->so_stateids,
4467 st_perstateowner) {
4468 lo = lockowner(sop);
4469 if (check_for_locks(stp->st_file, lo))
4470 goto out;
4471 list_add(&lo->lo_list, &matches);
4472 }
4473 }
4474 /* Clients probably won't expect us to return with some (but not all)
4475 * of the lockowner state released; so don't release any until all
4476 * have been checked. */
4477 status = nfs_ok;
4478 while (!list_empty(&matches)) {
4479 lo = list_entry(matches.next, struct nfs4_lockowner,
4480 lo_list);
4481 /* unhash_stateowner deletes so_perclient only
4482 * for openowners. */
4483 list_del(&lo->lo_list);
4484 release_lockowner(lo);
4485 }
4486 out:
4487 nfs4_unlock_state();
4488 return status;
4489 }
4490
4491 static inline struct nfs4_client_reclaim *
4492 alloc_reclaim(void)
4493 {
4494 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
4495 }
4496
4497 bool
4498 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
4499 {
4500 struct nfs4_client_reclaim *crp;
4501
4502 crp = nfsd4_find_reclaim_client(name, nn);
4503 return (crp && crp->cr_clp);
4504 }
4505
4506 /*
4507 * failure => all reset bets are off, nfserr_no_grace...
4508 */
4509 struct nfs4_client_reclaim *
4510 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
4511 {
4512 unsigned int strhashval;
4513 struct nfs4_client_reclaim *crp;
4514
4515 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
4516 crp = alloc_reclaim();
4517 if (crp) {
4518 strhashval = clientstr_hashval(name);
4519 INIT_LIST_HEAD(&crp->cr_strhash);
4520 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
4521 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
4522 crp->cr_clp = NULL;
4523 nn->reclaim_str_hashtbl_size++;
4524 }
4525 return crp;
4526 }
4527
4528 void
4529 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
4530 {
4531 list_del(&crp->cr_strhash);
4532 kfree(crp);
4533 nn->reclaim_str_hashtbl_size--;
4534 }
4535
4536 void
4537 nfs4_release_reclaim(struct nfsd_net *nn)
4538 {
4539 struct nfs4_client_reclaim *crp = NULL;
4540 int i;
4541
4542 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4543 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
4544 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
4545 struct nfs4_client_reclaim, cr_strhash);
4546 nfs4_remove_reclaim_record(crp, nn);
4547 }
4548 }
4549 BUG_ON(nn->reclaim_str_hashtbl_size);
4550 }
4551
4552 /* Called from OPEN, CLAIM_PREVIOUS with a new clientid. */
4554 struct nfs4_client_reclaim *
4555 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
4556 {
4557 unsigned int strhashval;
4558 struct nfs4_client_reclaim *crp = NULL;
4559
4560 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
4561
4562 strhashval = clientstr_hashval(recdir);
4563 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
4564 if (same_name(crp->cr_recdir, recdir)) {
4565 return crp;
4566 }
4567 }
4568 return NULL;
4569 }
4570
4571 /*
4572 * Called from OPEN. Check whether the client is allowed to reclaim state.
4573 */
4574 __be32
4575 nfs4_check_open_reclaim(clientid_t *clid, bool sessions)
4576 {
4577 struct nfs4_client *clp;
4578 struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
4579
4580 /* find clientid in conf_id_hashtbl */
4581 clp = find_confirmed_client(clid, sessions, nn);
4582 if (clp == NULL)
4583 return nfserr_reclaim_bad;
4584
4585 return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok;
4586 }
4587
4588 #ifdef CONFIG_NFSD_FAULT_INJECTION
4589
4590 void nfsd_forget_clients(u64 num)
4591 {
4592 struct nfs4_client *clp, *next;
4593 int count = 0;
4594
4595 nfs4_lock_state();
4596 list_for_each_entry_safe(clp, next, &client_lru, cl_lru) {
4597 expire_client(clp);
4598 if (++count == num)
4599 break;
4600 }
4601 nfs4_unlock_state();
4602
4603 printk(KERN_INFO "NFSD: Forgot %d clients", count);
4604 }
4605
4606 static void release_lockowner_sop(struct nfs4_stateowner *sop)
4607 {
4608 release_lockowner(lockowner(sop));
4609 }
4610
4611 static void release_openowner_sop(struct nfs4_stateowner *sop)
4612 {
4613 release_openowner(openowner(sop));
4614 }
4615
4616 static int nfsd_release_n_owners(u64 num, bool is_open_owner,
4617 void (*release_sop)(struct nfs4_stateowner *))
4618 {
4619 int i, count = 0;
4620 struct nfs4_stateowner *sop, *next;
4621
4622 for (i = 0; i < OWNER_HASH_SIZE; i++) {
4623 list_for_each_entry_safe(sop, next, &ownerstr_hashtbl[i], so_strhash) {
4624 if (sop->so_is_open_owner != is_open_owner)
4625 continue;
4626 release_sop(sop);
4627 if (++count == num)
4628 return count;
4629 }
4630 }
4631 return count;
4632 }
4633
4634 void nfsd_forget_locks(u64 num)
4635 {
4636 int count;
4637
4638 nfs4_lock_state();
4639 count = nfsd_release_n_owners(num, false, release_lockowner_sop);
4640 nfs4_unlock_state();
4641
4642 printk(KERN_INFO "NFSD: Forgot %d locks", count);
4643 }
4644
4645 void nfsd_forget_openowners(u64 num)
4646 {
4647 int count;
4648
4649 nfs4_lock_state();
4650 count = nfsd_release_n_owners(num, true, release_openowner_sop);
4651 nfs4_unlock_state();
4652
4653 printk(KERN_INFO "NFSD: Forgot %d open owners", count);
4654 }
4655
4656 static int nfsd_process_n_delegations(u64 num, struct list_head *list)
4657 {
4658 int i, count = 0;
4659 struct nfs4_file *fp, *fnext;
4660 struct nfs4_delegation *dp, *dnext;
4661
4662 for (i = 0; i < FILE_HASH_SIZE; i++) {
4663 list_for_each_entry_safe(fp, fnext, &file_hashtbl[i], fi_hash) {
4664 list_for_each_entry_safe(dp, dnext, &fp->fi_delegations, dl_perfile) {
4665 list_move(&dp->dl_recall_lru, list);
4666 if (++count == num)
4667 return count;
4668 }
4669 }
4670 }
4671
4672 return count;
4673 }
4674
4675 void nfsd_forget_delegations(u64 num)
4676 {
4677 unsigned int count;
4678 LIST_HEAD(victims);
4679 struct nfs4_delegation *dp, *dnext;
4680
4681 spin_lock(&recall_lock);
4682 count = nfsd_process_n_delegations(num, &victims);
4683 spin_unlock(&recall_lock);
4684
4685 nfs4_lock_state();
4686 list_for_each_entry_safe(dp, dnext, &victims, dl_recall_lru)
4687 unhash_delegation(dp);
4688 nfs4_unlock_state();
4689
4690 printk(KERN_INFO "NFSD: Forgot %d delegations", count);
4691 }
4692
4693 void nfsd_recall_delegations(u64 num)
4694 {
4695 unsigned int count;
4696 LIST_HEAD(victims);
4697 struct nfs4_delegation *dp, *dnext;
4698
4699 spin_lock(&recall_lock);
4700 count = nfsd_process_n_delegations(num, &victims);
4701 list_for_each_entry_safe(dp, dnext, &victims, dl_recall_lru) {
4702 list_del(&dp->dl_recall_lru);
4703 nfsd_break_one_deleg(dp);
4704 }
4705 spin_unlock(&recall_lock);
4706
4707 printk(KERN_INFO "NFSD: Recalled %d delegations", count);
4708 }
4709
4710 #endif /* CONFIG_NFSD_FAULT_INJECTION */
4711
4712 /* initialization to perform at module load time: */
4713
4714 void
4715 nfs4_state_init(void)
4716 {
4717 int i;
4718
4719 for (i = 0; i < SESSION_HASH_SIZE; i++)
4720 INIT_LIST_HEAD(&sessionid_hashtbl[i]);
4721 for (i = 0; i < FILE_HASH_SIZE; i++) {
4722 INIT_LIST_HEAD(&file_hashtbl[i]);
4723 }
4724 for (i = 0; i < OWNER_HASH_SIZE; i++) {
4725 INIT_LIST_HEAD(&ownerstr_hashtbl[i]);
4726 }
4727 for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++)
4728 INIT_LIST_HEAD(&lockowner_ino_hashtbl[i]);
4729 INIT_LIST_HEAD(&close_lru);
4730 INIT_LIST_HEAD(&client_lru);
4731 INIT_LIST_HEAD(&del_recall_lru);
4732 }
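/*
 * Note that the tables initialized above are still global to the
 * module; so far only the client id hash tables and name trees have
 * been made per-net (see nfs4_state_start_net() below).
 */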
4733
4734 /*
4735 * Since the lifetime of a delegation isn't limited to that of an open, a
4736 * client may quite reasonably hang on to a delegation as long as it has
4737 * the inode cached. This becomes an obvious problem the first time a
4738 * client's inode cache approaches the size of the server's total memory.
4739 *
4740 * For now we avoid this problem by imposing a hard limit on the number
4741 * of delegations, which varies according to the server's memory size.
4742 */
4743 static void
4744 set_max_delegations(void)
4745 {
4746 /*
4747 * Allow at most 4 delegations per megabyte of RAM. Quick
4748 * estimates suggest that in the worst case (where every delegation
4749 * is for a different inode), a delegation could take about 1.5K,
4750 * giving a worst case usage of about 6% of memory.
4751 */
4752 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
4753 }
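/*
 * Checking the arithmetic above: with 4K pages PAGE_SHIFT is 12, so
 * the shift is 20 - 2 - 12 = 6, i.e. one delegation per 64 pages
 * (256K), which is the promised 4 delegations per megabyte.
 */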
4754
4755 static int nfs4_state_start_net(struct net *net)
4756 {
4757 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4758 int i;
4759
4760 nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
4761 CLIENT_HASH_SIZE, GFP_KERNEL);
4762 if (!nn->conf_id_hashtbl)
4763 goto err;
4764 nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
4765 CLIENT_HASH_SIZE, GFP_KERNEL);
4766 if (!nn->unconf_id_hashtbl)
4767 goto err_unconf_id;
4768
4769 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4770 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
4771 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
4772 }
4773 nn->conf_name_tree = RB_ROOT;
4774 nn->unconf_name_tree = RB_ROOT;
4775
4776 return 0;
4777
4778 err_unconf_id:
4779 kfree(nn->conf_id_hashtbl);
4780 err:
4781 return -ENOMEM;
4782 }
4783
4784 static void
4785 __nfs4_state_shutdown_net(struct net *net)
4786 {
4787 int i;
4788 struct nfs4_client *clp = NULL;
4789 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4790 struct rb_node *node, *tmp;
4791
4792 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4793 while (!list_empty(&nn->conf_id_hashtbl[i])) {
4794 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
4795 destroy_client(clp);
4796 }
4797 }
4798
4799 node = rb_first(&nn->unconf_name_tree);
4800 while (node != NULL) {
4801 tmp = node;
4802 node = rb_next(tmp);
4803 clp = rb_entry(tmp, struct nfs4_client, cl_namenode);
4804 rb_erase(tmp, &nn->unconf_name_tree);
4805 destroy_client(clp);
4806 }
4807
4808 kfree(nn->unconf_id_hashtbl);
4809 kfree(nn->conf_id_hashtbl);
4810 }
4811
4812 /* initialization to perform when the nfsd service is started: */
4813
4814 int
4815 nfs4_state_start(void)
4816 {
4817 struct net *net = &init_net;
4818 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4819 int ret;
4820
4821 /*
4822 * FIXME: For now, we hang most of the pernet global stuff off of
4823 * init_net until nfsd is fully containerized. Eventually, we'll
4824 * need to pass a net pointer into this function, take a reference
4825 * to that instead and then do most of the rest of this on a per-net
4826 * basis.
4827 */
4828 get_net(net);
4829 ret = nfs4_state_start_net(net);
4830 if (ret)
4831 return ret;
4832 nfsd4_client_tracking_init(net);
4833 nn->boot_time = get_seconds();
4834 locks_start_grace(net, &nn->nfsd4_manager);
4835 nn->grace_ended = false;
4836 printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
4837 nfsd4_grace);
4838 ret = set_callback_cred();
4839 if (ret) {
4840 ret = -ENOMEM;
4841 goto out_recovery;
4842 }
4843 laundry_wq = create_singlethread_workqueue("nfsd4");
4844 if (laundry_wq == NULL) {
4845 ret = -ENOMEM;
4846 goto out_recovery;
4847 }
4848 ret = nfsd4_create_callback_queue();
4849 if (ret)
4850 goto out_free_laundry;
4851 queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
4852 set_max_delegations();
4853 return 0;
4854 out_free_laundry:
4855 destroy_workqueue(laundry_wq);
4856 out_recovery:
4857 nfsd4_client_tracking_exit(net);
4858 __nfs4_state_shutdown_net(net);
4859 put_net(net);
4860 return ret;
4861 }
4862
4863 /* should be called with the state lock held */
4864 static void
4865 __nfs4_state_shutdown(struct net *net)
4866 {
4867 struct nfs4_delegation *dp = NULL;
4868 struct list_head *pos, *next, reaplist;
4869
4870 __nfs4_state_shutdown_net(net);
4871
4872 INIT_LIST_HEAD(&reaplist);
4873 spin_lock(&recall_lock);
4874 list_for_each_safe(pos, next, &del_recall_lru) {
4875 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4876 list_move(&dp->dl_recall_lru, &reaplist);
4877 }
4878 spin_unlock(&recall_lock);
4879 list_for_each_safe(pos, next, &reaplist) {
4880 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
4881 unhash_delegation(dp);
4882 }
4883
4884 nfsd4_client_tracking_exit(&init_net);
4885 put_net(&init_net);
4886 }
4887
4888 void
4889 nfs4_state_shutdown(void)
4890 {
4891 struct net *net = &init_net;
4892 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4893
4894 cancel_delayed_work_sync(&laundromat_work);
4895 destroy_workqueue(laundry_wq);
4896 locks_end_grace(&nn->nfsd4_manager);
4897 nfs4_lock_state();
4898 __nfs4_state_shutdown(net);
4899 nfs4_unlock_state();
4900 nfsd4_destroy_callback_queue();
4901 }
4902
4903 static void
4904 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
4905 {
4906 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
4907 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
4908 }
4909
4910 static void
4911 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
4912 {
4913 if (cstate->minorversion) {
4914 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
4915 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
4916 }
4917 }
4918
4919 void
4920 clear_current_stateid(struct nfsd4_compound_state *cstate)
4921 {
4922 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
4923 }
4924
4925 /*
4926 * functions to set current state id
4927 */
4928 void
4929 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
4930 {
4931 put_stateid(cstate, &odp->od_stateid);
4932 }
4933
4934 void
4935 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
4936 {
4937 put_stateid(cstate, &open->op_stateid);
4938 }
4939
4940 void
4941 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
4942 {
4943 put_stateid(cstate, &close->cl_stateid);
4944 }
4945
4946 void
4947 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
4948 {
4949 put_stateid(cstate, &lock->lk_resp_stateid);
4950 }
4951
4952 /*
4953 * functions to consume current state id
4954 */
4955
4956 void
4957 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
4958 {
4959 get_stateid(cstate, &odp->od_stateid);
4960 }
4961
4962 void
4963 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
4964 {
4965 get_stateid(cstate, &drp->dr_stateid);
4966 }
4967
4968 void
4969 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
4970 {
4971 get_stateid(cstate, &fsp->fr_stateid);
4972 }
4973
4974 void
4975 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
4976 {
4977 get_stateid(cstate, &setattr->sa_stateid);
4978 }
4979
4980 void
4981 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
4982 {
4983 get_stateid(cstate, &close->cl_stateid);
4984 }
4985
4986 void
4987 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
4988 {
4989 get_stateid(cstate, &locku->lu_stateid);
4990 }
4991
4992 void
4993 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
4994 {
4995 get_stateid(cstate, &read->rd_stateid);
4996 }
4997
4998 void
4999 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
5000 {
5001 get_stateid(cstate, &write->wr_stateid);
5002 }